From cedbe8fcd73a2b1e428703146d82459694a2f351 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Wed, 22 Feb 2023 09:04:23 +0100 Subject: [PATCH] fix .blend --- invokeai/backend/invoke_ai_web_server.py | 4 +- ldm/invoke/args.py | 2 +- ldm/invoke/conditioning.py | 64 ++++++++++++++++++++++-- 3 files changed, 63 insertions(+), 7 deletions(-) diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index 45712ee0da..21ad418b68 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/invoke_ai_web_server.py @@ -25,12 +25,12 @@ from invokeai.backend.modules.parameters import parameters_to_command import invokeai.frontend.dist as frontend from ldm.generate import Generate from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure +from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure, split_weighted_subprompts from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState from ldm.invoke.generator.inpaint import infill_methods from ldm.invoke.globals import Globals, global_converted_ckpts_dir from ldm.invoke.pngwriter import PngWriter, retrieve_metadata -from compel.prompt_parser import split_weighted_subprompts, Blend +from compel.prompt_parser import Blend from ldm.invoke.globals import global_models_dir from ldm.invoke.merge_diffusers import merge_diffusion_models diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py index 2a1b7e2d40..2500b5b97d 100644 --- a/ldm/invoke/args.py +++ b/ldm/invoke/args.py @@ -96,7 +96,7 @@ from pathlib import Path import ldm.invoke import ldm.invoke.pngwriter -from compel.prompt_parser import split_weighted_subprompts +from ldm.invoke.conditioning import split_weighted_subprompts from ldm.invoke.globals import Globals diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py index 70ff484b07..646066a869 100644 --- 
a/ldm/invoke/conditioning.py +++ b/ldm/invoke/conditioning.py @@ -7,10 +7,10 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an ''' import re -from typing import Union +from typing import Union, Optional from compel import Compel -from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute +from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser from .devices import torch_dtype from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent from ldm.invoke.globals import Globals @@ -27,8 +27,13 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l dtype_for_device_getter=torch_dtype) positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string) - positive_prompt = compel.parse_prompt_string(positive_prompt_string) - negative_prompt = compel.parse_prompt_string(negative_prompt_string) + legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend) + positive_prompt: FlattenedPrompt|Blend + if legacy_blend is not None: + positive_prompt = legacy_blend + else: + positive_prompt = compel.parse_prompt_string(positive_prompt_string) + negative_prompt: FlattenedPrompt|Blend = compel.parse_prompt_string(negative_prompt_string) if log_tokens or getattr(Globals, "log_tokenization", False): log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer) @@ -155,3 +160,54 @@ def log_tokenization_for_text(text, tokenizer, display_label=None): if discarded != "": print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):') print(f'{discarded}\x1b[0m') + + +def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]: + weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize) + if len(weighted_subprompts) <= 1: + return None + strings = [x[0] for x in 
weighted_subprompts] + weights = [x[1] for x in weighted_subprompts] + + pp = PromptParser() + parsed_conjunctions = [pp.parse_conjunction(x) for x in strings] + flattened_prompts = [x.prompts[0] for x in parsed_conjunctions] + + return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize) + + + def split_weighted_subprompts(text, skip_normalize=False)->list: + """ + Legacy blend parsing. + + grabs all text up to the first occurrence of ':' + uses the grabbed text as a sub-prompt, and takes the value following ':' as weight + if ':' has no value defined, defaults to 1.0 + repeats until no text remaining + """ + prompt_parser = re.compile(""" + (?P<prompt> # capture group for 'prompt' + (?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:' + ) # end 'prompt' + (?: # non-capture group + :+ # match one or more ':' characters + (?P<weight> # capture group for 'weight' + -?\d+(?:\.\d+)? # match positive or negative integer or decimal number + )? # end weight capture group, make optional + \s* # strip spaces after weight + | # OR + $ # else, if no ':' then match end of line + ) # end non-capture group + """, re.VERBOSE) + parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float( + match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)] + if skip_normalize: + return parsed_prompts + weight_sum = sum(map(lambda x: x[1], parsed_prompts)) + if weight_sum == 0: + print( + "* Warning: Subprompt weights add up to zero. Discarding and using even weights instead.") + equal_weight = 1 / max(len(parsed_prompts), 1) + return [(x[0], equal_weight) for x in parsed_prompts] + return [(x[0], x[1] / weight_sum) for x in parsed_prompts] +