Merge branch 'v2.3' into bugfix/lora-incompatibility-handling

Lincoln Stein authored on 2023-04-13 22:23:59 -04:00
committed by GitHub
4 changed files with 59 additions and 28 deletions

View File

@@ -1 +1 @@
-__version__='2.3.4'
+__version__='2.3.4.post1'
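For reference, the .post1 suffix is a PEP 440 post-release, which sorts after its base version but before the next patch release. A quick check (a sketch, assuming the third-party packaging library is installed):

from packaging.version import Version

# A .postN release orders after its base version but before the next patch release.
assert Version('2.3.4.post1') > Version('2.3.4')
assert Version('2.3.4.post1') < Version('2.3.5')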

View File

@@ -12,7 +12,8 @@ from typing import Union, Optional, Any
 from transformers import CLIPTokenizer
 from compel import Compel
-from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
+from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
+    Conjunction
 from .devices import torch_dtype
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
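Style note: the backslash continuation works, but a parenthesized import is the form PEP 8 recommends for multi-line imports; an equivalent spelling of the same import would be:

from compel.prompt_parser import (
    FlattenedPrompt,
    Blend,
    Fragment,
    CrossAttentionControlSubstitute,
    PromptParser,
    Conjunction,
)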
@@ -55,22 +56,25 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)

     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
-    lora_conditions = None
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
-        should_use_lora_manager = True
-        lora_weights = positive_conjunction.lora_weights
-        if model.peft_manager:
-            should_use_lora_manager = model.peft_manager.should_use(lora_weights)
-            if not should_use_lora_manager:
-                model.peft_manager.set_loras(lora_weights)
-        if model.lora_manager and should_use_lora_manager:
-            lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+    positive_prompt = positive_conjunction.prompts[0]
+
+    should_use_lora_manager = True
+    lora_weights = positive_conjunction.lora_weights
+    lora_conditions = None
+    if model.peft_manager:
+        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
+        if not should_use_lora_manager:
+            model.peft_manager.set_loras(lora_weights)
+    if model.lora_manager and should_use_lora_manager:
+        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)

     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
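To read the new control flow in isolation: the PEFT manager gets first refusal on the parsed LoRA weights, and the legacy LoRA manager runs only when the PEFT manager is absent or defers. A minimal sketch with stub managers (the stub classes, their return values, and apply_loras are all illustrative; only the branching mirrors the diff):

class StubPeftManager:
    """Hypothetical stand-in for model.peft_manager."""
    def should_use(self, lora_weights) -> bool:
        return True  # defer to the legacy LoRA manager for these weights
    def set_loras(self, lora_weights) -> None:
        print(f'peft path applied: {lora_weights}')

class StubLoraManager:
    """Hypothetical stand-in for model.lora_manager."""
    def set_loras_conditions(self, lora_weights):
        return [f'condition({w})' for w in lora_weights]

def apply_loras(peft_manager, lora_manager, lora_weights):
    # Mirrors the diff: the PEFT manager decides first; the legacy LoRA
    # manager runs only when the PEFT manager is absent or defers to it.
    lora_conditions = None
    should_use_lora_manager = True
    if peft_manager:
        should_use_lora_manager = peft_manager.should_use(lora_weights)
        if not should_use_lora_manager:
            peft_manager.set_loras(lora_weights)
    if lora_manager and should_use_lora_manager:
        lora_conditions = lora_manager.set_loras_conditions(lora_weights)
    return lora_conditions

print(apply_loras(StubPeftManager(), StubLoraManager(), ['style_lora:0.8']))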
@@ -93,12 +97,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = False) -> (
                          Union[FlattenedPrompt, Blend], FlattenedPrompt):
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
+    positive_prompt = positive_conjunction.prompts[0]

     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
@@ -217,18 +221,26 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         print(f'{discarded}\x1b[0m')

-def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
+def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
     weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
     if len(weighted_subprompts) <= 1:
         return None
     strings = [x[0] for x in weighted_subprompts]
-    weights = [x[1] for x in weighted_subprompts]

     pp = PromptParser()
     parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
-    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
-    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
+    flattened_prompts = []
+    weights = []
+    loras = []
+    for i, x in enumerate(parsed_conjunctions):
+        if len(x.prompts)>0:
+            flattened_prompts.append(x.prompts[0])
+            weights.append(weighted_subprompts[i][1])
+        if len(x.lora_weights)>0:
+            loras.extend(x.lora_weights)
+    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
+                       lora_weights = loras)

 def split_weighted_subprompts(text, skip_normalize=False)->list:
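The behavioral fix here is that weights are now appended only when the corresponding conjunction actually yields a prompt, so the prompt and weight lists handed to Blend can no longer fall out of alignment. A self-contained sketch of that pattern, with plain strings standing in for parsed conjunctions:

# Each pair is (subprompt text, blend weight); the empty subprompt parses to nothing.
weighted_subprompts = [('a cat', 0.7), ('', 0.2), ('a dog', 0.1)]

flattened_prompts = []
weights = []
for text, weight in weighted_subprompts:
    if text:  # stands in for len(x.prompts) > 0 in the diff
        flattened_prompts.append(text)
        weights.append(weight)

# The lists stay the same length, so a Blend built from them cannot mismatch.
assert len(flattened_prompts) == len(weights) == 2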

View File

@@ -4,14 +4,13 @@ pip install <path_to_git_source>.
 '''
 import os
 import platform
+import psutil
 import requests
 from rich import box, print
-from rich.console import Console, Group, group
+from rich.console import Console, group
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.style import Style
 from rich.syntax import Syntax
 from rich.text import Text
 from ldm.invoke import __version__
@@ -32,6 +31,19 @@ else:
 def get_versions()->dict:
     return requests.get(url=INVOKE_AI_REL).json()

+def invokeai_is_running()->bool:
+    for p in psutil.process_iter():
+        try:
+            cmdline = p.cmdline()
+            matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
+            if matches:
+                print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/bold red]')
+                return True
+        except psutil.AccessDenied:
+            continue
+    return False
+
 def welcome(versions: dict):
     @group()
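For reuse outside the updater, the same scan can be phrased as a standalone helper (a sketch; find_invokeai_pids is an illustrative name). One hedge worth noting: a process can exit between process_iter() and cmdline(), so this version also catches psutil.NoSuchProcess, which the committed code does not:

import psutil

def find_invokeai_pids() -> list:
    """Return PIDs whose command line ends with 'invokeai' or 'invokeai.exe'."""
    pids = []
    for p in psutil.process_iter():
        try:
            if any(x.endswith(('invokeai', 'invokeai.exe')) for x in p.cmdline()):
                pids.append(p.pid)
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            continue  # skip unreadable or already-exited processes
    return pids

print(find_invokeai_pids())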
@@ -62,6 +74,10 @@ def welcome(versions: dict):
 def main():
     versions = get_versions()
+    if invokeai_is_running():
+        print(':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/bold red]')
+        return
+
     welcome(versions)
     tag = None

View File

@@ -100,8 +100,8 @@ def expand_prompts(
         for command in commands:
             sequence += 1
             format = _get_fn_format(outdir, sequence)
-            parent_conn.send(
-                command + f' --fnformat="{format}"'
+            parent_conn.send_bytes(
+                (command + f' --fnformat="{format}"').encode('utf-8')
             )
         parent_conn.close()
     else:
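The change from send() to send_bytes() matters because send() pickles its argument while send_bytes() ships a raw buffer, so both ends of the pipe must agree on an encoding (UTF-8 here). A minimal round trip, with an illustrative command string rather than the updater's actual output:

from multiprocessing import Pipe

parent_conn, child_conn = Pipe()

# send_bytes() transmits raw bytes; send() would pickle the object instead.
parent_conn.send_bytes('some-command --fnformat="dp.0001.png"'.encode('utf-8'))
print(child_conn.recv_bytes().decode('utf-8'))

parent_conn.close()
child_conn.close()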
@@ -116,7 +116,10 @@ def _get_fn_format(directory:str, sequence:int)->str:
     Get a filename that doesn't exceed filename length restrictions
     on the current platform.
     """
-    max_length = os.pathconf(directory,'PC_NAME_MAX')
+    try:
+        max_length = os.pathconf(directory,'PC_NAME_MAX')
+    except (AttributeError, ValueError, OSError):  # os.pathconf is POSIX-only
+        max_length = 255
     prefix = f'dp.{sequence:04}.'
     suffix = '.png'
     max_length -= len(prefix)+len(suffix)
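os.pathconf is POSIX-only and can fail even where it exists, hence the fallback; 255 is the typical NAME_MAX on modern filesystems. The same probe as a reusable helper (a sketch; max_filename_length is an illustrative name):

import os

def max_filename_length(directory: str) -> int:
    """Return the longest filename the directory's filesystem accepts."""
    try:
        return os.pathconf(directory, 'PC_NAME_MAX')  # absent on Windows
    except (AttributeError, ValueError, OSError):
        return 255  # common limit on ext4, APFS, and NTFS

print(max_filename_length('.'))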
@@ -130,7 +133,7 @@ class MessageToStdin(object):
     def readline(self) -> str:
         try:
             if len(self.linebuffer) == 0:
-                message = self.connection.recv()
+                message = self.connection.recv_bytes().decode('utf-8')
                 self.linebuffer = message.split("\n")
             result = self.linebuffer.pop(0)
             return result
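readline() now mirrors the sender's send_bytes(): a single recv_bytes() payload may carry several newline-separated lines, which get buffered and handed out one per call. A self-contained sketch of that buffering pattern (LineReader is an illustrative stand-in for MessageToStdin):

from multiprocessing import Pipe

class LineReader:
    """Buffers decoded lines pulled from a byte-oriented connection."""
    def __init__(self, connection):
        self.connection = connection
        self.linebuffer = []

    def readline(self) -> str:
        if len(self.linebuffer) == 0:
            message = self.connection.recv_bytes().decode('utf-8')
            self.linebuffer = message.split('\n')
        return self.linebuffer.pop(0)

parent_conn, child_conn = Pipe()
parent_conn.send_bytes(b'first line\nsecond line')
reader = LineReader(child_conn)
assert reader.readline() == 'first line'
assert reader.readline() == 'second line'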