Compare commits

...

13 Commits

Author SHA1 Message Date
Lincoln Stein
d81584c8fd hotfix to 2.3.4 (#3188)
- Pin diffusers to 0.14
- Small fix to the LoRA loading routine, which was preventing placement of
LoRA files in subdirectories.
- Bump version to 2.3.4.post1
2023-04-13 12:39:16 -04:00
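The LoRA loading fix replaces a single fixed-path lookup with a recursive search of the LoRA directory. A minimal sketch of that lookup pattern, with hypothetical names (the actual change is in the KohyaLoraManager hunk below):

from pathlib import Path
from typing import Optional

def find_lora_file(lora_dir: str, name: str) -> Optional[Path]:
    # Try each known extension, searching lora_dir and all of its
    # subdirectories; return the first match, or None if nothing is found.
    for suffix in ("ckpt", "safetensors", "pt"):
        matches = list(Path(lora_dir).glob(f"**/{name}.{suffix}"))
        if matches:
            return matches[0]
    return None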
Lincoln Stein
1183bf96ed hotfix to 2.3.4
- Pin diffusers to 0.14
- Small fix to the LoRA loading routine, which was preventing placement of
  LoRA files in subdirectories.
- Bump version to 2.3.4.post1
2023-04-13 08:48:30 -04:00
Lincoln Stein
018d5dab53 [Bugfix] make invokeai-batch work on windows (#3164)
- Previous PR to truncate long filenames won't work on Windows due to the
lack of support for os.pathconf(). This works around the limitation by
hardcoding the value for PC_NAME_MAX when pathconf is unavailable.
- The `multiprocessing` send() and recv() methods weren't working
properly on Windows due to issues involving `utf-8` encoding and
pickling/unpickling. Changed these calls to `send_bytes()` and
`recv_bytes()`, which seems to fix the issue.

Not fully tested on Windows since I lack a GPU machine to test on, but it
is working on CPU.
2023-04-11 11:37:39 -04:00
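The send()/recv() problem stems from pushing Python objects through the pipe, which pickles them; sending raw UTF-8 bytes sidesteps both the pickling and the encoding issues. A minimal, standalone sketch of the pattern (not the invokeai-batch code itself):

from multiprocessing import Pipe

parent_conn, child_conn = Pipe()

# Sender: encode the command string to bytes before writing it to the pipe.
parent_conn.send_bytes('invoke --fnformat="dp.0001.png"'.encode('utf-8'))

# Receiver: read the raw bytes and decode them back into a string.
command = child_conn.recv_bytes().decode('utf-8')
print(command)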
Lincoln Stein
96a5de30e3 Merge branch 'v2.3' into bugfix/pathconf-on-windows 2023-04-11 11:11:20 -04:00
Lincoln Stein
4d62d5b802 [Bugfix] detect running invoke before updating (#3163)
This PR addresses an issue where, if `invokeai-update` is run on a
Windows system while an instance of InvokeAI is open and running, the
user's `.venv` can get corrupted.

Issue first reported here:
https://discord.com/channels/1020123559063990373/1094688269356249108/1094688434750230628
2023-04-09 22:29:46 -04:00
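The guard added by this PR scans the process table with psutil and aborts the update if an InvokeAI process is found; the full invokeai_is_running() helper appears in the diff below. A condensed sketch of the idea, with a hypothetical function name:

import psutil

def an_invokeai_process_is_running() -> bool:
    # Look for any process whose command line ends in 'invokeai' or 'invokeai.exe'.
    for proc in psutil.process_iter():
        try:
            if any(arg.endswith(('invokeai', 'invokeai.exe')) for arg in proc.cmdline()):
                return True
        except psutil.AccessDenied:
            # Some system processes cannot be inspected; skip them.
            continue
    return False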
Lincoln Stein
17de5c7008 Merge branch 'v2.3' into bugfix/pathconf-on-windows 2023-04-09 22:10:24 -04:00
Lincoln Stein
f95403dcda Merge branch 'v2.3' into bugfix/detect-running-invoke-before-updating 2023-04-09 22:09:17 -04:00
Lincoln Stein
e54d060d17 send and receive messages as bytes, not objects 2023-04-09 18:17:55 -04:00
Lincoln Stein
a01f1d4940 workaround no os.pathconf() on Windows platforms
- Previous PR to truncate long filenames won't work on Windows
  due to the lack of support for os.pathconf(). This works around the
  limitation by hardcoding the value for PC_NAME_MAX when pathconf
  is unavailable.
2023-04-09 17:45:34 -04:00
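The workaround asks the OS for PC_NAME_MAX where os.pathconf() exists and assumes the common 255-character limit where it does not (the actual change is in the _get_fn_format hunk below). A minimal sketch, with a hypothetical helper name:

import os

def max_filename_length(directory: str) -> int:
    # os.pathconf() does not exist on Windows, so fall back to 255, the usual
    # per-component filename limit on NTFS and most other filesystems.
    try:
        return os.pathconf(directory, 'PC_NAME_MAX')
    except (AttributeError, ValueError, OSError):
        return 255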
Lincoln Stein
1873817ac9 adjustments for windows 2023-04-09 17:24:47 -04:00
Lincoln Stein
31333a736c check if invokeai is running before trying to update
- on Windows systems, updating the .venv while invokeai is using it leads to
  corruption.
2023-04-09 16:57:14 -04:00
Lincoln Stein
03274b6da6 fix extracting loras from legacy blends (#3161) 2023-04-09 16:43:35 -04:00
Damian Stewart
0646649c05 fix extracting loras from legacy blends 2023-04-09 22:21:44 +02:00
6 changed files with 65 additions and 32 deletions

View File

@@ -1 +1 @@
-__version__='2.3.4'
+__version__='2.3.4.post1'

View File

@@ -12,7 +12,8 @@ from typing import Union, Optional, Any
 from transformers import CLIPTokenizer
 from compel import Compel
-from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
+from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
+    Conjunction
 from .devices import torch_dtype
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
@@ -55,22 +56,25 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
-    lora_conditions = None
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
-        should_use_lora_manager = True
-        lora_weights = positive_conjunction.lora_weights
-        if model.peft_manager:
-            should_use_lora_manager = model.peft_manager.should_use(lora_weights)
-            if not should_use_lora_manager:
-                model.peft_manager.set_loras(lora_weights)
-        if model.lora_manager and should_use_lora_manager:
-            lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+    positive_prompt = positive_conjunction.prompts[0]
+    should_use_lora_manager = True
+    lora_weights = positive_conjunction.lora_weights
+    lora_conditions = None
+    if model.peft_manager:
+        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
+        if not should_use_lora_manager:
+            model.peft_manager.set_loras(lora_weights)
+    if model.lora_manager and should_use_lora_manager:
+        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
@@ -93,12 +97,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
                          Union[FlattenedPrompt, Blend], FlattenedPrompt):
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
+    positive_prompt = positive_conjunction.prompts[0]
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
@@ -217,18 +221,26 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         print(f'{discarded}\x1b[0m')
-def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
+def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
     weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
     if len(weighted_subprompts) <= 1:
         return None
     strings = [x[0] for x in weighted_subprompts]
-    weights = [x[1] for x in weighted_subprompts]
     pp = PromptParser()
     parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
-    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
+    flattened_prompts = []
+    weights = []
+    loras = []
+    for i, x in enumerate(parsed_conjunctions):
+        if len(x.prompts)>0:
+            flattened_prompts.append(x.prompts[0])
+            weights.append(weighted_subprompts[i][1])
+        if len(x.lora_weights)>0:
+            loras.extend(x.lora_weights)
-    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
+    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
+                       lora_weights = loras)
 def split_weighted_subprompts(text, skip_normalize=False)->list:

View File

@@ -4,14 +4,13 @@ pip install <path_to_git_source>.
 '''
 import os
 import platform
+import psutil
 import requests
 from rich import box, print
-from rich.console import Console, Group, group
+from rich.console import Console, group
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.style import Style
-from rich.syntax import Syntax
 from rich.text import Text
 from ldm.invoke import __version__
@@ -32,6 +31,19 @@ else:
 def get_versions()->dict:
     return requests.get(url=INVOKE_AI_REL).json()
+def invokeai_is_running()->bool:
+    for p in psutil.process_iter():
+        try:
+            cmdline = p.cmdline()
+            matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
+            if matches:
+                print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
+                return True
+        except psutil.AccessDenied:
+            continue
+    return False
 def welcome(versions: dict):
     @group()
@@ -62,6 +74,10 @@ def welcome(versions: dict):
 def main():
     versions = get_versions()
+    if invokeai_is_running():
+        print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
+        return
     welcome(versions)
     tag = None

View File

@@ -100,8 +100,8 @@ def expand_prompts(
             for command in commands:
                 sequence += 1
                 format = _get_fn_format(outdir, sequence)
-                parent_conn.send(
-                    command + f' --fnformat="{format}"'
+                parent_conn.send_bytes(
+                    (command + f' --fnformat="{format}"').encode('utf-8')
                 )
             parent_conn.close()
         else:
@@ -116,7 +116,10 @@ def _get_fn_format(directory:str, sequence:int)->str:
     Get a filename that doesn't exceed filename length restrictions
     on the current platform.
     """
-    max_length = os.pathconf(directory,'PC_NAME_MAX')
+    try:
+        max_length = os.pathconf(directory,'PC_NAME_MAX')
+    except:
+        max_length = 255
     prefix = f'dp.{sequence:04}.'
     suffix = '.png'
     max_length -= len(prefix)+len(suffix)
@@ -130,7 +133,7 @@ class MessageToStdin(object):
     def readline(self) -> str:
         try:
             if len(self.linebuffer) == 0:
-                message = self.connection.recv()
+                message = self.connection.recv_bytes().decode('utf-8')
                 self.linebuffer = message.split("\n")
             result = self.linebuffer.pop(0)
             return result

View File

@@ -339,12 +339,14 @@ class KohyaLoraManager:
         return lora
     def apply_lora_model(self, name, mult: float = 1.0):
+        path_file = None
         for suffix in ["ckpt", "safetensors", "pt"]:
-            path_file = Path(self.lora_path, f"{name}.{suffix}")
-            if path_file.exists():
+            path_files = [x for x in Path(self.lora_path).glob(f"**/{name}.{suffix}")]
+            if len(path_files):
+                path_file = path_files[0]
                 print(f" | Loading lora {path_file.name} with weight {mult}")
                 break
-        if not path_file.exists():
+        if not path_file:
             print(f" ** Unable to find lora: {name}")
             return

View File

@@ -34,7 +34,7 @@ dependencies = [
"clip_anytorch",
"compel~=1.1.0",
"datasets",
"diffusers[torch]~=0.14",
"diffusers[torch]==0.14",
"dnspython==2.2.1",
"einops",
"eventlet",