Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-18 07:27:57 -05:00)
Compare commits: v2.3.4...v2.3.4.post1 (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | d81584c8fd | |
| | 1183bf96ed | |
| | 018d5dab53 | |
| | 96a5de30e3 | |
| | 4d62d5b802 | |
| | 17de5c7008 | |
| | f95403dcda | |
| | e54d060d17 | |
| | a01f1d4940 | |
| | 1873817ac9 | |
| | 31333a736c | |
| | 03274b6da6 | |
| | 0646649c05 | |
```diff
@@ -1 +1 @@
-__version__='2.3.4'
+__version__='2.3.4.post1'
```
```diff
@@ -12,7 +12,8 @@ from typing import Union, Optional, Any
 from transformers import CLIPTokenizer

 from compel import Compel
-from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
+from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
+    Conjunction
 from .devices import torch_dtype
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
```
```diff
@@ -55,22 +56,25 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)

     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
-    lora_conditions = None
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
-        should_use_lora_manager = True
-        lora_weights = positive_conjunction.lora_weights
-        if model.peft_manager:
-            should_use_lora_manager = model.peft_manager.should_use(lora_weights)
-            if not should_use_lora_manager:
-                model.peft_manager.set_loras(lora_weights)
-        if model.lora_manager and should_use_lora_manager:
-            lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+    positive_prompt = positive_conjunction.prompts[0]
+
+    should_use_lora_manager = True
+    lora_weights = positive_conjunction.lora_weights
+    lora_conditions = None
+    if model.peft_manager:
+        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
+        if not should_use_lora_manager:
+            model.peft_manager.set_loras(lora_weights)
+    if model.lora_manager and should_use_lora_manager:
+        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)

     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
```
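The substance of this hunk is that the LoRA bookkeeping is hoisted out of the `else:` branch, so it now also runs when a legacy blend was parsed. A minimal sketch of that control flow, with hypothetical stub classes standing in for `model.peft_manager` and `model.lora_manager` (the stubs and their return values are illustrative, not InvokeAI's real classes):

```python
# Hypothetical stand-ins for model.peft_manager / model.lora_manager.
class StubPeftManager:
    def should_use(self, lora_weights) -> bool:
        return True              # defer these weights to the LoRA manager

    def set_loras(self, lora_weights):
        print("peft manager applied", lora_weights)

class StubLoraManager:
    def set_loras_conditions(self, lora_weights):
        return [("condition", w) for w in lora_weights]

def apply_loras(peft_manager, lora_manager, lora_weights):
    # Runs after the conjunction is built, on *both* branches, so legacy
    # blends now get LoRA handling too (the old code only reached this
    # code inside the else: branch).
    should_use_lora_manager = True
    lora_conditions = None
    if peft_manager:
        should_use_lora_manager = peft_manager.should_use(lora_weights)
        if not should_use_lora_manager:
            peft_manager.set_loras(lora_weights)
    if lora_manager and should_use_lora_manager:
        lora_conditions = lora_manager.set_loras_conditions(lora_weights)
    return lora_conditions

print(apply_loras(StubPeftManager(), StubLoraManager(), ["lora_a:0.8"]))
```

Initializing `lora_conditions = None` next to the manager checks also guarantees callers get a defined value even when neither manager is present.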
```diff
@@ -93,12 +97,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
                         Union[FlattenedPrompt, Blend], FlattenedPrompt):
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
+    positive_prompt = positive_conjunction.prompts[0]
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
```
```diff
@@ -217,18 +221,26 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         print(f'{discarded}\x1b[0m')


-def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
+def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
     weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
     if len(weighted_subprompts) <= 1:
         return None
     strings = [x[0] for x in weighted_subprompts]
-    weights = [x[1] for x in weighted_subprompts]

     pp = PromptParser()
     parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
-    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
+    flattened_prompts = []
+    weights = []
+    loras = []
+    for i, x in enumerate(parsed_conjunctions):
+        if len(x.prompts)>0:
+            flattened_prompts.append(x.prompts[0])
+            weights.append(weighted_subprompts[i][1])
+        if len(x.lora_weights)>0:
+            loras.extend(x.lora_weights)

-    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
+    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
+                       lora_weights = loras)


 def split_weighted_subprompts(text, skip_normalize=False)->list:
```
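Previously `weights` was built eagerly from every weighted subprompt while the flattened prompts took only the first prompt of each parsed conjunction, so a subprompt that parsed to zero prompts would either crash the list comprehension or leave the two lists misaligned. The new loop appends a weight only when it appends a prompt. A toy sketch of that pairing invariant, with plain strings standing in for the real `pp.parse_conjunction()` results:

```python
# Stand-in data: the middle subprompt parses to nothing.
weighted_subprompts = [("cat", 1.0), ("", 2.0), ("dog", 0.5)]
parsed = [s.strip() for s, _ in weighted_subprompts]  # stand-in parser

flattened_prompts, weights = [], []
for i, p in enumerate(parsed):
    if p:                                  # analogous to len(x.prompts) > 0
        flattened_prompts.append(p)
        weights.append(weighted_subprompts[i][1])

assert flattened_prompts == ["cat", "dog"]
assert weights == [1.0, 0.5]               # 2.0 dropped with its empty prompt
```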
```diff
@@ -4,14 +4,13 @@ pip install <path_to_git_source>.
 '''
 import os
 import platform
+import psutil
 import requests
 from rich import box, print
-from rich.console import Console, Group, group
+from rich.console import Console, group
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.style import Style
 from rich.syntax import Syntax
 from rich.text import Text

 from ldm.invoke import __version__
```
```diff
@@ -32,6 +31,19 @@ else:
 def get_versions()->dict:
     return requests.get(url=INVOKE_AI_REL).json()

+def invokeai_is_running()->bool:
+    for p in psutil.process_iter():
+        try:
+            cmdline = p.cmdline()
+            matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
+            if matches:
+                print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
+                return True
+        except psutil.AccessDenied:
+            continue
+    return False
+
+
 def welcome(versions: dict):

     @group()
```
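`psutil.process_iter()` yields every process on the machine, and `cmdline()` raises `psutil.AccessDenied` for processes the current user may not inspect, which the loop skips. A self-contained sketch of the same scan (the program names are placeholders; catching `psutil.NoSuchProcess` as well, for processes that exit mid-scan, is an extra guard beyond what the diff itself adds):

```python
import psutil

def is_program_running(prog_names=('invokeai', 'invokeai.exe')) -> bool:
    for p in psutil.process_iter():
        try:
            # str.endswith accepts a tuple of suffixes
            if any(arg.endswith(prog_names) for arg in p.cmdline()):
                print(f'found matching process {p.pid}')
                return True
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            continue
    return False

if __name__ == '__main__':
    print(is_program_running())
```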
```diff
@@ -62,6 +74,10 @@ def welcome(versions: dict):

 def main():
     versions = get_versions()
+    if invokeai_is_running():
+        print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
+        return
+
     welcome(versions)

     tag = None
```
```diff
@@ -100,8 +100,8 @@ def expand_prompts(
         for command in commands:
             sequence += 1
             format = _get_fn_format(outdir, sequence)
-            parent_conn.send(
-                command + f' --fnformat="{format}"'
+            parent_conn.send_bytes(
+                (command + f' --fnformat="{format}"').encode('utf-8')
             )
         parent_conn.close()
     else:
```
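`Connection.send()` pickles its argument, while `send_bytes()` transmits a raw buffer; the two ends of the pipe must use matching calls, which is why the companion `readline()` change further down switches to `recv_bytes()`. A minimal round-trip sketch using only the standard library (the command string is illustrative):

```python
# Bytes written with send_bytes() must be read with recv_bytes();
# recv() would try to unpickle the raw UTF-8 buffer and fail.
from multiprocessing import Pipe

parent_conn, child_conn = Pipe()
command = 'banana sushi --fnformat="dp.0001.{prompt_words}.png"'

parent_conn.send_bytes(command.encode('utf-8'))
received = child_conn.recv_bytes().decode('utf-8')
assert received == command
```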
```diff
@@ -116,7 +116,10 @@ def _get_fn_format(directory:str, sequence:int)->str:
     Get a filename that doesn't exceed filename length restrictions
     on the current platform.
     """
-    max_length = os.pathconf(directory,'PC_NAME_MAX')
+    try:
+        max_length = os.pathconf(directory,'PC_NAME_MAX')
+    except:
+        max_length = 255
     prefix = f'dp.{sequence:04}.'
     suffix = '.png'
     max_length -= len(prefix)+len(suffix)
```
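`os.pathconf()` exists only on POSIX systems (on Windows the attribute itself is missing), and even on POSIX it can raise for configuration names the filesystem does not support, hence the fallback. A standalone sketch of the guarded lookup; catching specific exceptions here is a slight tightening of the diff's bare `except:`:

```python
import os

def max_filename_length(directory: str, default: int = 255) -> int:
    try:
        # POSIX-only call; AttributeError covers Windows, where os.pathconf
        # does not exist, and OSError/ValueError cover unsupported names.
        return os.pathconf(directory, 'PC_NAME_MAX')
    except (AttributeError, ValueError, OSError):
        return default  # 255 is a common NAME_MAX, as in the diff

print(max_filename_length('.'))
```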
```diff
@@ -130,7 +133,7 @@ class MessageToStdin(object):
     def readline(self) -> str:
         try:
             if len(self.linebuffer) == 0:
-                message = self.connection.recv()
+                message = self.connection.recv_bytes().decode('utf-8')
                 self.linebuffer = message.split("\n")
             result = self.linebuffer.pop(0)
             return result
```
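The surrounding class adapts a pipe connection to the file-like interface expected of `stdin`: one `recv_bytes()` message may carry several newline-separated commands, which are buffered and handed out one `readline()` call at a time. A stripped-down sketch of that adapter (class and attribute names are illustrative):

```python
class BytesLineReader:
    """Wraps a multiprocessing Connection so code expecting a file-like
    object can call readline()."""
    def __init__(self, connection):
        self.connection = connection   # a multiprocessing Connection
        self.linebuffer = []

    def readline(self) -> str:
        if len(self.linebuffer) == 0:
            # One message may contain many lines; refill the buffer.
            message = self.connection.recv_bytes().decode('utf-8')
            self.linebuffer = message.split("\n")
        return self.linebuffer.pop(0)
```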
```diff
@@ -339,12 +339,14 @@ class KohyaLoraManager:
         return lora

     def apply_lora_model(self, name, mult: float = 1.0):
+        path_file = None
         for suffix in ["ckpt", "safetensors", "pt"]:
-            path_file = Path(self.lora_path, f"{name}.{suffix}")
-            if path_file.exists():
+            path_files = [x for x in Path(self.lora_path).glob(f"**/{name}.{suffix}")]
+            if len(path_files):
+                path_file = path_files[0]
                 print(f" | Loading lora {path_file.name} with weight {mult}")
                 break
-        if not path_file.exists():
+        if not path_file:
             print(f" ** Unable to find lora: {name}")
             return

```
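`Path.glob()` with a `**/` prefix matches recursively, so the rewritten lookup finds LoRA files nested in subdirectories of the LoRA folder, where the old `Path(dir, filename)` probe only checked the top level. Because `path_file` is now assigned only when a match is found, it must be initialized to `None` and tested for truthiness afterwards. A sketch of the lookup in isolation (the directory and LoRA name below are hypothetical):

```python
from pathlib import Path

def find_lora_file(lora_path: str, name: str):
    for suffix in ("ckpt", "safetensors", "pt"):
        # '**/' searches subdirectories as well as the top level
        matches = list(Path(lora_path).glob(f"**/{name}.{suffix}"))
        if matches:
            return matches[0]   # first match wins, as in the diff
    return None                 # None signals "not found" to the caller

print(find_lora_file("loras", "my_style"))  # hypothetical inputs
```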
```diff
@@ -34,7 +34,7 @@ dependencies = [
     "clip_anytorch",
     "compel~=1.1.0",
     "datasets",
-    "diffusers[torch]~=0.14",
+    "diffusers[torch]==0.14",
     "dnspython==2.2.1",
     "einops",
     "eventlet",
```
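The dependency pin tightens from a compatible-release specifier to an exact one: under PEP 440, `~=0.14` means `>=0.14, ==0.*`, so it admits 0.15 and any later 0.x release, whereas `==0.14` accepts only 0.14 itself (and zero-padded equivalents such as 0.14.0). A quick check with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

assert SpecifierSet("~=0.14").contains("0.15")      # compatible release: any 0.x >= 0.14
assert not SpecifierSet("==0.14").contains("0.15")  # exact pin rejects newer minors
assert SpecifierSet("==0.14").contains("0.14.0")    # trailing zeros compare equal
```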