Compare commits

...

8 Commits

Author SHA1 Message Date

Lincoln Stein
5beeb1a897  hotfix for broken merge function
2023-02-24 15:00:22 -05:00

Lincoln Stein
de6304b729  fixes crashes on merge in both WebUI and console (#2800)
- an inadvertent change to the model manager broke the merging functions
- corrected here - will be a hotfix
2023-02-24 14:58:06 -05:00

Lincoln Stein
d0be79c33d  fixes crashes on merge in both WebUI and console
- an inadvertent change to the model manager broke the merging functions
- corrected here - will be a hotfix
2023-02-24 14:54:23 -05:00

Lincoln Stein
b4ed8bc47a  Merge branch 'main' into v2.3
2023-02-24 10:52:03 -05:00

Matthias Wild
11a29fdc4d  fix python 3.9 compatibility (#2780)
without this change, the project can be installed on 3.9 but not used
this also fixes the container images

Maybe we should re-enable Python 3.9 checks which would have prevented
this.
2023-02-24 00:49:25 +01:00

Matthias Wild
a7c2333312  Merge branch 'main' into fix/py39-compatibility
2023-02-23 23:53:38 +01:00

Lincoln Stein
ad6ea02c9c  Update main with V2.3 fixes (#2774)
Until the nodes merge happens, we can continue to merge bugfixes from
the 2.3 branch into `main`. This will bring main into sync with
`v2.3.1+rc3`
2023-02-23 17:38:16 -05:00

mauwii
1a6ed85d99  fix typeing to be compatible with python 3.9
without this, the project can be installed on 3.9 but not used
this also fixes the container images
2023-02-23 23:27:16 +01:00
3 changed files with 7 additions and 7 deletions

File 1 of 3

@@ -1 +1 @@
-__version__='2.3.1'
+__version__='2.3.1p1'

File 2 of 3

@@ -88,7 +88,7 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
     return positive_prompt, negative_prompt
-def get_max_token_count(tokenizer, prompt: FlattenedPrompt|Blend, truncate_if_too_long=True) -> int:
+def get_max_token_count(tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=True) -> int:
     if type(prompt) is Blend:
         blend: Blend = prompt
         return max([get_max_token_count(tokenizer, c, truncate_if_too_long) for c in blend.prompts])
@@ -129,8 +129,8 @@ def split_prompt_to_positive_and_negative(prompt_string_uncleaned):
     return prompt_string_cleaned, unconditioned_words
-def log_tokenization(positive_prompt: Blend | FlattenedPrompt,
-                     negative_prompt: Blend | FlattenedPrompt,
+def log_tokenization(positive_prompt: Union[Blend, FlattenedPrompt],
+                     negative_prompt: Union[Blend, FlattenedPrompt],
                      tokenizer):
     print(f"\n>> [TOKENLOG] Parsed Prompt: {positive_prompt}")
     print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {negative_prompt}")
@@ -139,7 +139,7 @@ def log_tokenization(positive_prompt: Blend | FlattenedPrompt,
     log_tokenization_for_prompt_object(negative_prompt, tokenizer, display_label_prefix="(negative prompt)")
-def log_tokenization_for_prompt_object(p: Blend | FlattenedPrompt, tokenizer, display_label_prefix=None):
+def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None):
     display_label_prefix = display_label_prefix or ""
     if type(p) is Blend:
         blend: Blend = p
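
The hunks above are the Python 3.9 compatibility fix from commit 11a29fdc4d. PEP 604 union annotations such as FlattenedPrompt|Blend are evaluated when the def statement runs, and the | operator between two classes only exists on Python 3.10+, so merely importing the module crashes on 3.9 even though installation succeeds. A minimal sketch of the failure mode, with illustrative names rather than code from the repository:

    from typing import Union

    # Illustrative sketch; the names below are not from the InvokeAI codebase.
    # On Python 3.9 the PEP 604 annotation is evaluated when the "def" runs,
    # so importing a module containing this function raises:
    #   TypeError: unsupported operand type(s) for |: 'type' and 'type'
    # def count_tokens(prompt: str | list) -> int: ...

    # The equivalent typing.Union spelling works on Python 3.7+:
    def count_tokens(prompt: Union[str, list]) -> int:
        return len(str(prompt).split())

Adding from __future__ import annotations at the top of the module would also defer evaluation of the annotations, but the typing.Union form stays usable even where the annotations are inspected at runtime on 3.9.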

File 3 of 3

@@ -624,7 +624,7 @@ class ModelManager(object):
         self,
         repo_or_path: Union[str, Path],
         model_name: str = None,
-        model_description: str = None,
+        description: str = None,
         vae: dict = None,
         commit_to_conf: Path = None,
     ) -> bool:
@@ -640,7 +640,7 @@ class ModelManager(object):
         models.yaml file.
         """
         model_name = model_name or Path(repo_or_path).stem
-        model_description = model_description or f"Imported diffusers model {model_name}"
+        model_description = description or f"Imported diffusers model {model_name}"
         new_config = dict(
             description=model_description,
             vae=vae,
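
The two model manager hunks rename the keyword parameter from model_description to description. The merge crash described in commits d0be79c33d and de6304b729 is the typical symptom of a caller and callee disagreeing about a keyword name after such a rename. A minimal, hypothetical sketch of that failure mode (the function and call below are illustrative, not the actual InvokeAI call sites):

    # Hypothetical illustration; these names are not the real call sites.
    def import_model(repo_or_path, model_description=None):
        # callee expects the keyword "model_description"
        return {"path": repo_or_path, "description": model_description}

    try:
        # caller still passes the other keyword name, so the call fails
        # before any merge work is done
        import_model("path/to/merged-model", description="merged checkpoint")
    except TypeError as err:
        print(err)  # import_model() got an unexpected keyword argument 'description'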