Merge branch 'main' into fix/expected-torch-device
@@ -2,3 +2,12 @@ from ._version import __version__
+__app_id__ = 'invoke-ai/InvokeAI'
+__app_name__ = 'InvokeAI'
+
+def _ignore_xformers_triton_message_on_windows():
+    import logging
+    logging.getLogger("xformers").addFilter(
+        lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+# In order to be effective, this needs to happen before anything could possibly import xformers.
+_ignore_xformers_triton_message_on_windows()
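The filter relies on `logging`'s support for plain callables: anything passed to `addFilter` is called with each `LogRecord`, and a falsy return value drops the record. A minimal standalone sketch of the same pattern, with an invented logger name and messages:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")  # invented logger name

# A falsy return from the filter drops the record; a truthy one keeps it.
log.addFilter(lambda record: "noisy" not in record.getMessage())

log.info("a noisy message")       # suppressed by the filter
log.info("an ordinary message")   # still printed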
@@ -93,7 +93,7 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
     """
 
-    if log_tokens or Globals.log_tokenization:
+    if log_tokens or getattr(Globals, "log_tokenization", False):
         print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
         print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
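The `getattr` form on the `+` side is defensive: on a `Globals` object that predates the `log_tokenization` attribute (it is only added by the globals.py hunk below), bare `Globals.log_tokenization` raises `AttributeError`, while `getattr` with a default falls back to `False`. A minimal sketch using a stand-in namespace:

from types import SimpleNamespace

# Stand-in for the real Globals object, deliberately missing the attribute.
Globals = SimpleNamespace(full_precision=False)
log_tokens = False

# Globals.log_tokenization would raise AttributeError here;
# getattr with a default degrades gracefully instead.
if log_tokens or getattr(Globals, "log_tokenization", False):
    print("token logging enabled")
else:
    print("token logging disabled")  # this branch runs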
@@ -236,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens or Globals.log_tokenization:
+    if log_tokens or getattr(Globals, "log_tokenization", False):
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)
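Both comprehensions walk the same `children` sequence, so `fragments[i]` and `weights[i]` always describe the same prompt fragment, which is what allows them to be passed as parallel arguments. A sketch of that extraction, with a hypothetical `Fragment` class standing in for the parser's node type:

from dataclasses import dataclass

@dataclass
class Fragment:  # hypothetical stand-in for the prompt parser's node type
    text: str
    weight: float

children = [Fragment("a cat", 1.0), Fragment("wearing a hat", 1.5)]

# Parallel lists: index i in both lists describes the same fragment.
fragments = [x.text for x in children]   # ['a cat', 'wearing a hat']
weights = [x.weight for x in children]   # [1.0, 1.5]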
@@ -296,4 +296,4 @@ def log_tokenization(text, model, display_label=None):
 
     if discarded != "":
         print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
         print(f'{discarded}\x1b[0m')
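The trailing `\x1b[0m` is the ANSI reset escape: the discarded-token string evidently carries colour codes earlier on, and the reset keeps that colouring from bleeding into later terminal output. A small sketch of the convention (the colour choice is arbitrary):

RED = "\x1b[31m"   # ANSI escape: red foreground
RESET = "\x1b[0m"  # ANSI escape: reset all attributes

discarded = "tokens, beyond, the, model, limit"

# Without the trailing RESET, later prints would stay red.
print(f"{RED}{discarded}{RESET}")
print("back to the default colour")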
@@ -54,6 +54,9 @@ Globals.full_precision = False
 # whether we should convert ckpt files into diffusers models on the fly
 Globals.ckpt_convert = False
 
+# whether to log tokenization everywhere
+Globals.log_tokenization = False
+
 def global_config_file() -> Path:
     return Path(Globals.root, Globals.config_dir, Globals.models_file)
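`Globals` is a plain module-level namespace: flags like `log_tokenization` are ordinary attributes that any importer can read or flip at runtime, and helpers such as `global_config_file` derive paths from them. A pared-down sketch of the pattern (the attribute values here are assumptions, not InvokeAI's actual defaults):

from argparse import Namespace
from pathlib import Path

# Minimal stand-in for the real Globals module; values are assumed.
Globals = Namespace()
Globals.root = Path.home() / "invokeai"
Globals.config_dir = "configs"
Globals.models_file = "models.yaml"
Globals.log_tokenization = False

def global_config_file() -> Path:
    return Path(Globals.root, Globals.config_dir, Globals.models_file)

Globals.log_tokenization = True     # any importer can flip the flag
print(global_config_file())         # e.g. /home/user/invokeai/configs/models.yaml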