Compare commits

..

7 Commits

Author         SHA1        Message                                          Date
Lincoln Stein  dbd2161601  Path->str in call to expand_prompts              2023-04-15 15:19:07 -04:00
Lincoln Stein  1f83ac2eae  add root argument                                2023-04-15 15:08:44 -04:00
Lincoln Stein  f7bb68d01c  more debugging statements                        2023-04-15 14:56:47 -04:00
Lincoln Stein  8cddf9c5b3  added lots of debug statements                   2023-04-12 22:53:47 -04:00
Lincoln Stein  9b546ccf06  comment out suspected bug                        2023-04-12 20:48:23 -04:00
Lincoln Stein  73dbf73a95  dont capture stdout & stderr; print to console   2023-04-12 07:07:34 -04:00
Lincoln Stein  18a1f3893f  insert dummy function instead of invokeai        2023-04-11 22:38:51 -04:00
22 changed files with 402 additions and 426 deletions

.github/CODEOWNERS

@@ -1,13 +1,13 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant
+/.github/workflows/ @mauwii @lstein @blessedcoolant
 # documentation
-/docs/ @lstein @blessedcoolant
+/docs/ @lstein @mauwii @blessedcoolant
-mkdocs.yml @lstein @ebr
+mkdocs.yml @mauwii @lstein
 # installation and configuration
-/pyproject.toml @lstein @ebr
+/pyproject.toml @mauwii @lstein @ebr
-/docker/ @lstein
+/docker/ @mauwii
 /scripts/ @ebr @lstein @blessedcoolant
 /installer/ @ebr @lstein
 ldm/invoke/config @lstein @ebr
@@ -21,13 +21,13 @@ invokeai/configs @lstein @ebr @blessedcoolant
 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @gregghelt2
+/ldm/generate.py @lstein @keturn
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
-/ldm/invoke/config @lstein @ebr @blessedcoolant
+/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @gregghelt2 @damian0815
+/ldm/invoke/generator @keturn @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
@@ -36,17 +36,17 @@ invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/restoration @lstein @blessedcoolant
 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @gregghelt2 @blessedcoolant
+/ldm/models @damian0815 @keturn @blessedcoolant
 /ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
-/ldm/modules/attention.py @damian0815 @gregghelt2
+/ldm/modules/attention.py @damian0815 @keturn
-/ldm/modules/diffusionmodules @damian0815 @gregghelt2
+/ldm/modules/diffusionmodules @damian0815 @keturn
-/ldm/modules/distributions @damian0815 @gregghelt2
+/ldm/modules/distributions @damian0815 @keturn
-/ldm/modules/ema.py @damian0815 @gregghelt2
+/ldm/modules/ema.py @damian0815 @keturn
 /ldm/modules/embedding_manager.py @lstein
-/ldm/modules/encoders @damian0815 @gregghelt2
+/ldm/modules/encoders @damian0815 @keturn
-/ldm/modules/image_degradation @damian0815 @gregghelt2
+/ldm/modules/image_degradation @damian0815 @keturn
-/ldm/modules/losses @damian0815 @gregghelt2
+/ldm/modules/losses @damian0815 @keturn
-/ldm/modules/x_transformer.py @damian0815 @gregghelt2
+/ldm/modules/x_transformer.py @damian0815 @keturn
 # Nodes
 apps/ @Kyle0654 @jpphoto


@@ -132,13 +132,12 @@ class Installer:
 # Prefer to copy python executables
 # so that updates to system python don't break InvokeAI
-if not venv_dir.exists():
-try:
-venv.create(venv_dir, with_pip=True)
-# If installing over an existing environment previously created with symlinks,
-# the executables will fail to copy. Keep symlinks in that case
-except shutil.SameFileError:
-venv.create(venv_dir, with_pip=True, symlinks=True)
+try:
+venv.create(venv_dir, with_pip=True)
+# If installing over an existing environment previously created with symlinks,
+# the executables will fail to copy. Keep symlinks in that case
+except shutil.SameFileError:
+venv.create(venv_dir, with_pip=True, symlinks=True)
 # upgrade pip in Python 3.9 environments
 if int(platform.python_version_tuple()[1]) == 9:


@@ -30,6 +30,7 @@ from ldm.invoke.conditioning import (
 get_tokens_for_prompt_object,
 get_prompt_structure,
 split_weighted_subprompts,
+get_tokenizer,
 )
 from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
 from ldm.invoke.generator.inpaint import infill_methods
@@ -1313,7 +1314,7 @@ class InvokeAIWebServer:
 None
 if type(parsed_prompt) is Blend
 else get_tokens_for_prompt_object(
-self.generate.model.tokenizer, parsed_prompt
+get_tokenizer(self.generate.model), parsed_prompt
 )
 )
 attention_maps_image_base64_url = (


@@ -4,6 +4,7 @@ import shlex
 import sys
 import traceback
 from argparse import Namespace
+from packaging import version
 from pathlib import Path
 from typing import Union
@@ -16,6 +17,8 @@ if sys.platform == "darwin":
 import pyparsing # type: ignore
+print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
 import ldm.invoke
 from ..generate import Generate
@@ -25,17 +28,26 @@ from .generator.diffusers_pipeline import PipelineIntermediateState
 from .globals import Globals, global_config_dir
 from .image_util import make_grid
 from .log import write_log
+from .model_manager import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
 from ..util import url_attachment_name
+print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
 # global used in multiple functions (fix)
 infile = None
 def main():
 """Initialize command-line parsers and the diffusion model"""
 global infile
+print('DEBUG: [3] Entered main()', file=sys.stderr)
+print('DEBUG: INVOKEAI ENVIRONMENT:')
+print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
 opt = Args()
 args = opt.parse_args()
 if not args:
@@ -64,6 +76,13 @@ def main():
 Globals.sequential_guidance = args.sequential_guidance
 Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
+print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
+# run any post-install patches needed
+run_patches()
+print(f'DEBUG: [5] Patches run', file=sys.stderr)
 print(f">> Internet connectivity is {Globals.internet_available}")
 if not args.conf:
@@ -79,8 +98,9 @@ def main():
 # loading here to avoid long delays on startup
 # these two lines prevent a horrible warning message from appearing
 # when the frozen CLIP tokenizer is imported
+print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
 import transformers # type: ignore
 from ldm.generate import Generate
 transformers.logging.set_verbosity_error()
@@ -88,6 +108,7 @@ def main():
 diffusers.logging.set_verbosity_error()
+print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
 # Loading Face Restoration and ESRGAN Modules
 gfpgan, codeformer, esrgan = load_face_restoration(opt)
@@ -108,6 +129,10 @@ def main():
 if opt.lora_path:
 Globals.lora_models_dir = opt.lora_path
+# migrate legacy models
+print(f'DEBUG: [8] migrating models', file=sys.stderr)
+ModelManager.migrate_models()
 # load the infile as a list of lines
 if opt.infile:
 try:
@@ -123,6 +148,7 @@ def main():
 model = opt.model or retrieve_last_used_model()
+print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
 # creating a Generate object:
 try:
 gen = Generate(
@@ -149,6 +175,7 @@ def main():
print(">> changed to seamless tiling mode") print(">> changed to seamless tiling mode")
# preload the model # preload the model
print(f'DEBUG: [10] Loading default model', file=sys.stderr)
try: try:
gen.load_model() gen.load_model()
except KeyError: except KeyError:
@@ -196,6 +223,7 @@ def main():
 # TODO: main_loop() has gotten busy. Needs to be refactored.
 def main_loop(gen, opt, completer):
 """prompt/read/execute loop"""
+print(f'DEBUG: [11] In main loop', file=sys.stderr)
 global infile
 done = False
 doneAfterInFile = infile is not None
@@ -1291,6 +1319,63 @@ def retrieve_last_used_model()->str:
 with open(model_file_path,'r') as f:
 return f.readline()
# This routine performs any patch-ups needed after installation
def run_patches():
install_missing_config_files()
version_file = Path(Globals.root,'.version')
if version_file.exists():
with open(version_file,'r') as f:
root_version = version.parse(f.readline() or 'v2.3.2')
else:
root_version = version.parse('v2.3.2')
app_version = version.parse(ldm.invoke.__version__)
if root_version < app_version:
try:
do_version_update(root_version, ldm.invoke.__version__)
with open(version_file,'w') as f:
f.write(ldm.invoke.__version__)
except:
print("** Update failed. Will try again on next launch")
def install_missing_config_files():
"""
install ckpt configuration files that may have been added to the
distro after original root directory configuration
"""
pass
# import invokeai.configs as conf
# from shutil import copyfile
# root_configs = Path(global_config_dir(), 'stable-diffusion')
# repo_configs = Path(conf.__path__[0], 'stable-diffusion')
# for src in repo_configs.iterdir():
# dest = root_configs / src.name
# if not dest.exists():
# copyfile(src,dest)
def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
"""
Make any updates to the launcher .sh and .bat scripts that may be needed
from release to release. This is not an elegant solution. Instead, the
launcher should be moved into the source tree and installed using pip.
"""
if root_version < version.Version('v2.3.4'):
dest = Path(Globals.root,'loras')
dest.mkdir(exist_ok=True)
if root_version < version.Version('v2.3.3'):
if sys.platform == "linux":
print('>> Downloading new version of launcher script and its config file')
from ldm.util import download_with_progress_bar
url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
dest = Path(Globals.root,'invoke.sh.in')
assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
dest.replace(Path(Globals.root,'invoke.sh'))
os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
dest = Path(Globals.root,'dialogrc')
assert download_with_progress_bar(url_base+'dialogrc',dest)
dest.replace(Path(Globals.root,'.dialogrc'))
 if __name__ == '__main__':
 main()


@@ -1 +1 @@
-__version__='2.3.4.post1'
+__version__='2.3.4'


@@ -15,10 +15,19 @@ from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
 Conjunction
 from .devices import torch_dtype
-from .generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
+def get_tokenizer(model) -> CLIPTokenizer:
+# TODO remove legacy ckpt fallback handling
+return (getattr(model, 'tokenizer', None) # diffusers
+or model.cond_stage_model.tokenizer) # ldm
+def get_text_encoder(model) -> Any:
+# TODO remove legacy ckpt fallback handling
+return (getattr(model, 'text_encoder', None) # diffusers
+or UnsqueezingLDMTransformer(model.cond_stage_model.transformer)) # ldm
 class UnsqueezingLDMTransformer:
 def __init__(self, ldm_transformer):
 self.ldm_transformer = ldm_transformer
@@ -32,15 +41,15 @@ class UnsqueezingLDMTransformer:
 return insufficiently_unsqueezed_tensor.unsqueeze(0)
-def get_uc_and_c_and_ec(prompt_string,
-model: StableDiffusionGeneratorPipeline,
-log_tokens=False, skip_normalize_legacy_blend=False):
+def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
 # lazy-load any deferred textual inversions.
 # this might take a couple of seconds the first time a textual inversion is used.
 model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string)
-compel = Compel(tokenizer=model.tokenizer,
-text_encoder=model.text_encoder,
+tokenizer = get_tokenizer(model)
+text_encoder = get_text_encoder(model)
+compel = Compel(tokenizer=tokenizer,
+text_encoder=text_encoder,
 textual_inversion_manager=model.textual_inversion_manager,
 dtype_for_device_getter=torch_dtype)
@@ -69,20 +78,14 @@ def get_uc_and_c_and_ec(prompt_string,
 negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
 negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
-tokens_count = get_max_token_count(model.tokenizer, positive_prompt)
 if log_tokens or getattr(Globals, "log_tokenization", False):
-log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer)
-# some LoRA models also mess with the text encoder, so they must be active while compel builds conditioning tensors
-lora_conditioning_ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
-lora_conditions=lora_conditions)
-with InvokeAIDiffuserComponent.custom_attention_context(model.unet,
-extra_conditioning_info=lora_conditioning_ec,
-step_count=-1):
-c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
-uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
-# now build the "real" ec
+log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
+c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
+uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+tokens_count = get_max_token_count(tokenizer, positive_prompt)
 ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
 cross_attention_control_args=options.get(
 'cross_attention_control', None),
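
Note (added for context, not part of the diff above): the new get_tokenizer()/get_text_encoder() helpers fall back from the diffusers attribute layout to the legacy ldm layout. A minimal sketch of that lookup using simple stand-in objects rather than real pipelines; everything here except the fallback expression itself is a made-up example.

# Stand-ins only; real callers pass a diffusers pipeline or a legacy ldm model.
from types import SimpleNamespace

def get_tokenizer(model):
    # diffusers pipelines expose .tokenizer directly; legacy ldm models nest it
    # under .cond_stage_model, which is the fallback taken here
    return getattr(model, "tokenizer", None) or model.cond_stage_model.tokenizer

diffusers_style = SimpleNamespace(tokenizer="tokenizer-A")
ldm_style = SimpleNamespace(cond_stage_model=SimpleNamespace(tokenizer="tokenizer-A"))

assert get_tokenizer(diffusers_style) == get_tokenizer(ldm_style) == "tokenizer-A"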


@@ -21,6 +21,7 @@ from urllib import request
 from shutil import get_terminal_size
 import npyscreen
+import torch
 import transformers
 from diffusers import AutoencoderKL
 from huggingface_hub import HfFolder
@@ -663,19 +664,8 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
 configs_src = Path(configs.__path__[0])
 configs_dest = Path(root) / "configs"
 if not os.path.samefile(configs_src, configs_dest):
-shutil.copytree(configs_src,
-configs_dest,
-dirs_exist_ok=True,
-copy_function=shutil.copyfile,
-)
-# Fix up directory permissions so that they are writable
-# This can happen when running under Nix environment which
-# makes the runtime directory template immutable.
-for root,dirs,files in os.walk(os.path.join(root,name)):
-for d in dirs:
-Path(root,d).chmod(0o775)
-for f in files:
-Path(root,d).chmod(0o644)
+shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
 # -------------------------------------
 def run_console_ui(


@@ -42,18 +42,8 @@ def invokeai_is_running()->bool:
 except psutil.AccessDenied:
 continue
 return False
-def do_post_install():
-'''
-Run postinstallation script.
-'''
-print("Looking for postinstallation script to run on this version...")
-try:
-from ldm.invoke.config.post_install.py import post_install
-post_install()
-except:
-print("Postinstallation script not available for this version of InvokeAI")
 def welcome(versions: dict):
 @group()
@@ -117,7 +107,6 @@ def main():
 print(f':heavy_check_mark: Upgrade successful')
 else:
 print(f':exclamation: [bold red]Upgrade failed[/red bold]')
-do_post_install()
 if __name__ == "__main__":
 try:


@@ -196,6 +196,16 @@ class addModelsForm(npyscreen.FormMultiPage):
 scroll_exit=True,
 )
 self.nextrely += 1
+self.convert_models = self.add_widget_intelligent(
+npyscreen.TitleSelectOne,
+name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==",
+values=["Keep original format", "Convert to diffusers"],
+value=0,
+begin_entry_at=4,
+max_height=4,
+hidden=True, # will appear when imported models box is edited
+scroll_exit=True,
+)
 self.cancel = self.add_widget_intelligent(
 npyscreen.ButtonPress,
 name="CANCEL",
@@ -230,6 +240,8 @@ class addModelsForm(npyscreen.FormMultiPage):
 self.show_directory_fields.addVisibleWhenSelected(i)
 self.show_directory_fields.when_value_edited = self._clear_scan_directory
+self.import_model_paths.when_value_edited = self._show_hide_convert
+self.autoload_directory.when_value_edited = self._show_hide_convert
 def resize(self):
 super().resize()
@@ -240,6 +252,13 @@ class addModelsForm(npyscreen.FormMultiPage):
 if not self.show_directory_fields.value:
 self.autoload_directory.value = ""
+def _show_hide_convert(self):
+model_paths = self.import_model_paths.value or ""
+autoload_directory = self.autoload_directory.value or ""
+self.convert_models.hidden = (
+len(model_paths) == 0 and len(autoload_directory) == 0
+)
 def _get_starter_model_labels(self) -> List[str]:
 window_width, window_height = get_terminal_size()
 label_width = 25
@@ -299,6 +318,7 @@ class addModelsForm(npyscreen.FormMultiPage):
 .scan_directory: Path to a directory of models to scan and import
 .autoscan_on_startup: True if invokeai should scan and import at startup time
 .import_model_paths: list of URLs, repo_ids and file paths to import
+.convert_to_diffusers: if True, convert legacy checkpoints into diffusers
 """
 # we're using a global here rather than storing the result in the parentapp
 # due to some bug in npyscreen that is causing attributes to be lost
@@ -334,6 +354,7 @@ class addModelsForm(npyscreen.FormMultiPage):
 # URLs and the like
 selections.import_model_paths = self.import_model_paths.value.split()
+selections.convert_to_diffusers = self.convert_models.value[0] == 1
 class AddModelApplication(npyscreen.NPSAppManaged):
@@ -346,6 +367,7 @@ class AddModelApplication(npyscreen.NPSAppManaged):
 scan_directory=None,
 autoscan_on_startup=None,
 import_model_paths=None,
+convert_to_diffusers=None,
 )
 def onStart(self):
@@ -365,6 +387,7 @@ def process_and_execute(opt: Namespace, selections: Namespace):
 directory_to_scan = selections.scan_directory
 scan_at_startup = selections.autoscan_on_startup
 potential_models_to_install = selections.import_model_paths
+convert_to_diffusers = selections.convert_to_diffusers
 install_requested_models(
 install_initial_models=models_to_install,
@@ -372,6 +395,7 @@ def process_and_execute(opt: Namespace, selections: Namespace):
 scan_directory=Path(directory_to_scan) if directory_to_scan else None,
 external_models=potential_models_to_install,
 scan_at_startup=scan_at_startup,
+convert_to_diffusers=convert_to_diffusers,
 precision="float32"
 if opt.full_precision
 else choose_precision(torch.device(choose_torch_device())),


@@ -68,6 +68,7 @@ def install_requested_models(
 scan_directory: Path = None,
 external_models: List[str] = None,
 scan_at_startup: bool = False,
+convert_to_diffusers: bool = False,
 precision: str = "float16",
 purge_deleted: bool = False,
 config_file_path: Path = None,
@@ -110,20 +111,20 @@ def install_requested_models(
 if len(external_models)>0:
 print("== INSTALLING EXTERNAL MODELS ==")
 for path_url_or_repo in external_models:
+print(f'DEBUG: path_url_or_repo = {path_url_or_repo}')
 try:
 model_manager.heuristic_import(
 path_url_or_repo,
+convert=convert_to_diffusers,
 config_file_callback=_pick_configuration_file,
 commit_to_conf=config_file_path
 )
 except KeyboardInterrupt:
 sys.exit(-1)
-except Exception as e:
-print(f'An exception has occurred: {str(e)}')
+except Exception:
+pass
 if scan_at_startup and scan_directory.is_dir():
-argument = '--autoconvert'
+argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
 initfile = Path(Globals.root, Globals.initfile)
 replacement = Path(Globals.root, f'{Globals.initfile}.new')
 directory = str(scan_directory).replace('\\','/')
@@ -388,19 +389,7 @@ def update_config_file(successfully_downloaded: dict, config_file: Path):
 if config_file is default_config_file() and not config_file.parent.exists():
 configs_src = Dataset_path.parent
 configs_dest = default_config_file().parent
-shutil.copytree(configs_src,
-configs_dest,
-dirs_exist_ok=True,
-copy_function=shutil.copyfile,
-)
-# Fix up directory permissions so that they are writable
-# This can happen when running under Nix environment which
-# makes the runtime directory template immutable.
-for root,dirs,files in os.walk(default_config_file().parent):
-for d in dirs:
-Path(root,d).chmod(0o775)
-for f in files:
-Path(root,d).chmod(0o644)
+shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
 yaml = new_config_file_contents(successfully_downloaded, config_file)


@@ -1,168 +0,0 @@
'''ldm.invoke.config.post_install
This defines a single exportable function, post_install(), which does
post-installation stuff like migrating models directories, adding new
config files, etc.
From the command line, its entry point is invokeai-postinstall.
'''
import os
import sys
from packaging import version
from pathlib import Path
from shutil import move,rmtree,copyfile
from typing import Union
import invokeai.configs as conf
import ldm.invoke
from ..globals import Globals, global_cache_dir, global_config_dir
def post_install():
'''
Do version and model updates, etc.
Should be called once after every version update.
'''
_migrate_models()
_run_patches()
def _migrate_models():
"""
Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5
to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
script startup time.
"""
# Three transformer models to check: bert, clip and safety checker, and
# the diffusers as well
models_dir = Path(Globals.root, "models")
legacy_locations = [
Path(
models_dir,
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
),
Path("bert-base-uncased/models--bert-base-uncased"),
Path(
"openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
),
]
legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
legacy_layout = False
for model in legacy_locations:
legacy_layout = legacy_layout or model.exists()
if not legacy_layout:
return
print(
"""
>> ALERT:
>> The location of your previously-installed diffusers models needs to move from
>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory
>> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time
>> operation. However if you have customized either of these directories and need to
>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
>> Otherwise press <enter> to continue."""
)
print("** This is a quick one-time operation.")
input("continue> ")
# transformer files get moved into the hub directory
if _is_huggingface_hub_directory_present():
hub = global_cache_dir("hub")
else:
hub = models_dir / "hub"
os.makedirs(hub, exist_ok=True)
for model in legacy_locations:
source = models_dir / model
dest = hub / model.stem
if dest.exists() and not source.exists():
continue
print(f"** {source} => {dest}")
if source.exists():
if dest.is_symlink():
print(f"** Found symlink at {dest.name}. Not migrating.")
elif dest.exists():
if source.is_dir():
rmtree(source)
else:
source.unlink()
else:
move(source, dest)
# now clean up by removing any empty directories
empty = [
root
for root, dirs, files, in os.walk(models_dir)
if not len(dirs) and not len(files)
]
for d in empty:
os.rmdir(d)
print("** Migration is done. Continuing...")
def _is_huggingface_hub_directory_present() -> bool:
return (
os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None
)
# This routine performs any patch-ups needed after installation
def _run_patches():
_install_missing_config_files()
version_file = Path(Globals.root,'.version')
if version_file.exists():
with open(version_file,'r') as f:
root_version = version.parse(f.readline() or 'v2.3.2')
else:
root_version = version.parse('v2.3.2')
app_version = version.parse(ldm.invoke.__version__)
if root_version < app_version:
try:
_do_version_update(root_version, ldm.invoke.__version__)
with open(version_file,'w') as f:
f.write(ldm.invoke.__version__)
except:
print("** Version patching failed. Please try invokeai-postinstall later.")
def _install_missing_config_files():
"""
install ckpt configuration files that may have been added to the
distro after original root directory configuration
"""
root_configs = Path(global_config_dir(), 'stable-diffusion')
repo_configs = None
for f in conf.__path__:
if Path(f, 'stable-diffusion', 'v1-inference.yaml').exists():
repo_configs = Path(f, 'stable-diffusion')
break
if not repo_configs:
return
for src in repo_configs.iterdir():
dest = root_configs / src.name
if not dest.exists():
copyfile(src,dest)
def _do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
"""
Make any updates to the launcher .sh and .bat scripts that may be needed
from release to release. This is not an elegant solution. Instead, the
launcher should be moved into the source tree and installed using pip.
"""
if root_version < version.Version('v2.3.4'):
dest = Path(Globals.root,'loras')
dest.mkdir(exist_ok=True)
if root_version < version.Version('v2.3.3'):
if sys.platform == "linux":
print('>> Downloading new version of launcher script and its config file')
from ldm.util import download_with_progress_bar
url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
dest = Path(Globals.root,'invoke.sh.in')
assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
dest.replace(Path(Globals.root,'invoke.sh'))
os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
dest = Path(Globals.root,'dialogrc')
assert download_with_progress_bar(url_base+'dialogrc',dest)
dest.replace(Path(Globals.root,'.dialogrc'))


@@ -32,7 +32,8 @@ def expand_prompts(
 template_file: Path,
 run_invoke: bool = False,
 invoke_model: str = None,
-invoke_outdir: Path = None,
+invoke_outdir: str = None,
+invoke_root: str = None,
 processes_per_gpu: int = 1,
 ):
 """
@@ -61,6 +62,8 @@ def expand_prompts(
 invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
 if invoke_model:
 invokeai_args.extend(("--model", invoke_model))
+if invoke_root:
+invokeai_args.extend(("--root", invoke_root))
 if invoke_outdir:
 outdir = os.path.expanduser(invoke_outdir)
 invokeai_args.extend(("--outdir", outdir))
@@ -79,6 +82,11 @@ def expand_prompts(
 )
 import ldm.invoke.CLI
+print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
+print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
 parent_conn, child_conn = Pipe()
 children = set()
 for i in range(processes_to_launch):
@@ -111,6 +119,13 @@ def expand_prompts(
 for p in children:
 p.terminate()
+def _dummy_cli_main():
+counter = 0
+while line := sys.stdin.readline():
+print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
+counter += 1
+time.sleep(1)
 def _get_fn_format(directory:str, sequence:int)->str:
 """
 Get a filename that doesn't exceed filename length restrictions
@@ -179,9 +194,9 @@ def _run_invoke(
os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}" os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
sys.argv = args sys.argv = args
sys.stdin = MessageToStdin(conn_in) sys.stdin = MessageToStdin(conn_in)
sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]")) # sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
with open(logfile, "w") as stderr, redirect_stderr(stderr): # with open(logfile, "w") as stderr, redirect_stderr(stderr):
entry_point() entry_point()
def _filter_output(stream: TextIOBase): def _filter_output(stream: TextIOBase):
@@ -238,6 +253,10 @@ def main():
 default=1,
 help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
 )
+parser.add_argument(
+'--root_dir',
+default=None,
+help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
 opt = parser.parse_args()
 if opt.example:
@@ -261,6 +280,7 @@ def main():
 run_invoke=opt.invoke,
 invoke_model=opt.model,
 invoke_outdir=opt.outdir,
+invoke_root=opt.root,
 processes_per_gpu=opt.processes_per_gpu,
 )


@@ -467,9 +467,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
 if additional_guidance is None:
 additional_guidance = []
 extra_conditioning_info = conditioning_data.extra
-with InvokeAIDiffuserComponent.custom_attention_context(self.invokeai_diffuser.model,
-extra_conditioning_info=extra_conditioning_info,
-step_count=len(self.scheduler.timesteps)
+with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info,
+step_count=len(self.scheduler.timesteps)
 ):
 yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,


@@ -1007,6 +1007,81 @@ class ModelManager(object):
""" """
) )
@classmethod
def migrate_models(cls):
"""
Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5
to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
script startup time.
"""
# Three transformer models to check: bert, clip and safety checker, and
# the diffusers as well
models_dir = Path(Globals.root, "models")
legacy_locations = [
Path(
models_dir,
"CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
),
Path("bert-base-uncased/models--bert-base-uncased"),
Path(
"openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
),
]
legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
legacy_layout = False
for model in legacy_locations:
legacy_layout = legacy_layout or model.exists()
if not legacy_layout:
return
print(
"""
>> ALERT:
>> The location of your previously-installed diffusers models needs to move from
>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory
>> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time
>> operation. However if you have customized either of these directories and need to
>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
>> Otherwise press <enter> to continue."""
)
print("** This is a quick one-time operation.")
input("continue> ")
# transformer files get moved into the hub directory
if cls._is_huggingface_hub_directory_present():
hub = global_cache_dir("hub")
else:
hub = models_dir / "hub"
os.makedirs(hub, exist_ok=True)
for model in legacy_locations:
source = models_dir / model
dest = hub / model.stem
if dest.exists() and not source.exists():
continue
print(f"** {source} => {dest}")
if source.exists():
if dest.is_symlink():
print(f"** Found symlink at {dest.name}. Not migrating.")
elif dest.exists():
if source.is_dir():
rmtree(source)
else:
source.unlink()
else:
move(source, dest)
# now clean up by removing any empty directories
empty = [
root
for root, dirs, files, in os.walk(models_dir)
if not len(dirs) and not len(files)
]
for d in empty:
os.rmdir(d)
print("** Migration is done. Continuing...")
 def _resolve_path(
 self, source: Union[str, Path], dest_directory: str
 ) -> Optional[Path]:
@@ -1231,3 +1306,8 @@ class ModelManager(object):
 return path
 return Path(Globals.root, path).resolve()
+@staticmethod
+def _is_huggingface_hub_directory_present() -> bool:
+return (
+os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None
+)


@@ -288,7 +288,16 @@ class InvokeAICrossAttentionMixin:
 return self.einsum_op_tensor_mem(q, k, v, 32)
-def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel, context: Context):
+def restore_default_cross_attention(model, is_running_diffusers: bool, processors_to_restore: Optional[AttnProcessor]=None):
+if is_running_diffusers:
+unet = model
+unet.set_attn_processor(processors_to_restore or CrossAttnProcessor())
+else:
+remove_attention_function(model)
+def override_cross_attention(model, context: Context, is_running_diffusers = False):
 """
 Inject attention parameters and functions into the passed in model to enable cross attention editing.
@@ -314,15 +323,22 @@ def setup_cross_attention_control_attention_processors(unet: UNet2DConditionMode
 context.cross_attention_mask = mask.to(device)
 context.cross_attention_index_map = indices.to(device)
-old_attn_processors = unet.attn_processors
-if torch.backends.mps.is_available():
-# see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
-unet.set_attn_processor(SwapCrossAttnProcessor())
+if is_running_diffusers:
+unet = model
+old_attn_processors = unet.attn_processors
+if torch.backends.mps.is_available():
+# see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
+unet.set_attn_processor(SwapCrossAttnProcessor())
+else:
+# try to re-use an existing slice size
+default_slice_size = 4
+slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
+unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
 else:
-# try to re-use an existing slice size
-default_slice_size = 4
-slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
-unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
+context.register_cross_attention_modules(model)
+inject_attention_function(model, context)
 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:


@@ -12,6 +12,17 @@ class DDIMSampler(Sampler):
 self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model,
 model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond))
+def prepare_to_sample(self, t_enc, **kwargs):
+super().prepare_to_sample(t_enc, **kwargs)
+extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
+if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
+else:
+self.invokeai_diffuser.restore_default_cross_attention()
 # This is the central routine
 @torch.no_grad()


@@ -38,6 +38,15 @@ class CFGDenoiser(nn.Module):
 model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))
+def prepare_to_sample(self, t_enc, **kwargs):
+extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = t_enc)
+else:
+self.invokeai_diffuser.restore_default_cross_attention()
 def forward(self, x, sigma, uncond, cond, cond_scale):
 next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)


@@ -14,6 +14,17 @@ class PLMSSampler(Sampler):
 def __init__(self, model, schedule='linear', device=None, **kwargs):
 super().__init__(model,schedule,model.num_timesteps, device)
+def prepare_to_sample(self, t_enc, **kwargs):
+super().prepare_to_sample(t_enc, **kwargs)
+extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
+if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
+else:
+self.invokeai_diffuser.restore_default_cross_attention()
 # this is the essential routine
 @torch.no_grad()


@@ -1,18 +1,18 @@
 from contextlib import contextmanager
 from dataclasses import dataclass
 from math import ceil
-from typing import Callable, Optional, Union, Any
+from typing import Callable, Optional, Union, Any, Dict
 import numpy as np
 import torch
-from diffusers import UNet2DConditionModel
+from diffusers.models.cross_attention import AttnProcessor
 from typing_extensions import TypeAlias
 from ldm.invoke.globals import Globals
 from ldm.models.diffusion.cross_attention_control import (
 Arguments,
-setup_cross_attention_control_attention_processors,
+restore_default_cross_attention,
+override_cross_attention,
 Context,
 get_cross_attention_modules,
 CrossAttentionType,
@@ -84,45 +84,66 @@ class InvokeAIDiffuserComponent:
 self.cross_attention_control_context = None
 self.sequential_guidance = Globals.sequential_guidance
-@classmethod
 @contextmanager
 def custom_attention_context(
-clss,
-unet: UNet2DConditionModel,
-extra_conditioning_info: Optional[ExtraConditioningInfo],
-step_count: int
+self, extra_conditioning_info: Optional[ExtraConditioningInfo], step_count: int
+# note: also may futz with the text encoder depending on requested LoRAs
 ):
-old_attn_processors = None
+old_attn_processor = None
 if extra_conditioning_info and (
 extra_conditioning_info.wants_cross_attention_control
 | extra_conditioning_info.has_lora_conditions
 ):
-old_attn_processors = unet.attn_processors
-# Load lora conditions into the model
-if extra_conditioning_info.has_lora_conditions:
-for condition in extra_conditioning_info.lora_conditions:
-condition() # target model is stored in condition state for some reason
-if extra_conditioning_info.wants_cross_attention_control:
-cross_attention_control_context = Context(
-arguments=extra_conditioning_info.cross_attention_control_args,
-step_count=step_count,
-)
-setup_cross_attention_control_attention_processors(
-unet,
-cross_attention_control_context,
-)
+old_attn_processor = self.override_attention_processors(
+extra_conditioning_info, step_count=step_count
+)
 try:
 yield None
 finally:
-if old_attn_processors is not None:
-unet.set_attn_processor(old_attn_processors)
+if old_attn_processor is not None:
+self.restore_default_cross_attention(old_attn_processor)
 if extra_conditioning_info and extra_conditioning_info.has_lora_conditions:
 for lora_condition in extra_conditioning_info.lora_conditions:
 lora_condition.unload()
 # TODO resuscitate attention map saving
 # self.remove_attention_map_saving()
def override_attention_processors(
self, conditioning: ExtraConditioningInfo, step_count: int
) -> Dict[str, AttnProcessor]:
"""
setup cross attention .swap control. for diffusers this replaces the attention processor, so
the previous attention processor is returned so that the caller can restore it later.
"""
old_attn_processors = self.model.attn_processors
# Load lora conditions into the model
if conditioning.has_lora_conditions:
for condition in conditioning.lora_conditions:
condition(self.model)
if conditioning.wants_cross_attention_control:
self.cross_attention_control_context = Context(
arguments=conditioning.cross_attention_control_args,
step_count=step_count,
)
override_cross_attention(
self.model,
self.cross_attention_control_context,
is_running_diffusers=self.is_running_diffusers,
)
return old_attn_processors
def restore_default_cross_attention(
self, processors_to_restore: Optional[dict[str, "AttnProcessor"]] = None
):
self.cross_attention_control_context = None
restore_default_cross_attention(
self.model,
is_running_diffusers=self.is_running_diffusers,
processors_to_restore=processors_to_restore,
)
 def setup_attention_map_saving(self, saver: AttentionMapSaver):
 def callback(slice, dim, offset, slice_size, key):
 if dim is not None:
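
Note (added for context, not part of the diff above): after this refactor, custom_attention_context is an instance method on InvokeAIDiffuserComponent rather than a classmethod taking the unet, so callers (see the diffusers_pipeline hunk earlier) no longer pass the model in. A rough sketch of the resulting save/override/restore shape, with a dummy class standing in for the real component; the attribute names and strings are invented for illustration.

# Dummy stand-in showing only the save/restore shape of the context manager.
from contextlib import contextmanager

class DummyDiffuserComponent:
    def __init__(self):
        self.attn_processors = "default processors"

    @contextmanager
    def custom_attention_context(self, extra_conditioning_info=None, step_count=0):
        saved = self.attn_processors          # captured before any override
        self.attn_processors = "swap processors"
        try:
            yield
        finally:
            self.attn_processors = saved      # always restored, even on error

component = DummyDiffuserComponent()
with component.custom_attention_context(extra_conditioning_info=None, step_count=30):
    pass                                      # denoising loop would run here
assert component.attn_processors == "default processors"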


@@ -31,13 +31,18 @@ class LoRALayer:
 self.name = name
 self.scale = alpha / rank if (alpha and rank) else 1.0
-def forward(self, lora, input_h):
+def forward(self, lora, input_h, output):
 if self.mid is None:
-weight = self.up(self.down(*input_h))
+output = (
+output
++ self.up(self.down(*input_h)) * lora.multiplier * self.scale
+)
 else:
-weight = self.up(self.mid(self.down(*input_h)))
-return weight * lora.multiplier * self.scale
+output = (
+output
++ self.up(self.mid(self.down(*input_h))) * lora.multiplier * self.scale
+)
+return output
 class LoHALayer:
 lora_name: str
@@ -59,7 +64,7 @@ class LoHALayer:
 self.name = name
 self.scale = alpha / rank if (alpha and rank) else 1.0
-def forward(self, lora, input_h):
+def forward(self, lora, input_h, output):
 if type(self.org_module) == torch.nn.Conv2d:
 op = torch.nn.functional.conv2d
@@ -81,9 +86,9 @@ class LoHALayer:
 rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', self.t1, self.w1_b, self.w1_a)
 rebuild2 = torch.einsum('i j k l, j r, i p -> p r k l', self.t2, self.w2_b, self.w2_a)
 weight = rebuild1 * rebuild2
 bias = self.bias if self.bias is not None else 0
-return op(
+return output + op(
 *input_h,
 (weight + bias).view(self.org_module.weight.shape),
 None,
@@ -91,69 +96,6 @@ class LoHALayer:
 ) * lora.multiplier * self.scale
class LoKRLayer:
lora_name: str
name: str
scale: float
w1: Optional[torch.Tensor] = None
w1_a: Optional[torch.Tensor] = None
w1_b: Optional[torch.Tensor] = None
w2: Optional[torch.Tensor] = None
w2_a: Optional[torch.Tensor] = None
w2_b: Optional[torch.Tensor] = None
t2: Optional[torch.Tensor] = None
bias: Optional[torch.Tensor] = None
org_module: torch.nn.Module
def __init__(self, lora_name: str, name: str, rank=4, alpha=1.0):
self.lora_name = lora_name
self.name = name
self.scale = alpha / rank if (alpha and rank) else 1.0
def forward(self, lora, input_h):
if type(self.org_module) == torch.nn.Conv2d:
op = torch.nn.functional.conv2d
extra_args = dict(
stride=self.org_module.stride,
padding=self.org_module.padding,
dilation=self.org_module.dilation,
groups=self.org_module.groups,
)
else:
op = torch.nn.functional.linear
extra_args = {}
w1 = self.w1
if w1 is None:
w1 = self.w1_a @ self.w1_b
w2 = self.w2
if w2 is None:
if self.t2 is None:
w2 = self.w2_a @ self.w2_b
else:
w2 = torch.einsum('i j k l, i p, j r -> p r k l', self.t2, self.w2_a, self.w2_b)
if len(w2.shape) == 4:
w1 = w1.unsqueeze(2).unsqueeze(2)
w2 = w2.contiguous()
weight = torch.kron(w1, w2).reshape(self.org_module.weight.shape)
bias = self.bias if self.bias is not None else 0
return op(
*input_h,
(weight + bias).view(self.org_module.weight.shape),
None,
**extra_args
) * lora.multiplier * self.scale
 class LoRAModuleWrapper:
 unet: UNet2DConditionModel
 text_encoder: CLIPTextModel
@@ -217,7 +159,7 @@ class LoRAModuleWrapper:
 layer = lora.layers.get(name, None)
 if layer is None:
 continue
-output += layer.forward(lora, input_h)
+output = layer.forward(lora, input_h, output)
 return output
 return lora_forward
@@ -365,36 +307,6 @@ class LoRA:
 else:
 layer.t2 = None
# lokr
elif "lokr_w1_b" in values or "lokr_w1" in values:
if "lokr_w1_b" in values:
rank = values["lokr_w1_b"].shape[0]
elif "lokr_w2_b" in values:
rank = values["lokr_w2_b"].shape[0]
else:
rank = None # unscaled
layer = LoKRLayer(self.name, stem, rank, alpha)
layer.org_module = wrapped
layer.bias = bias
if "lokr_w1" in values:
layer.w1 = values["lokr_w1"].to(device=self.device, dtype=self.dtype)
else:
layer.w1_a = values["lokr_w1_a"].to(device=self.device, dtype=self.dtype)
layer.w1_b = values["lokr_w1_b"].to(device=self.device, dtype=self.dtype)
if "lokr_w2" in values:
layer.w2 = values["lokr_w2"].to(device=self.device, dtype=self.dtype)
else:
layer.w2_a = values["lokr_w2_a"].to(device=self.device, dtype=self.dtype)
layer.w2_b = values["lokr_w2_b"].to(device=self.device, dtype=self.dtype)
if "lokr_t2" in values:
layer.t2 = values["lokr_t2"].to(device=self.device, dtype=self.dtype)
 else:
 print(
 f">> Encountered unknown lora layer module in {self.name}: {stem} - {type(wrapped).__name__}"
@@ -427,14 +339,12 @@ class KohyaLoraManager:
 return lora
 def apply_lora_model(self, name, mult: float = 1.0):
-path_file = None
 for suffix in ["ckpt", "safetensors", "pt"]:
-path_files = [x for x in Path(self.lora_path).glob(f"**/{name}.{suffix}")]
-if len(path_files):
-path_file = path_files[0]
+path_file = Path(self.lora_path, f"{name}.{suffix}")
+if path_file.exists():
 print(f" | Loading lora {path_file.name} with weight {mult}")
 break
-if not path_file:
+if not path_file.exists():
 print(f" ** Unable to find lora: {name}")
 return
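
Note (added for context, not part of the diff above): the LoRA layer forward() methods now receive the running output and return the accumulated result, instead of returning a weight delta for the wrapper to add. A toy illustration of that wiring; the class, values and parameter names are made up and do not come from the diff.

# Toy layers only; real layers compute self.up(self.down(x)) etc. on tensors.
class ToyLayer:
    def __init__(self, delta, scale=1.0):
        self.delta = delta
        self.scale = scale

    def forward(self, multiplier, input_h, output):
        # new style: fold this layer's scaled contribution into the running output
        return output + self.delta * multiplier * self.scale

output = 1.0   # base module's output
for layer in (ToyLayer(0.5), ToyLayer(0.25)):
    output = layer.forward(multiplier=1.0, input_h=None, output=output)

assert output == 1.75   # old style was: output += layer.forward(multiplier, input_h)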


@@ -1,7 +1,5 @@
 import os
 from pathlib import Path
-from diffusers import UNet2DConditionModel, StableDiffusionPipeline
 from ldm.invoke.globals import global_lora_models_dir
 from .kohya_lora_manager import KohyaLoraManager
 from typing import Optional, Dict
@@ -10,29 +8,20 @@ class LoraCondition:
 name: str
 weight: float
-def __init__(self,
-name,
-weight: float = 1.0,
-unet: UNet2DConditionModel=None, # for diffusers format LoRAs
-kohya_manager: Optional[KohyaLoraManager]=None, # for KohyaLoraManager-compatible LoRAs
-):
+def __init__(self, name, weight: float = 1.0, kohya_manager: Optional[KohyaLoraManager]=None):
 self.name = name
 self.weight = weight
 self.kohya_manager = kohya_manager
-self.unet = unet
-def __call__(self):
+def __call__(self, model):
 # TODO: make model able to load from huggingface, rather then just local files
 path = Path(global_lora_models_dir(), self.name)
 if path.is_dir():
-if not self.unet:
-print(f" ** Unable to load diffusers-format LoRA {self.name}: unet is None")
-return
-if self.unet.load_attn_procs:
+if model.load_attn_procs:
 file = Path(path, "pytorch_lora_weights.bin")
 if file.is_file():
 print(f">> Loading LoRA: {path}")
-self.unet.load_attn_procs(path.absolute().as_posix())
+model.load_attn_procs(path.absolute().as_posix())
 else:
 print(f" ** Unable to find valid LoRA at: {path}")
 else:
@@ -48,16 +37,15 @@ class LoraCondition:
 self.kohya_manager.unload_applied_lora(self.name)
 class LoraManager:
-def __init__(self, pipe: StableDiffusionPipeline):
+def __init__(self, pipe):
 # Kohya class handles lora not generated through diffusers
 self.kohya = KohyaLoraManager(pipe, global_lora_models_dir())
-self.unet = pipe.unet
 def set_loras_conditions(self, lora_weights: list):
 conditions = []
 if len(lora_weights) > 0:
 for lora in lora_weights:
-conditions.append(LoraCondition(lora.model, lora.weight, self.unet, self.kohya))
+conditions.append(LoraCondition(lora.model, lora.weight, self.kohya))
 if len(conditions) > 0:
 return conditions
@@ -75,4 +63,4 @@ class LoraManager:
 if suffix in [".ckpt", ".pt", ".safetensors"]:
 models_found[name]=Path(root,x)
 return models_found


@@ -34,7 +34,7 @@ dependencies = [
"clip_anytorch", "clip_anytorch",
"compel~=1.1.0", "compel~=1.1.0",
"datasets", "datasets",
"diffusers[torch]==0.14", "diffusers[torch]~=0.14",
"dnspython==2.2.1", "dnspython==2.2.1",
"einops", "einops",
"eventlet", "eventlet",
@@ -53,7 +53,7 @@ dependencies = [
"imageio-ffmpeg", "imageio-ffmpeg",
"k-diffusion", "k-diffusion",
"kornia", "kornia",
"npyscreen~=4.10.5", "npyscreen",
"numpy<1.24", "numpy<1.24",
"omegaconf", "omegaconf",
"opencv-python", "opencv-python",
@@ -128,7 +128,6 @@ requires-python = ">=3.9, <3.11"
"invokeai-update" = "ldm.invoke.config.invokeai_update:main" "invokeai-update" = "ldm.invoke.config.invokeai_update:main"
"invokeai-batch" = "ldm.invoke.dynamic_prompts:main" "invokeai-batch" = "ldm.invoke.dynamic_prompts:main"
"invokeai-metadata" = "ldm.invoke.invokeai_metadata:main" "invokeai-metadata" = "ldm.invoke.invokeai_metadata:main"
"invokeai-postinstall" = "ldm.invoke.config.post_install:post_install"
[project.urls] [project.urls]
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues" "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"