Compare commits

..

9 Commits

Author SHA1 Message Date
Lincoln Stein
aa1538bd70 [Fix] Lora double hook (#3471)
Currently, hooks are registered multiple times for some modules.
As a result, LoRA is applied multiple times to these modules during generation and
the images look weird.
If you have other ideas on how to fix this better, feel free to push. (A minimal sketch of the deduplication idea follows this entry.)
2023-05-29 20:53:27 -04:00
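
The fix, shown in the LoRA diff at the bottom of this page, keys hooks by module name so that a repeat registration becomes a no-op. A minimal standalone sketch of that idea (the class and method names here are hypothetical, not the project's code):

```python
import torch
from torch.utils.hooks import RemovableHandle

class HookRegistry:
    """Keeps at most one forward hook per module name (hypothetical helper)."""

    def __init__(self):
        # name -> (module, handle), so a repeated registration can be detected
        self.hooks: dict[str, tuple[torch.nn.Module, RemovableHandle]] = {}

    def register(self, name: str, module: torch.nn.Module, hook_fn) -> None:
        if name in self.hooks:
            registered_module, _ = self.hooks[name]
            if registered_module is not module:
                raise RuntimeError(f"Two different modules share the hook key: {name}")
            return  # same module hooked twice: nothing to do
        handle = module.register_forward_hook(hook_fn)
        self.hooks[name] = (module, handle)

    def clear(self) -> None:
        for _, handle in self.hooks.values():
            handle.remove()
        self.hooks.clear()
```

Because the registry is keyed by name, registering the same module twice leaves exactly one hook in place, which is what prevents LoRA weights from being applied twice per forward pass.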
Sergey Borisov
9e87a080a8 Avoid double hook registration in lora 2023-05-26 20:37:02 +03:00
Lincoln Stein
f3b2e02921 make updater error message persist in console 2023-05-22 11:16:40 -04:00
Lincoln Stein
fab5df9317 2.3.5 fixes to automatic updating and vae conversions (#3444)
# Minor fixes to the 2.3 branch

This is a proposed `2.3.5.post2` to correct the updater problems in
2.3.5.post1 and make the transition to 3.0.0 easier.

## Updating fixed

The invokeai-update script now recognizes when the user previously
installed xformers and modifies the pip install command to include
xformers as an extra that needs to be updated. This prevents the
problems experienced during the upgrade to `2.3.5.post1`, in which torch
was updated but xformers wasn't.
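
A condensed sketch of that detection step, assuming only that `pkg_resources` (from setuptools) is available; the actual change is visible in the invokeai-update diff further down this page:

```python
import pkg_resources

def get_extras() -> str:
    """Return '[xformers]' when xformers is already installed, else ''."""
    try:
        pkg_resources.get_distribution("xformers")
        return "[xformers]"
    except pkg_resources.DistributionNotFound:
        return ""

# The extras string is spliced into the upgrade command so pip refreshes
# xformers together with torch, e.g. (illustrative URL only):
#   pip install 'invokeai[xformers] @ https://example.com/InvokeAI/v2.3.5.post2.zip' --use-pep517 --upgrade
```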

## VAE autoconversion improved

In addition to looking for instances in which a user has entered a VAE
ckpt into the "vae" field directly, the model manager now also handles
the case in which the user entered a ckpt (rather than a diffusers model) into
the path field. These two cases now both work:

```
vae: models/ldm/stable-diffusion-1/vae-ft-mse-840000-ema-pruned.ckpt
```
and

```
vae:
      path: models/ldm/stable-diffusion-1/vae-ft-mse-840000-ema-pruned.ckpt
```
In addition, if a 32-bit checkpoint VAE is encountered and the user is running at
half precision, the VAE is now converted to 16 bits on the fly. (A condensed sketch of the new dispatch logic follows this entry.)
2023-05-22 10:56:33 -04:00
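
A condensed sketch of the dispatch described above (the real implementation is the `_load_vae`/`convert_vae` change in the model_manager diff below; the `convert_vae` callable and the plain-`dict` config here are simplifying assumptions):

```python
from pathlib import Path
from typing import Optional, Union

import torch

def load_vae(vae_config: Union[str, Path, dict, None],
             precision: str,
             convert_vae) -> Optional[torch.nn.Module]:
    """Hypothetical dispatcher: accept a bare ckpt path or a {'path': ...}
    mapping, convert it to a diffusers VAE, and honor half precision."""
    dtype = torch.float16 if precision == "float16" else torch.float32

    if isinstance(vae_config, (str, Path)):                      # vae: <ckpt path>
        return convert_vae(vae_config).to(dtype=dtype)
    if isinstance(vae_config, dict) and vae_config.get("path"):  # vae: {path: <ckpt path>}
        return convert_vae(vae_config["path"]).to(dtype=dtype)
    return None  # fall through to the normal diffusers VAE loading
```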
Lincoln Stein
2e21e5b8f3 fixes to automatic updating and vae conversions
This PR makes the following minor fixes to the 2.3 branch:

1. The invokeai-update script now recognizes when the user
previously installed xformers and modifies the pip install command
to include xformers as an extra that needs to be updated.

2. In addition to looking for instances in which a user has entered a
VAE ckpt into the "vae" field directly, it also handles the case in
which the user entered a ckpt into the path field. These two cases
now work:

   vae: models/ldm/stable-diffusion-1/vae-ft-mse-840000-ema-pruned.ckpt

and

   vae:
      path: models/ldm/stable-diffusion-1/vae-ft-mse-840000-ema-pruned.ckpt

3. If a 32-bit checkpoint VAE is encountered and the user is using half precision,
the VAE is now converted to 16 bits on the fly.
2023-05-21 19:11:43 -04:00
blessedcoolant
0ce628b22e autoconvert legacy VAEs (#3235)
This draft PR implements a system in which, if a diffusers model is
loaded and the model manager detects that the user tried to assign a
legacy checkpoint VAE to it, the checkpoint will be converted to
a diffusers VAE in RAM.

It is a draft because it has not been carefully tested yet, and there are
some edge cases that are not handled properly. (A small sketch of the checkpoint-loading step follows this entry.)
2023-05-19 12:26:25 +12:00
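
The loading half of that in-RAM conversion has to cope with both pickle checkpoints (`.ckpt`/`.pt`) and `.safetensors` files, some of which nest their weights under a `state_dict` key. A small sketch of just that step, with a hypothetical helper name and assuming the `safetensors` package is installed:

```python
from pathlib import Path

import safetensors.torch
import torch

def read_vae_state_dict(vae_path: Path) -> dict:
    """Load a legacy VAE checkpoint into a flat state dict (hypothetical helper)."""
    if vae_path.suffix in (".pt", ".ckpt"):
        state_dict = torch.load(vae_path, map_location="cpu")
    else:
        state_dict = safetensors.torch.load_file(vae_path)
    # Some checkpoints wrap the weights in a 'state_dict' key; unwrap if present
    return state_dict.get("state_dict", state_dict)
```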
Lincoln Stein
ddcf9a3396 Merge branch 'v2.3' into lstein/enhance/autoconvert-vaes 2023-05-18 13:41:55 -04:00
Lincoln Stein
23d9361528 autoconvert ckpt VAEs assigned to diffusers models 2023-04-19 17:44:27 -04:00
Lincoln Stein
ce22a1577c convert VAEs to diffusers format automatically
- If the user enters a VAE .ckpt path into the VAE field of a
  diffusers model, the VAE will be automatically converted behind
  the scenes into a diffusers version, then loaded.

- This commit is untested (done on an airplane).
2023-04-18 21:20:08 -04:00
5 changed files with 76 additions and 17 deletions

View File

@@ -1,2 +1,3 @@
-__version__='2.3.5.post1'
+__version__='2.3.5.post2'

View File

@@ -620,7 +620,10 @@ def convert_ldm_vae_checkpoint(checkpoint, config):
     for key in keys:
         if key.startswith(vae_key):
             vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
+    new_checkpoint = convert_ldm_vae_state_dict(vae_state_dict,config)
+    return new_checkpoint
+def convert_ldm_vae_state_dict(vae_state_dict, config):
     new_checkpoint = {}
     new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]

View File

@@ -6,6 +6,7 @@ import os
 import platform
 import psutil
 import requests
+import pkg_resources
 from rich import box, print
 from rich.console import Console, group
 from rich.panel import Panel
@@ -72,10 +73,20 @@ def welcome(versions: dict):
     )
     console.line()
+def get_extras():
+    extras = ''
+    try:
+        dist = pkg_resources.get_distribution('xformers')
+        extras = '[xformers]'
+    except pkg_resources.DistributionNotFound:
+        pass
+    return extras
 def main():
     versions = get_versions()
     if invokeai_is_running():
         print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
         input('Press any key to continue...')
         return
     welcome(versions)
@@ -94,13 +105,15 @@ def main():
     elif choice=='4':
         branch = Prompt.ask('Enter an InvokeAI branch name')
+    extras = get_extras()
     print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
     if release:
-        cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip' --use-pep517 --upgrade"
     elif tag:
-        cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_TAG}/{tag}.zip' --use-pep517 --upgrade"
     else:
-        cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_BRANCH}/{branch}.zip' --use-pep517 --upgrade"
     print('')
     print('')
     if os.system(cmd)==0:

View File

@@ -9,7 +9,6 @@ from __future__ import annotations
 import contextlib
 import gc
 import hashlib
-import io
 import os
 import re
 import sys
@@ -31,11 +30,10 @@ from huggingface_hub import scan_cache_dir
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
 from ldm.invoke.devices import CPU_DEVICE
 from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name
+from ldm.util import ask_user, download_with_resume, url_attachment_name
 class SDLegacyType(Enum):
@@ -370,8 +368,9 @@ class ModelManager(object):
         print(
             f">> Converting legacy checkpoint {model_name} into a diffusers model..."
         )
-        from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt
+        from .ckpt_to_diffuser import (
+            load_pipeline_from_original_stable_diffusion_ckpt,
+        )
         if self._has_cuda():
             torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
@@ -1230,6 +1229,17 @@ class ModelManager(object):
         return vae_path
     def _load_vae(self, vae_config) -> AutoencoderKL:
+        using_fp16 = self.precision == "float16"
+        dtype = torch.float16 if using_fp16 else torch.float32
+        # Handle the common case of a user shoving a VAE .ckpt into
+        # the vae field for a diffusers. We convert it into diffusers
+        # format and use it.
+        if isinstance(vae_config,(str,Path)):
+            return self.convert_vae(vae_config).to(dtype=dtype)
+        elif isinstance(vae_config,DictConfig) and (vae_path := vae_config.get('path')):
+            return self.convert_vae(vae_path).to(dtype=dtype)
         vae_args = {}
         try:
             name_or_path = self.model_name_or_path(vae_config)
@@ -1237,7 +1247,6 @@ class ModelManager(object):
             return None
         if name_or_path is None:
             return None
-        using_fp16 = self.precision == "float16"
         vae_args.update(
             cache_dir=global_cache_dir("hub"),
@@ -1277,6 +1286,32 @@ class ModelManager(object):
         return vae
+    @staticmethod
+    def convert_vae(vae_path: Union[Path,str])->AutoencoderKL:
+        print(" | A checkpoint VAE was detected. Converting to diffusers format.")
+        vae_path = Path(Globals.root,vae_path).resolve()
+        from .ckpt_to_diffuser import (
+            create_vae_diffusers_config,
+            convert_ldm_vae_state_dict,
+        )
+        vae_path = Path(vae_path)
+        if vae_path.suffix in ['.pt','.ckpt']:
+            vae_state_dict = torch.load(vae_path, map_location="cpu")
+        else:
+            vae_state_dict = safetensors.torch.load_file(vae_path)
+        if 'state_dict' in vae_state_dict:
+            vae_state_dict = vae_state_dict['state_dict']
+        # TODO: see if this works with 1.x inpaint models and 2.x models
+        config_file_path = Path(Globals.root,"configs/stable-diffusion/v1-inference.yaml")
+        original_conf = OmegaConf.load(config_file_path)
+        vae_config = create_vae_diffusers_config(original_conf, image_size=512) # TODO: fix
+        diffusers_vae = convert_ldm_vae_state_dict(vae_state_dict,vae_config)
+        vae = AutoencoderKL(**vae_config)
+        vae.load_state_dict(diffusers_vae)
+        return vae
     @staticmethod
     def _delete_model_from_cache(repo_id):
         cache_info = scan_cache_dir(global_cache_dir("diffusers"))

View File

@@ -1,6 +1,6 @@
 import json
 from pathlib import Path
-from typing import Optional
+from typing import Optional, Dict, Tuple
 import torch
 from diffusers.models import UNet2DConditionModel
@@ -166,12 +166,12 @@ class LoKRLayer:
 class LoRAModuleWrapper:
     unet: UNet2DConditionModel
     text_encoder: CLIPTextModel
-    hooks: list[RemovableHandle]
+    hooks: Dict[str, Tuple[torch.nn.Module, RemovableHandle]]
     def __init__(self, unet, text_encoder):
         self.unet = unet
         self.text_encoder = text_encoder
-        self.hooks = []
+        self.hooks = dict()
         self.text_modules = None
         self.unet_modules = None
@@ -228,7 +228,7 @@ class LoRAModuleWrapper:
         wrapper = self
         def lora_forward(module, input_h, output):
-            if len(wrapper.loaded_loras) == 0:
+            if len(wrapper.applied_loras) == 0:
                 return output
             for lora in wrapper.applied_loras.values():
@@ -241,11 +241,18 @@ class LoRAModuleWrapper:
         return lora_forward
     def apply_module_forward(self, module, name):
-        handle = module.register_forward_hook(self.lora_forward_hook(name))
-        self.hooks.append(handle)
+        if name in self.hooks:
+            registered_module, _ = self.hooks[name]
+            if registered_module != module:
+                raise Exception(f"Trying to register multiple modules to lora key: {name}")
+            # else it's just double hook creation - nothing to do
+        else:
+            handle = module.register_forward_hook(self.lora_forward_hook(name))
+            self.hooks[name] = (module, handle)
     def clear_hooks(self):
-        for hook in self.hooks:
+        for _, hook in self.hooks.values():
             hook.remove()
         self.hooks.clear()