Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-20 01:07:54 -05:00)

Compare commits: v2.3.5.pos...v2.3 (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | aa1538bd70 | |
| | 9e87a080a8 | |
| | f3b2e02921 | |
| | fab5df9317 | |
| | 2e21e5b8f3 | |
| | 0ce628b22e | |
| | ddcf9a3396 | |
| | 23d9361528 | |
| | ce22a1577c | |
```diff
@@ -1,2 +1,3 @@
-__version__='2.3.5.post1'
+__version__='2.3.5.post2'
```
```diff
@@ -620,7 +620,10 @@ def convert_ldm_vae_checkpoint(checkpoint, config):
     for key in keys:
         if key.startswith(vae_key):
             vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
+    new_checkpoint = convert_ldm_vae_state_dict(vae_state_dict,config)
+    return new_checkpoint

+def convert_ldm_vae_state_dict(vae_state_dict, config):
     new_checkpoint = {}

     new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
```
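The hunk above splits `convert_ldm_vae_checkpoint` so that the key-remapping logic in the new `convert_ldm_vae_state_dict` can be reused for standalone VAE files. A minimal sketch of the prefix-stripping step that `convert_ldm_vae_checkpoint` still performs before delegating; the dummy tensors are placeholders, not real weights:

```python
# Sketch only: a full LDM checkpoint stores its VAE weights under the
# "first_stage_model." prefix, while a standalone VAE checkpoint does not.
import torch

def extract_vae_state_dict(checkpoint: dict, vae_key: str = "first_stage_model.") -> dict:
    """Return only the VAE weights, with the checkpoint prefix removed."""
    return {
        key[len(vae_key):]: value
        for key, value in checkpoint.items()
        if key.startswith(vae_key)
    }

full_ckpt = {
    "first_stage_model.encoder.conv_in.weight": torch.zeros(1),
    "model.diffusion_model.some_unet_weight": torch.zeros(1),
}
print(extract_vae_state_dict(full_ckpt))
# -> {'encoder.conv_in.weight': tensor([0.])}
```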
```diff
@@ -6,6 +6,7 @@ import os
 import platform
 import psutil
 import requests
+import pkg_resources
 from rich import box, print
 from rich.console import Console, group
 from rich.panel import Panel
```
```diff
@@ -72,10 +73,20 @@ def welcome(versions: dict):
     )
     console.line()

+def get_extras():
+    extras = ''
+    try:
+        dist = pkg_resources.get_distribution('xformers')
+        extras = '[xformers]'
+    except pkg_resources.DistributionNotFound:
+        pass
+    return extras
+
 def main():
     versions = get_versions()
     if invokeai_is_running():
         print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
         input('Press any key to continue...')
         return

     welcome(versions)
```
```diff
@@ -94,13 +105,15 @@ def main():
     elif choice=='4':
         branch = Prompt.ask('Enter an InvokeAI branch name')

+    extras = get_extras()
+
     print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
     if release:
-        cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip' --use-pep517 --upgrade"
     elif tag:
-        cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_TAG}/{tag}.zip' --use-pep517 --upgrade"
     else:
-        cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
+        cmd = f"pip install 'invokeai{extras} @ {INVOKE_AI_BRANCH}/{branch}.zip' --use-pep517 --upgrade"
     print('')
     print('')
     if os.system(cmd)==0:
```
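A standalone sketch of how the updated installer command fits together, assuming only that `pkg_resources` (bundled with setuptools) is available; the base URL and tag below are placeholders rather than the real `INVOKE_AI_TAG` value:

```python
import pkg_resources

def get_extras() -> str:
    """Return '[xformers]' when the xformers package is installed, else ''."""
    try:
        pkg_resources.get_distribution("xformers")
        return "[xformers]"
    except pkg_resources.DistributionNotFound:
        return ""

def build_upgrade_command(base_url: str, tag: str) -> str:
    # PEP 508 direct reference: "invokeai[xformers] @ <url>" carries the extra
    # along with the source archive, so the xformers dependency stays installed.
    return (
        f"pip install 'invokeai{get_extras()} @ {base_url}/{tag}.zip' "
        f"--use-pep517 --upgrade"
    )

if __name__ == "__main__":
    print(build_upgrade_command("https://example.com/archive", "v2.3.5.post2"))
```

Presumably the point of the change: a plain `pip install <zip>` reinstalls InvokeAI without its extras, whereas the direct-reference form preserves an existing `[xformers]` install across the upgrade.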
```diff
@@ -9,7 +9,6 @@ from __future__ import annotations
 import contextlib
 import gc
 import hashlib
 import io
 import os
 import re
 import sys
```
```diff
@@ -31,11 +30,10 @@ from huggingface_hub import scan_cache_dir
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path

 from ldm.invoke.devices import CPU_DEVICE
 from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name
+from ldm.util import ask_user, download_with_resume, url_attachment_name


 class SDLegacyType(Enum):
```
```diff
@@ -370,8 +368,9 @@ class ModelManager(object):
         print(
             f">> Converting legacy checkpoint {model_name} into a diffusers model..."
         )
-        from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt
-
+        from .ckpt_to_diffuser import (
+            load_pipeline_from_original_stable_diffusion_ckpt,
+        )
         if self._has_cuda():
             torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
```
```diff
@@ -1230,6 +1229,17 @@ class ModelManager(object):
         return vae_path

     def _load_vae(self, vae_config) -> AutoencoderKL:
+        using_fp16 = self.precision == "float16"
+        dtype = torch.float16 if using_fp16 else torch.float32
+
+        # Handle the common case of a user shoving a VAE .ckpt into
+        # the vae field for a diffusers. We convert it into diffusers
+        # format and use it.
+        if isinstance(vae_config,(str,Path)):
+            return self.convert_vae(vae_config).to(dtype=dtype)
+        elif isinstance(vae_config,DictConfig) and (vae_path := vae_config.get('path')):
+            return self.convert_vae(vae_path).to(dtype=dtype)
+
         vae_args = {}
         try:
             name_or_path = self.model_name_or_path(vae_config)
```
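A small, self-contained illustration of the dispatch `_load_vae` now performs: a bare string or `Path`, or a `DictConfig` carrying a `path` entry, is routed through `convert_vae`, while anything else falls through to the regular diffusers loader. Only `omegaconf` is assumed, and the paths are placeholders:

```python
from pathlib import Path
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig

def describe_vae_source(vae_config) -> str:
    if isinstance(vae_config, (str, Path)):
        return f"checkpoint file: {vae_config}"            # bare path -> convert_vae()
    if isinstance(vae_config, DictConfig) and (vae_path := vae_config.get("path")):
        return f"checkpoint file from config: {vae_path}"  # dict with 'path' -> convert_vae()
    return "diffusers repo or subfolder"                   # fall through to from_pretrained()

print(describe_vae_source("models/vae/my_vae.ckpt"))
print(describe_vae_source(OmegaConf.create({"path": "models/vae/my_vae.safetensors"})))
print(describe_vae_source(OmegaConf.create({"repo_id": "stabilityai/sd-vae-ft-mse"})))
```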
```diff
@@ -1237,7 +1247,6 @@ class ModelManager(object):
             return None
         if name_or_path is None:
             return None
-        using_fp16 = self.precision == "float16"

         vae_args.update(
             cache_dir=global_cache_dir("hub"),
```
```diff
@@ -1277,6 +1286,32 @@ class ModelManager(object):

         return vae

+    @staticmethod
+    def convert_vae(vae_path: Union[Path,str])->AutoencoderKL:
+        print(" | A checkpoint VAE was detected. Converting to diffusers format.")
+        vae_path = Path(Globals.root,vae_path).resolve()
+
+        from .ckpt_to_diffuser import (
+            create_vae_diffusers_config,
+            convert_ldm_vae_state_dict,
+        )
+
+        vae_path = Path(vae_path)
+        if vae_path.suffix in ['.pt','.ckpt']:
+            vae_state_dict = torch.load(vae_path, map_location="cpu")
+        else:
+            vae_state_dict = safetensors.torch.load_file(vae_path)
+        if 'state_dict' in vae_state_dict:
+            vae_state_dict = vae_state_dict['state_dict']
+        # TODO: see if this works with 1.x inpaint models and 2.x models
+        config_file_path = Path(Globals.root,"configs/stable-diffusion/v1-inference.yaml")
+        original_conf = OmegaConf.load(config_file_path)
+        vae_config = create_vae_diffusers_config(original_conf, image_size=512) # TODO: fix
+        diffusers_vae = convert_ldm_vae_state_dict(vae_state_dict,vae_config)
+        vae = AutoencoderKL(**vae_config)
+        vae.load_state_dict(diffusers_vae)
+        return vae
+
     @staticmethod
     def _delete_model_from_cache(repo_id):
         cache_info = scan_cache_dir(global_cache_dir("diffusers"))
```
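A hypothetical usage sketch, not taken from the diff: because `convert_vae` is a static method, a converted checkpoint VAE could be attached to an existing diffusers pipeline directly. The model id and the `.safetensors` path are placeholders, and a full InvokeAI install (which provides `Globals.root` and the bundled `v1-inference.yaml`) is assumed:

```python
import torch
from diffusers import StableDiffusionPipeline
from ldm.invoke.model_manager import ModelManager

# Placeholder model id and VAE path; substitute your own.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
vae = ModelManager.convert_vae("models/vae/my-finetuned-vae.safetensors")
pipe.vae = vae.to(dtype=torch.float32)
```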
```diff
@@ -1,6 +1,6 @@
 import json
 from pathlib import Path
-from typing import Optional
+from typing import Optional, Dict, Tuple

 import torch
 from diffusers.models import UNet2DConditionModel
```
```diff
@@ -166,12 +166,12 @@ class LoKRLayer:
 class LoRAModuleWrapper:
     unet: UNet2DConditionModel
     text_encoder: CLIPTextModel
-    hooks: list[RemovableHandle]
+    hooks: Dict[str, Tuple[torch.nn.Module, RemovableHandle]]

     def __init__(self, unet, text_encoder):
         self.unet = unet
         self.text_encoder = text_encoder
-        self.hooks = []
+        self.hooks = dict()
         self.text_modules = None
         self.unet_modules = None
```
||||
@@ -228,7 +228,7 @@ class LoRAModuleWrapper:
|
||||
wrapper = self
|
||||
|
||||
def lora_forward(module, input_h, output):
|
||||
if len(wrapper.loaded_loras) == 0:
|
||||
if len(wrapper.applied_loras) == 0:
|
||||
return output
|
||||
|
||||
for lora in wrapper.applied_loras.values():
|
||||
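For context, a minimal sketch of the post-forward-hook pattern this rename lives in: when no LoRAs are applied the hook returns the output untouched, otherwise each applied LoRA contributes an additive correction. The `applied_loras` dict and the per-LoRA call are stand-ins for the wrapper's real bookkeeping:

```python
import torch

applied_loras: dict = {}  # name -> callable producing an additive correction

def lora_forward_hook(module, inputs, output):
    if len(applied_loras) == 0:
        return output  # fast path: nothing applied, pass the output through
    for lora in applied_loras.values():
        output = output + lora(inputs[0])
    return output

layer = torch.nn.Linear(8, 8)
layer.register_forward_hook(lora_forward_hook)
print(layer(torch.randn(2, 8)).shape)
```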
```diff
@@ -241,11 +241,18 @@ class LoRAModuleWrapper:
         return lora_forward

     def apply_module_forward(self, module, name):
-        handle = module.register_forward_hook(self.lora_forward_hook(name))
-        self.hooks.append(handle)
+        if name in self.hooks:
+            registered_module, _ = self.hooks[name]
+            if registered_module != module:
+                raise Exception(f"Trying to register multiple modules to lora key: {name}")
+            # else it's just double hook creation - nothing to do
+
+        else:
+            handle = module.register_forward_hook(self.lora_forward_hook(name))
+            self.hooks[name] = (module, handle)

     def clear_hooks(self):
-        for hook in self.hooks:
+        for _, hook in self.hooks.values():
             hook.remove()

         self.hooks.clear()
```
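The same bookkeeping, extracted into a standalone sketch: hooks are keyed by name so one LoRA key can never be bound to two different modules, re-registering the same (name, module) pair is a no-op, and clearing removes every handle. Class and variable names here are illustrative, not from the InvokeAI source:

```python
from typing import Dict, Tuple
import torch
from torch.utils.hooks import RemovableHandle

class HookRegistry:
    def __init__(self):
        self.hooks: Dict[str, Tuple[torch.nn.Module, RemovableHandle]] = {}

    def register(self, name: str, module: torch.nn.Module, hook_fn):
        if name in self.hooks:
            registered_module, _ = self.hooks[name]
            if registered_module is not module:
                raise Exception(f"Trying to register multiple modules to lora key: {name}")
            return  # duplicate registration of the same module: nothing to do
        handle = module.register_forward_hook(hook_fn)
        self.hooks[name] = (module, handle)

    def clear(self):
        for _, handle in self.hooks.values():
            handle.remove()
        self.hooks.clear()

registry = HookRegistry()
layer = torch.nn.Linear(4, 4)
registry.register("lora_unet_layer_0", layer, lambda m, i, o: o)
registry.register("lora_unet_layer_0", layer, lambda m, i, o: o)  # no-op, no error
registry.clear()
```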