Compare commits

..

3 Commits

Author         SHA1        Message                                                  Date
Lincoln Stein  020fb6c1e4  bump version number for release 2.3.5                    2023-04-24 23:12:32 -04:00
Lincoln Stein  c63ef500b6  Merge branch 'v2.3' into bugfix/control-model-revision   2023-04-25 03:58:00 +01:00
Lincoln Stein  5f1d311c52  Merge branch 'v2.3' into bugfix/control-model-revision   2023-04-25 03:23:12 +01:00
11 changed files with 25 additions and 36 deletions

@@ -41,7 +41,7 @@ jobs:
           --verbose
       - name: deploy to gh-pages
-        if: ${{ github.ref == 'refs/heads/v2.3' }}
+        if: ${{ github.ref == 'refs/heads/main' }}
         run: |
           python -m \
             mkdocs gh-deploy \

.gitignore

@@ -233,3 +233,5 @@ installer/install.sh
 installer/update.bat
 installer/update.sh
+# no longer stored in source directory
+models

@@ -1 +1 @@
-__version__='2.3.5'
+__version__='2.3.5-rc1'

@@ -39,7 +39,7 @@ def invokeai_is_running()->bool:
             if matches:
                 print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
                 return True
-        except (psutil.AccessDenied,psutil.NoSuchProcess):
+        except psutil.AccessDenied:
             continue
     return False
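For context on the two-exception catch being removed here: psutil.process_iter() races with process exit, so a PID listed one moment can be gone by the time its command line is read, which raises psutil.NoSuchProcess. A minimal sketch of the pattern (the matching logic is illustrative, not this function's actual code):

    import psutil

    def any_process_matching(needle: str) -> bool:
        for p in psutil.process_iter():
            try:
                # cmdline() fails if the process exited after being listed
                # (NoSuchProcess) or belongs to another user (AccessDenied).
                if any(needle in arg for arg in p.cmdline()):
                    return True
            except (psutil.AccessDenied, psutil.NoSuchProcess):
                continue
        return False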

@@ -111,6 +111,7 @@ def install_requested_models(
     if len(external_models)>0:
         print("== INSTALLING EXTERNAL MODELS ==")
         for path_url_or_repo in external_models:
+            print(f'DEBUG: path_url_or_repo = {path_url_or_repo}')
             try:
                 model_manager.heuristic_import(
                     path_url_or_repo,
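As the variable name suggests, each entry handed to heuristic_import() may be a local path, a direct URL, or a Hugging Face repo id. A hypothetical sketch of that kind of source discrimination (classify_model_source and its rules are assumptions, not InvokeAI's actual logic):

    from pathlib import Path

    def classify_model_source(source: str) -> str:
        # Hypothetical helper; a real importer would also inspect file
        # contents and model formats.
        if source.startswith(("http://", "https://")):
            return "url"
        if Path(source).expanduser().exists():
            return "local path"
        return "repo id"  # e.g. "owner/model-name" on Hugging Face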

@@ -400,15 +400,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def _submodels(self) -> Sequence[torch.nn.Module]:
         module_names, _, _ = self.extract_init_dict(dict(self.config))
-        submodels = []
-        for name in module_names.keys():
-            if hasattr(self, name):
-                value = getattr(self, name)
-            else:
-                value = getattr(self.config, name)
-            if isinstance(value, torch.nn.Module):
-                submodels.append(value)
-        return submodels
+        values = [getattr(self, name) for name in module_names.keys()]
+        return [m for m in values if isinstance(m, torch.nn.Module)]

     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                               conditioning_data: ConditioningData,
@@ -479,7 +472,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                 step_count=len(self.scheduler.timesteps)
         ):
-            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.config.num_train_timesteps,
+            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,
                                             latents=latents)

         batch_size = latents.shape[0]
@@ -763,7 +756,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""
-        return self.unet.config.in_channels
+        return self.unet.in_channels

     def decode_latents(self, latents):
         # Explicit call to get the vae loaded, since `decode` isn't the forward method.
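The last two hunks swap diffusers-0.15-style config access (scheduler.config.num_train_timesteps, unet.config.in_channels) back to the 0.14 attribute style. A version-tolerant accessor, as a sketch (the helper name is mine; it assumes the config behaves like a mapping, which diffusers' FrozenDict does):

    def unet_in_channels(unet) -> int:
        # Newer diffusers keeps init kwargs on .config only; 0.14-era
        # code could still read them straight off the module.
        config = getattr(unet, "config", None)
        if config is not None and "in_channels" in config:
            return config["in_channels"]
        return unet.in_channels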

@@ -1156,7 +1156,7 @@ class ModelManager(object):
         return self.device.type == "cuda"

     def _diffuser_sha256(
-        self, name_or_path: Union[str, Path], chunksize=16777216
+        self, name_or_path: Union[str, Path], chunksize=4096
     ) -> Union[str, bytes]:
         path = None
         if isinstance(name_or_path, Path):
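The chunk size only affects how the hash is streamed, not its value: 16777216 bytes (16 MiB) means far fewer read calls over multi-gigabyte checkpoints than 4096. A self-contained sketch of chunked SHA-256 hashing (sha256_of_file is an illustrative helper, not the _diffuser_sha256 method itself):

    import hashlib
    from pathlib import Path

    def sha256_of_file(path: Path, chunksize: int = 16_777_216) -> str:
        # Stream fixed-size chunks so large checkpoint files never have
        # to fit in memory; bigger chunks mean fewer syscalls.
        sha = hashlib.sha256()
        with open(path, 'rb') as f:
            while chunk := f.read(chunksize):
                sha.update(chunk)
        return sha.hexdigest()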

@@ -14,6 +14,7 @@ from torch import nn
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.cross_attention import AttnProcessor
 from ldm.invoke.devices import torch_dtype
@@ -162,7 +163,7 @@ class Context:
 class InvokeAICrossAttentionMixin:
     """
-    Enable InvokeAI-flavoured Attention calculation, which does aggressive low-memory slicing and calls
+    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
     through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
     and dynamic slicing strategy selection.
     """
@@ -177,7 +178,7 @@ class InvokeAICrossAttentionMixin:
         Set custom attention calculator to be called when attention is calculated
         :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
                          which returns either the suggested_attention_slice or an adjusted equivalent.
-            `module` is the current Attention module for which the callback is being invoked.
+            `module` is the current CrossAttention module for which the callback is being invoked.
             `suggested_attention_slice` is the default-calculated attention slice
             `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
             If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
@@ -325,7 +326,7 @@ def setup_cross_attention_control_attention_processors(unet: UNet2DConditionMode
 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
-    from ldm.modules.attention import CrossAttention # avoid circular import # TODO: rename as in diffusers?
+    from ldm.modules.attention import CrossAttention # avoid circular import
     cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention
     which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
     attention_module_tuples = [(name,module) for name, module in model.named_modules() if
@@ -431,7 +432,7 @@ def get_mem_free_total(device):
-class InvokeAIDiffusersCrossAttention(diffusers.models.attention.Attention, InvokeAICrossAttentionMixin):
+class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin):

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -456,8 +457,8 @@ class InvokeAIDiffusersCrossAttention(diffusers.models.attention.Attention, Invo
     """
     # base implementation

-    class AttnProcessor:
-        def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
+    class CrossAttnProcessor:
+        def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
             batch_size, sequence_length, _ = hidden_states.shape
             attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length)
@@ -486,7 +487,7 @@ from dataclasses import field, dataclass

 import torch

-from diffusers.models.attention_processor import Attention, AttnProcessor, SlicedAttnProcessor
+from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor

 @dataclass
@@ -531,7 +532,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):

     # TODO: dynamically pick slice size based on memory conditions
-    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None,
+    def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None,
                  # kwargs
                  swap_cross_attn_context: SwapCrossAttnContext=None):
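Most of these hunks undo the diffusers 0.15 renames: the module diffusers.models.cross_attention became diffusers.models.attention_processor, the class CrossAttention became Attention, and CrossAttnProcessor became AttnProcessor. Code that must run against either version can bridge the rename with a tolerant import; a minimal shim, assuming only the names moved (which is exactly what the import lines above suggest):

    try:  # diffusers >= 0.15 naming
        from diffusers.models.attention_processor import (
            Attention as CrossAttention,
            AttnProcessor as CrossAttnProcessor,
            SlicedAttnProcessor,
        )
    except ImportError:  # diffusers <= 0.14 naming
        from diffusers.models.cross_attention import (
            CrossAttention,
            CrossAttnProcessor,
            SlicedAttnProcessor,
        )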

@@ -5,6 +5,7 @@ from typing import Callable, Optional, Union, Any

 import numpy as np
 import torch
+from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias
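TypeAlias (PEP 613) marks a module-level assignment as a type alias, so checkers treat it as a type rather than an ordinary variable. A generic illustration with a hypothetical alias name, not necessarily this module's:

    from typing import Callable

    import torch
    from typing_extensions import TypeAlias

    # Hypothetical: a callback mapping latents to model output.
    ForwardCallback: TypeAlias = Callable[[torch.Tensor], torch.Tensor]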

@@ -9,7 +9,7 @@ from safetensors.torch import load_file
 from torch.utils.hooks import RemovableHandle
 from transformers import CLIPTextModel

-from ..invoke.globals import global_lora_models_dir, Globals
+from ..invoke.globals import global_lora_models_dir
 from ..invoke.devices import choose_torch_device

 """
@@ -456,25 +456,16 @@ class LoRA:

 class KohyaLoraManager:
+    lora_path = Path(global_lora_models_dir())
+    vector_length_cache_path = lora_path / '.vectorlength.cache'

     def __init__(self, pipe):
-        self.vector_length_cache_path = self.lora_path / '.vectorlength.cache'
         self.unet = pipe.unet
         self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
         self.text_encoder = pipe.text_encoder
         self.device = torch.device(choose_torch_device())
         self.dtype = pipe.unet.dtype

-    @classmethod
-    @property
-    def lora_path(cls)->Path:
-        return Path(global_lora_models_dir())
-
-    @classmethod
-    @property
-    def vector_length_cache_path(cls)->Path:
-        return cls.lora_path / '.vectorlength.cache'
-
     def load_lora_module(self, name, path_file, multiplier: float = 1.0):
         print(f"  | Found lora {name} at {path_file}")
         if path_file.suffix == ".safetensors":
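The removed code stacked @classmethod on @property so the paths were recomputed on every access; the replacement binds them once, when the class body executes. A minimal contrast with an illustrative directory (note that chaining classmethod with property only works on Python 3.9-3.12; it was removed in 3.13):

    from pathlib import Path

    class EagerPaths:
        # Evaluated once at class-definition time; later changes to the
        # configured directory are not picked up.
        lora_path = Path('/configured/lora/dir')

    class LazyPaths:
        # Recomputed on every access of LazyPaths.lora_path.
        @classmethod
        @property
        def lora_path(cls) -> Path:
            return Path('/configured/lora/dir')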

@@ -34,7 +34,7 @@ dependencies = [
     "clip_anytorch",
     "compel~=1.1.0",
     "datasets",
-    "diffusers[torch]~=0.15.0",
+    "diffusers[torch]==0.14",
     "dnspython==2.2.1",
     "einops",
     "eventlet",