Merge branch 'v2.3' into fix/new_diffusers_names

Lincoln Stein committed 2023-04-20 17:20:33 +01:00 (committed by GitHub)
2 changed files with 119 additions and 31 deletions

.github/CODEOWNERS

@@ -1,13 +1,13 @@
 # continuous integration
-/.github/workflows/ @mauwii @lstein @blessedcoolant
+/.github/workflows/ @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @mauwii @blessedcoolant
-mkdocs.yml @mauwii @lstein
+/docs/ @lstein @blessedcoolant
+mkdocs.yml @lstein @ebr
 
 # installation and configuration
-/pyproject.toml @mauwii @lstein @ebr
-/docker/ @mauwii
+/pyproject.toml @lstein @ebr
+/docker/ @lstein
 /scripts/ @ebr @lstein @blessedcoolant
 /installer/ @ebr @lstein
 ldm/invoke/config @lstein @ebr
@@ -21,13 +21,13 @@ invokeai/configs @lstein @ebr @blessedcoolant
 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @keturn
+/ldm/generate.py @lstein @gregghelt2
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
-/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @keturn @damian0815
+/ldm/invoke/config @lstein @ebr @blessedcoolant
+/ldm/invoke/generator @gregghelt2 @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
@@ -36,17 +36,17 @@ invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/restoration @lstein @blessedcoolant
 
 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn @blessedcoolant
+/ldm/models @damian0815 @gregghelt2 @blessedcoolant
 /ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
-/ldm/modules/attention.py @damian0815 @keturn
-/ldm/modules/diffusionmodules @damian0815 @keturn
-/ldm/modules/distributions @damian0815 @keturn
-/ldm/modules/ema.py @damian0815 @keturn
+/ldm/modules/attention.py @damian0815 @gregghelt2
+/ldm/modules/diffusionmodules @damian0815 @gregghelt2
+/ldm/modules/distributions @damian0815 @gregghelt2
+/ldm/modules/ema.py @damian0815 @gregghelt2
 /ldm/modules/embedding_manager.py @lstein
-/ldm/modules/encoders @damian0815 @keturn
-/ldm/modules/image_degradation @damian0815 @keturn
-/ldm/modules/losses @damian0815 @keturn
-/ldm/modules/x_transformer.py @damian0815 @keturn
+/ldm/modules/encoders @damian0815 @gregghelt2
+/ldm/modules/image_degradation @damian0815 @gregghelt2
+/ldm/modules/losses @damian0815 @gregghelt2
+/ldm/modules/x_transformer.py @damian0815 @gregghelt2
 
 # Nodes
 apps/ @Kyle0654 @jpphoto


@@ -31,18 +31,13 @@ class LoRALayer:
         self.name = name
         self.scale = alpha / rank if (alpha and rank) else 1.0
 
-    def forward(self, lora, input_h, output):
+    def forward(self, lora, input_h):
         if self.mid is None:
-            output = (
-                output
-                + self.up(self.down(*input_h)) * lora.multiplier * self.scale
-            )
+            weight = self.up(self.down(*input_h))
         else:
-            output = (
-                output
-                + self.up(self.mid(self.down(*input_h))) * lora.multiplier * self.scale
-            )
-        return output
+            weight = self.up(self.mid(self.down(*input_h)))
+        return weight * lora.multiplier * self.scale
 
 
 class LoHALayer:
     lora_name: str
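For reference, a minimal standalone sketch of what the refactored LoRALayer.forward now computes. Toy shapes and plain Linear modules standing in for the layer's down/up projections are assumptions, not the project's code; the point is that forward returns only the scaled low-rank delta, and the caller adds it to the base output.

import torch

# Toy LoRA pair (assumed shapes): down projects 32 -> 4, up projects 4 -> 32.
down = torch.nn.Linear(32, 4, bias=False)
up = torch.nn.Linear(4, 32, bias=False)
multiplier, scale = 0.8, 1.0 / 4  # scale = alpha / rank

x = torch.randn(1, 32)
delta = up(down(x)) * multiplier * scale  # what forward() now returns
# The caller is responsible for: output = base_output + delta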
@@ -64,7 +59,7 @@ class LoHALayer:
         self.name = name
         self.scale = alpha / rank if (alpha and rank) else 1.0
 
-    def forward(self, lora, input_h, output):
+    def forward(self, lora, input_h):
 
         if type(self.org_module) == torch.nn.Conv2d:
             op = torch.nn.functional.conv2d
@@ -86,9 +81,9 @@ class LoHALayer:
         rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', self.t1, self.w1_b, self.w1_a)
         rebuild2 = torch.einsum('i j k l, j r, i p -> p r k l', self.t2, self.w2_b, self.w2_a)
         weight = rebuild1 * rebuild2
 
         bias = self.bias if self.bias is not None else 0
-        return output + op(
+        return op(
             *input_h,
             (weight + bias).view(self.org_module.weight.shape),
             None,
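As a quick shape check on the einsum rebuild above, here is a small sketch with hypothetical conv dimensions (rank 4, 8 input channels, 16 output channels, 3x3 kernel); all shapes are assumptions chosen for illustration.

import torch

rank, c_in, c_out, k = 4, 8, 16, 3
t1 = torch.randn(rank, rank, k, k)    # 'i j k l' operand (Tucker core)
w1_a = torch.randn(rank, c_out)       # 'i p' operand
w1_b = torch.randn(rank, c_in)        # 'j r' operand

rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', t1, w1_b, w1_a)
print(rebuild1.shape)  # torch.Size([16, 8, 3, 3]) -- matches a Conv2d weight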
@@ -96,6 +91,69 @@ class LoHALayer:
         ) * lora.multiplier * self.scale
 
 
+class LoKRLayer:
+    lora_name: str
+    name: str
+    scale: float
+    w1: Optional[torch.Tensor] = None
+    w1_a: Optional[torch.Tensor] = None
+    w1_b: Optional[torch.Tensor] = None
+    w2: Optional[torch.Tensor] = None
+    w2_a: Optional[torch.Tensor] = None
+    w2_b: Optional[torch.Tensor] = None
+    t2: Optional[torch.Tensor] = None
+    bias: Optional[torch.Tensor] = None
+    org_module: torch.nn.Module
+
+    def __init__(self, lora_name: str, name: str, rank=4, alpha=1.0):
+        self.lora_name = lora_name
+        self.name = name
+        self.scale = alpha / rank if (alpha and rank) else 1.0
+
+    def forward(self, lora, input_h):
+        if type(self.org_module) == torch.nn.Conv2d:
+            op = torch.nn.functional.conv2d
+            extra_args = dict(
+                stride=self.org_module.stride,
+                padding=self.org_module.padding,
+                dilation=self.org_module.dilation,
+                groups=self.org_module.groups,
+            )
+        else:
+            op = torch.nn.functional.linear
+            extra_args = {}
+
+        w1 = self.w1
+        if w1 is None:
+            w1 = self.w1_a @ self.w1_b
+
+        w2 = self.w2
+        if w2 is None:
+            if self.t2 is None:
+                w2 = self.w2_a @ self.w2_b
+            else:
+                w2 = torch.einsum('i j k l, i p, j r -> p r k l', self.t2, self.w2_a, self.w2_b)
+
+        if len(w2.shape) == 4:
+            w1 = w1.unsqueeze(2).unsqueeze(2)
+        w2 = w2.contiguous()
+        weight = torch.kron(w1, w2).reshape(self.org_module.weight.shape)
+
+        bias = self.bias if self.bias is not None else 0
+        return op(
+            *input_h,
+            (weight + bias).view(self.org_module.weight.shape),
+            None,
+            **extra_args
+        ) * lora.multiplier * self.scale
+
+
 class LoRAModuleWrapper:
     unet: UNet2DConditionModel
     text_encoder: CLIPTextModel
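A minimal sketch of the Kronecker reconstruction the new LoKRLayer performs, with assumed toy dimensions: the full weight delta is torch.kron(w1, w2), where either factor may itself be stored as a low-rank pair.

import torch

# Hypothetical LoKR factors for a 16x32 linear weight: 16 = 4*4, 32 = 4*8.
w1 = torch.randn(4, 4)    # small "outer" factor, stored directly
w2_a = torch.randn(4, 4)  # low-rank pair rebuilding the "inner" factor
w2_b = torch.randn(4, 8)
w2 = w2_a @ w2_b          # (4, 8)

weight = torch.kron(w1, w2)  # Kronecker product -> the full (16, 32) delta
print(weight.shape)          # torch.Size([16, 32])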
@@ -159,7 +217,7 @@ class LoRAModuleWrapper:
                 layer = lora.layers.get(name, None)
                 if layer is None:
                     continue
-                output = layer.forward(lora, input_h, output)
+                output += layer.forward(lora, input_h)
 
             return output
 
         return lora_forward
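Put another way, each layer type now returns its own scaled delta and the wrapper accumulates them. A simplified sketch of that contract follows; it is not the project's exact hook, and the function name is illustrative.

def apply_lora_layers(base_output, loras, name, input_h):
    # Sum each applicable LoRA layer's scaled delta onto the base module output.
    output = base_output
    for lora in loras:
        layer = lora.layers.get(name, None)
        if layer is not None:
            output = output + layer.forward(lora, input_h)  # forward returns a delta
    return output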
@@ -307,6 +365,36 @@ class LoRA:
                 else:
                     layer.t2 = None
 
+            # lokr
+            elif "lokr_w1_b" in values or "lokr_w1" in values:
+                if "lokr_w1_b" in values:
+                    rank = values["lokr_w1_b"].shape[0]
+                elif "lokr_w2_b" in values:
+                    rank = values["lokr_w2_b"].shape[0]
+                else:
+                    rank = None  # unscaled
+
+                layer = LoKRLayer(self.name, stem, rank, alpha)
+                layer.org_module = wrapped
+                layer.bias = bias
+
+                if "lokr_w1" in values:
+                    layer.w1 = values["lokr_w1"].to(device=self.device, dtype=self.dtype)
+                else:
+                    layer.w1_a = values["lokr_w1_a"].to(device=self.device, dtype=self.dtype)
+                    layer.w1_b = values["lokr_w1_b"].to(device=self.device, dtype=self.dtype)
+
+                if "lokr_w2" in values:
+                    layer.w2 = values["lokr_w2"].to(device=self.device, dtype=self.dtype)
+                else:
+                    layer.w2_a = values["lokr_w2_a"].to(device=self.device, dtype=self.dtype)
+                    layer.w2_b = values["lokr_w2_b"].to(device=self.device, dtype=self.dtype)
+
+                if "lokr_t2" in values:
+                    layer.t2 = values["lokr_t2"].to(device=self.device, dtype=self.dtype)
+
             else:
                 print(
                     f">> Encountered unknown lora layer module in {self.name}: {stem} - {type(wrapped).__name__}"