Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-16 04:48:14 -05:00)

Compare commits: ryan/sd35 ... ebr/pin-py (32 commits)

| SHA1 |
|---|
| fe6ba3571a |
| e6ab6e0293 |
| 66d9c7c631 |
| fec45f3eb6 |
| 7211d1a6fc |
| f3069754a9 |
| 4f43152aeb |
| 7125055d02 |
| c91a9ce390 |
| 3e7b73da2c |
| 61ac50c00d |
| c1201f0bce |
| acdffac5ad |
| e420300fa4 |
| 260a5a4f9a |
| ed0c2006fe |
| 9ffd888c86 |
| 175a9dc28d |
| 5764e4f7f2 |
| 4275a494b9 |
| a3deb8d30d |
| aafdb0a37b |
| 56a815719a |
| 4db26bfa3a |
| 8d84ccb12b |
| 3321d14997 |
| 43cc4684e1 |
| afa5a4b17c |
| 33c433fe59 |
| 9cd47fa857 |
| 32d9abe802 |
| 3947d4a165 |
@@ -808,7 +808,11 @@ def get_is_installed(
    for model in installed_models:
        if model.source == starter_model.source:
            return True
        if model.name == starter_model.name and model.base == starter_model.base and model.type == starter_model.type:
        if (
            (model.name == starter_model.name or model.name in starter_model.previous_names)
            and model.base == starter_model.base
            and model.type == starter_model.type
        ):
            return True
    return False

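The new branch above also has to keep matching models that were installed under an older starter name. A minimal, self-contained sketch of that rule, using stand-in dataclasses rather than the real InvokeAI model records (field names here are illustrative):

```python
# Minimal sketch of the matching rule above, with stand-in dataclasses instead
# of the real InvokeAI model records (field names here are illustrative).
from dataclasses import dataclass, field

@dataclass
class InstalledModel:
    name: str
    base: str
    type: str
    source: str

@dataclass
class StarterEntry:
    name: str
    base: str
    type: str
    source: str
    previous_names: list[str] = field(default_factory=list)

def get_is_installed(starter: StarterEntry, installed: list[InstalledModel]) -> bool:
    for model in installed:
        if model.source == starter.source:
            return True
        if (
            (model.name == starter.name or model.name in starter.previous_names)
            and model.base == starter.base
            and model.type == starter.type
        ):
            return True
    return False

# A model installed under the old name "canny" still counts for the renamed
# starter entry "Hard Edge Detection (canny)".
old = InstalledModel("canny", "sd-1", "controlnet", "local-install")
renamed = StarterEntry(
    "Hard Edge Detection (canny)", "sd-1", "controlnet",
    "lllyasviel/control_v11p_sd15_canny", previous_names=["canny"],
)
assert get_is_installed(renamed, [old])
```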
@@ -133,7 +133,6 @@ class FieldDescriptions:
    clip_embed_model = "CLIP Embed loader"
    unet = "UNet (scheduler, LoRAs)"
    transformer = "Transformer"
    mmditx = "MMDiTX"
    vae = "VAE"
    cond = "Conditioning tensor"
    controlnet_model = "ControlNet model to load"
@@ -141,7 +140,6 @@ class FieldDescriptions:
    lora_model = "LoRA model to load"
    main_model = "Main model (UNet, VAE, CLIP) to load"
    flux_model = "Flux model (Transformer) to load"
    sd3_model = "SD3 model (MMDiTX) to load"
    sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
    sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
    onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"

@@ -1,86 +0,0 @@
from typing import Literal

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import CheckpointConfigBase, SubModelType


@invocation_output("flux_model_loader_output")
class FluxModelLoaderOutput(BaseInvocationOutput):
    """Flux base model loader output"""

    transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
    clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
    max_seq_len: Literal[256, 512] = OutputField(
        description="The max sequence length to use for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
        title="Max Seq Length",
    )


@invocation(
    "flux_model_loader",
    title="Flux Main Model",
    tags=["model", "flux"],
    category="model",
    version="1.0.4",
    classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
    """Loads a flux base model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.flux_model,
        ui_type=UIType.FluxMainModel,
        input=Input.Direct,
    )

    t5_encoder_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
    )

    clip_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP Embed",
    )

    vae_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
    )

    def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
        for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
            if not context.models.exists(key):
                raise ValueError(f"Unknown model: {key}")

        transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})

        tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        transformer_config = context.models.get_config(transformer)
        assert isinstance(transformer_config, CheckpointConfigBase)

        return FluxModelLoaderOutput(
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
            max_seq_len=max_seq_lengths[transformer_config.config_path],
        )
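The deleted loader resolves max_seq_len by indexing max_seq_lengths with the transformer's config_path. A small sketch of that lookup, assuming a plain dict keyed by legacy config name (the real table lives in invokeai.backend.flux.util and its exact keys may differ):

```python
# Hedged sketch: a mapping from legacy config name to T5 max sequence length.
# The real table lives in invokeai.backend.flux.util; the key names here are assumptions.
from typing import Dict, Literal

max_seq_lengths: Dict[str, Literal[256, 512]] = {
    "flux-dev": 512,      # dev transformer uses the longer T5 context
    "flux-schnell": 256,  # schnell transformer was trained with 256 tokens
}

def resolve_max_seq_len(config_path: str) -> Literal[256, 512]:
    # Mirrors max_seq_lengths[transformer_config.config_path] in the loader above.
    return max_seq_lengths[config_path]

assert resolve_max_seq_len("flux-schnell") == 256
```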
@@ -1,5 +1,5 @@
import copy
from typing import List, Optional
from typing import List, Literal, Optional

from pydantic import BaseModel, Field

@@ -13,9 +13,11 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
    CheckpointConfigBase,
    ModelType,
    SubModelType,
)
@@ -137,6 +139,78 @@ class ModelIdentifierInvocation(BaseInvocation):
        return ModelIdentifierOutput(model=self.model)


@invocation_output("flux_model_loader_output")
class FluxModelLoaderOutput(BaseInvocationOutput):
    """Flux base model loader output"""

    transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
    clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
    max_seq_len: Literal[256, 512] = OutputField(
        description="The max sequence length to use for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
        title="Max Seq Length",
    )


@invocation(
    "flux_model_loader",
    title="Flux Main Model",
    tags=["model", "flux"],
    category="model",
    version="1.0.4",
    classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
    """Loads a flux base model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.flux_model,
        ui_type=UIType.FluxMainModel,
        input=Input.Direct,
    )

    t5_encoder_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
    )

    clip_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP Embed",
    )

    vae_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
    )

    def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
        for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
            if not context.models.exists(key):
                raise ValueError(f"Unknown model: {key}")

        transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})

        tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        transformer_config = context.models.get_config(transformer)
        assert isinstance(transformer_config, CheckpointConfigBase)

        return FluxModelLoaderOutput(
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
            max_seq_len=max_seq_lengths[transformer_config.config_path],
        )


@invocation(
    "main_model_loader",
    title="Main Model",

@@ -1,102 +0,0 @@
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import CheckpointConfigBase, SubModelType


@invocation_output("sd3_model_loader_output")
class Sd3ModelLoaderOutput(BaseInvocationOutput):
    """SD3 base model loader output."""

    mmditx: TransformerField = OutputField(description=FieldDescriptions.mmditx, title="MMDiTX")
    clip_l: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP L")
    clip_g: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP G")
    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


@invocation(
    "sd3_model_loader",
    title="SD3 Main Model",
    tags=["model", "sd3"],
    category="model",
    version="1.0.0",
    classification=Classification.Prototype,
)
class Sd3ModelLoaderInvocation(BaseInvocation):
    """Loads an SD3 base model, outputting its submodels."""

    # TODO(ryand): Create a UIType.Sd3MainModelField to use here.
    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.sd3_model,
        ui_type=UIType.MainModel,
        input=Input.Direct,
    )

    # TODO(ryand): Make the text encoders optional.
    # Note: The text encoders are optional for SD3. The model was trained with dropout, so any can be left out at
    # inference time. Typically, only the T5 encoder is omitted, since it is the largest by far.
    t5_encoder_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
    )

    clip_l_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP L Embed",
    )

    clip_g_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP G Embed",
    )

    # TODO(ryand): Create a UIType.Sd3VaeModelField to use here.
    vae_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.VAEModel, title="VAE"
    )

    def invoke(self, context: InvocationContext) -> Sd3ModelLoaderOutput:
        for key in [
            self.model.key,
            self.t5_encoder_model.key,
            self.clip_l_embed_model.key,
            self.clip_g_embed_model.key,
            self.vae_model.key,
        ]:
            if not context.models.exists(key):
                raise ValueError(f"Unknown model: {key}")

        # TODO(ryand): Figure out the sub-model types for SD3.
        mmditx = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})

        tokenizer_l = self.clip_l_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder_l = self.clip_l_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer_g = self.clip_g_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder_g = self.clip_g_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer_t5 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        transformer_config = context.models.get_config(mmditx)
        assert isinstance(transformer_config, CheckpointConfigBase)

        return Sd3ModelLoaderOutput(
            mmditx=TransformerField(transformer=mmditx, loras=[]),
            clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
            clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
        )
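The TODO about optional text encoders rests on SD3 having been trained with encoder dropout, so any encoder can be left out at inference time. A hedged sketch of how a missing encoder could be zero-filled when assembling conditioning; the zero-fill strategy and the feature dimensions are assumptions for illustration, not the InvokeAI implementation:

```python
# Hedged sketch (not InvokeAI API): SD3-style prompt conditioning where any text
# encoder may be absent. A missing encoder contributes zeros, mirroring the
# dropout the note above describes. Dimensions are illustrative only.
from typing import Optional, Sequence

def combine_sd3_conditioning(
    clip_l: Optional[Sequence[float]],
    clip_g: Optional[Sequence[float]],
    t5: Optional[Sequence[float]],
    clip_l_dim: int = 768,
    clip_g_dim: int = 1280,
    t5_dim: int = 4096,
) -> list[float]:
    def or_zeros(vec: Optional[Sequence[float]], dim: int) -> list[float]:
        return list(vec) if vec is not None else [0.0] * dim

    # Concatenate CLIP-L, CLIP-G, and T5 features; an omitted encoder (typically
    # T5, the largest) is replaced by zeros rather than causing a failure.
    return or_zeros(clip_l, clip_l_dim) + or_zeros(clip_g, clip_g_dim) + or_zeros(t5, t5_dim)

cond = combine_sd3_conditioning(clip_l=[0.1] * 768, clip_g=[0.2] * 1280, t5=None)
assert len(cond) == 768 + 1280 + 4096
```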
@@ -53,8 +53,6 @@ class BaseModelType(str, Enum):
    Any = "any"
    StableDiffusion1 = "sd-1"
    StableDiffusion2 = "sd-2"
    # TODO(ryand): Should this just be StableDiffusion3?
    StableDiffusion35 = "sd-3.5"
    StableDiffusionXL = "sdxl"
    StableDiffusionXLRefiner = "sdxl-refiner"
    Flux = "flux"

@@ -1,55 +0,0 @@
from pathlib import Path
from typing import Optional

from invokeai.backend.model_manager.config import (
    AnyModel,
    AnyModelConfig,
    BaseModelType,
    CheckpointConfigBase,
    MainCheckpointConfig,
    ModelFormat,
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry


@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion35, type=ModelType.Main, format=ModelFormat.Checkpoint)
class FluxCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, CheckpointConfigBase):
            raise ValueError("Only CheckpointConfigBase models are currently supported here.")

        match submodel_type:
            case SubModelType.Transformer:
                return self._load_from_singlefile(config)

        raise ValueError(
            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )

    def _load_from_singlefile(
        self,
        config: AnyModelConfig,
    ) -> AnyModel:
        assert isinstance(config, MainCheckpointConfig)
        model_path = Path(config.path)

        # model = Flux(params[config.config_path])
        # sd = load_file(model_path)
        # if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
        #     sd = convert_bundle_to_flux_transformer_checkpoint(sd)
        # new_sd_size = sum([ten.nelement() * torch.bfloat16.itemsize for ten in sd.values()])
        # self._ram_cache.make_room(new_sd_size)
        # for k in sd.keys():
        #     # We need to cast to bfloat16 due to it being the only currently supported dtype for inference
        #     sd[k] = sd[k].to(torch.bfloat16)
        # model.load_state_dict(sd, assign=True)
        return model
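_load_from_singlefile above is still a stub: it returns model, which only exists inside the commented-out lines. A hedged sketch of the load flow those comments describe; the model constructor and the bundle-conversion helper are taken from the comments rather than confirmed APIs, so they are passed in as callables instead of imported:

```python
# Hedged sketch of the single-file load flow the comments above describe; the
# Flux constructor and bundle-conversion helper are assumptions from those
# comments, so they are injected rather than imported.
from pathlib import Path
from typing import Callable, Dict

import torch
from safetensors.torch import load_file

BUNDLE_MARKER_KEY = "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale"

def load_transformer_from_singlefile(
    model_path: Path,
    build_model: Callable[[], torch.nn.Module],
    convert_bundle: Callable[[Dict[str, torch.Tensor]], Dict[str, torch.Tensor]],
) -> torch.nn.Module:
    model = build_model()
    sd = load_file(model_path)
    if BUNDLE_MARKER_KEY in sd:
        # A bundled (ComfyUI-style) checkpoint: strip it down to the transformer weights.
        sd = convert_bundle(sd)
    for k in sd.keys():
        # Cast to bfloat16, the only dtype the comments say is supported for inference.
        sd[k] = sd[k].to(torch.bfloat16)
    model.load_state_dict(sd, assign=True)
    return model
```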
@@ -37,7 +37,6 @@ from invokeai.backend.model_manager.config import (
from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.sd3.sd3_state_dict_utils import is_sd3_checkpoint
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.util.silence_warnings import SilenceWarnings

@@ -121,7 +120,6 @@ class ModelProbe(object):
        "T2IAdapter": ModelType.T2IAdapter,
        "CLIPModel": ModelType.CLIPEmbed,
        "CLIPTextModel": ModelType.CLIPEmbed,
        "CLIPTextModelWithProjection": ModelType.CLIPEmbed,
        "T5EncoderModel": ModelType.T5Encoder,
        "FluxControlNetModel": ModelType.ControlNet,
    }
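This map keys off the architecture class name recorded in a model folder's config file. A hedged, standalone sketch of that lookup, with stand-in dict values and file handling rather than the ModelProbe API:

```python
# Hedged sketch (stand-in names): look up the model type from the `_class_name`
# field of a diffusers config.json, or the `architectures` list of a transformers
# config, as the probe's class map above implies.
import json
from pathlib import Path

CLASS_TO_TYPE = {
    "CLIPModel": "clip_embed",
    "CLIPTextModel": "clip_embed",
    "T5EncoderModel": "t5_encoder",
    "FluxControlNetModel": "controlnet",
}

def probe_folder_type(model_dir: Path) -> str:
    config = json.loads((model_dir / "config.json").read_text())
    class_name = config.get("_class_name") or config.get("architectures", ["?"])[0]
    try:
        return CLASS_TO_TYPE[class_name]
    except KeyError:
        raise ValueError(f"Unrecognized model class: {class_name}")
```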
@@ -243,11 +241,6 @@ class ModelProbe(object):
        for key in [str(k) for k in ckpt.keys()]:
            if key.startswith(
                (
                    # The following prefixes appear when multiple models have been bundled together in a single file (I
                    # believe the format originated in ComfyUI).
                    # first_stage_model = VAE
                    # cond_stage_model = Text Encoder
                    # model.diffusion_model = UNet / Transformer
                    "cond_stage_model.",
                    "first_stage_model.",
                    "model.diffusion_model.",
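Those prefixes are what mark an all-in-one checkpoint. A short sketch that splits such a bundle into its component state dicts using the same prefixes (key names are illustrative):

```python
# Hedged sketch: split a bundled (ComfyUI-style) state dict into its component
# models using the prefixes listed above. Key names are illustrative.
from typing import Dict

BUNDLE_PREFIXES = {
    "first_stage_model.": "vae",
    "cond_stage_model.": "text_encoder",
    "model.diffusion_model.": "diffusion_model",
}

def split_bundle(state_dict: Dict[str, object]) -> Dict[str, Dict[str, object]]:
    parts: Dict[str, Dict[str, object]] = {name: {} for name in BUNDLE_PREFIXES.values()}
    for key, tensor in state_dict.items():
        for prefix, part in BUNDLE_PREFIXES.items():
            if key.startswith(prefix):
                # Strip the bundle prefix so each part looks like a standalone checkpoint.
                parts[part][key[len(prefix):]] = tensor
                break
    return parts

demo = {"first_stage_model.encoder.conv_in.weight": 0, "model.diffusion_model.x.weight": 1}
assert set(split_bundle(demo)["vae"]) == {"encoder.conv_in.weight"}
```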
@@ -404,9 +397,6 @@ class ModelProbe(object):
            # is used rather than attempting to support flux with separate model types and format
            # If changed in the future, please fix me
            config_file = "flux-schnell"
        elif base_type == BaseModelType.StableDiffusion35:
            # TODO(ryand): Think about what to do here.
            config_file = "sd3.5-large"
        else:
            config_file = LEGACY_CONFIGS[base_type][variant_type]
            if isinstance(config_file, dict):  # need another tier for sd-2.x models
@@ -472,8 +462,9 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "normal": "normalbae_image_processor",
    "sketch": "pidi_image_processor",
    "scribble": "lineart_image_processor",
    "lineart": "lineart_image_processor",
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",
@@ -526,7 +517,7 @@ class CheckpointProbeBase(ProbeBase):
    def get_variant_type(self) -> ModelVariantType:
        model_type = ModelProbe.get_model_type_from_checkpoint(self.model_path, self.checkpoint)
        base_type = self.get_base_type()
        if model_type != ModelType.Main or base_type in (BaseModelType.Flux, BaseModelType.StableDiffusion35):
        if model_type != ModelType.Main or base_type == BaseModelType.Flux:
            return ModelVariantType.Normal
        state_dict = self.checkpoint.get("state_dict") or self.checkpoint
        in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1]
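The variant is then derived from that in_channels value. The branch that consumes it is not shown in this hunk, so the 4/5/9 mapping below is the conventional SD checkpoint layout, stated here as an assumption:

```python
# Hedged sketch: infer the SD variant from the first UNet conv's input channels.
# The 4/5/9 mapping is the conventional checkpoint layout, assumed here since the
# branch taken after this hunk is not shown.
import torch

def variant_from_in_channels(in_channels: int) -> str:
    if in_channels == 9:
        return "inpaint"   # 4 latent + 4 masked-image latent + 1 mask channel
    if in_channels == 5:
        return "depth"     # 4 latent + 1 depth channel
    if in_channels == 4:
        return "normal"
    raise ValueError(f"Unexpected in_channels: {in_channels}")

# Shape of model.diffusion_model.input_blocks.0.0.weight for an inpaint model.
weight = torch.zeros(320, 9, 3, 3)
assert variant_from_in_channels(weight.shape[1]) == "inpaint"
```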
@@ -551,10 +542,6 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
            or "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in state_dict
        ):
            return BaseModelType.Flux

        if is_sd3_checkpoint(state_dict):
            return BaseModelType.StableDiffusion35

        key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
        if key_name in state_dict and state_dict[key_name].shape[-1] == 768:
            return BaseModelType.StableDiffusion1

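The base type is likewise fingerprinted from architecture-specific keys and from the cross-attention width (768 for SD 1.x). A hedged standalone sketch of that check; the 1024-wide SD 2.x branch is an assumption about code outside this hunk:

```python
# Hedged sketch of key-fingerprint base detection, mirroring the checks above.
# Only the Flux and SD1 checks come from the hunk verbatim; the SD2 width (1024)
# is an assumption about the branch that follows it.
from typing import Dict

import torch

FLUX_KEY = "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale"
SD_XATTN_KEY = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"

def detect_base(state_dict: Dict[str, torch.Tensor]) -> str:
    if FLUX_KEY in state_dict:
        return "flux"
    if SD_XATTN_KEY in state_dict:
        width = state_dict[SD_XATTN_KEY].shape[-1]
        if width == 768:
            return "sd-1"
        if width == 1024:
            return "sd-2"  # assumption: SD 2.x uses a 1024-dim text-encoder context
    raise ValueError("Could not determine base model type from checkpoint keys.")

assert detect_base({SD_XATTN_KEY: torch.zeros(320, 768)}) == "sd-1"
```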
@@ -13,6 +13,9 @@ class StarterModelWithoutDependencies(BaseModel):
    type: ModelType
    format: Optional[ModelFormat] = None
    is_installed: bool = False
    # Allows us to track which models a user has installed across name changes within starter models.
    # If you update a starter model name, please add the old one to this list for that starter model.
    previous_names: list[str] = []


class StarterModel(StarterModelWithoutDependencies):
@@ -243,44 +246,49 @@ easy_neg_sd1 = StarterModel(
|
||||
# endregion
|
||||
# region IP Adapter
|
||||
ip_adapter_sd1 = StarterModel(
|
||||
name="IP Adapter",
|
||||
name="Standard Reference (IP Adapter)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="https://huggingface.co/InvokeAI/ip_adapter_sd15/resolve/main/ip-adapter_sd15.safetensors",
|
||||
description="IP-Adapter for SD 1.5 models",
|
||||
description="References images with a more generalized/looser degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sd_image_encoder],
|
||||
previous_names=["IP Adapter"],
|
||||
)
|
||||
ip_adapter_plus_sd1 = StarterModel(
|
||||
name="IP Adapter Plus",
|
||||
name="Precise Reference (IP Adapter Plus)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="https://huggingface.co/InvokeAI/ip_adapter_plus_sd15/resolve/main/ip-adapter-plus_sd15.safetensors",
|
||||
description="Refined IP-Adapter for SD 1.5 models",
|
||||
description="References images with a higher degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sd_image_encoder],
|
||||
previous_names=["IP Adapter Plus"],
|
||||
)
|
||||
ip_adapter_plus_face_sd1 = StarterModel(
|
||||
name="IP Adapter Plus Face",
|
||||
name="Face Reference (IP Adapter Plus Face)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15/resolve/main/ip-adapter-plus-face_sd15.safetensors",
|
||||
description="Refined IP-Adapter for SD 1.5 models, adapted for faces",
|
||||
description="References images with a higher degree of precision, adapted for faces",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sd_image_encoder],
|
||||
previous_names=["IP Adapter Plus Face"],
|
||||
)
|
||||
ip_adapter_sdxl = StarterModel(
|
||||
name="IP Adapter SDXL",
|
||||
name="Standard Reference (IP Adapter ViT-H)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h/resolve/main/ip-adapter_sdxl_vit-h.safetensors",
|
||||
description="IP-Adapter for SDXL models",
|
||||
description="References images with a higher degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sdxl_image_encoder],
|
||||
previous_names=["IP Adapter SDXL"],
|
||||
)
|
||||
ip_adapter_flux = StarterModel(
|
||||
name="XLabs FLUX IP-Adapter",
|
||||
name="Standard Reference (XLabs FLUX IP-Adapter)",
|
||||
base=BaseModelType.Flux,
|
||||
source="https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/flux-ip-adapter.safetensors",
|
||||
description="FLUX IP-Adapter",
|
||||
description="References images with a more generalized/looser degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[clip_vit_l_image_encoder],
|
||||
previous_names=["XLabs FLUX IP-Adapter"],
|
||||
)
|
||||
# endregion
|
||||
# region ControlNet
|
||||
@@ -299,157 +307,162 @@ qr_code_cnet_sdxl = StarterModel(
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
canny_sd1 = StarterModel(
|
||||
name="canny",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_canny",
|
||||
description="ControlNet weights trained on sd-1.5 with canny conditioning.",
|
||||
description="Uses detected edges in the image to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["canny"],
|
||||
)
|
||||
inpaint_cnet_sd1 = StarterModel(
|
||||
name="inpaint",
|
||||
name="Inpainting",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_inpaint",
|
||||
description="ControlNet weights trained on sd-1.5 with canny conditioning, inpaint version",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["inpaint"],
|
||||
)
|
||||
mlsd_sd1 = StarterModel(
|
||||
name="mlsd",
|
||||
name="Line Drawing (mlsd)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_mlsd",
|
||||
description="ControlNet weights trained on sd-1.5 with canny conditioning, MLSD version",
|
||||
description="Uses straight line detection for controlling the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["mlsd"],
|
||||
)
|
||||
depth_sd1 = StarterModel(
|
||||
name="depth",
|
||||
name="Depth Map",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11f1p_sd15_depth",
|
||||
description="ControlNet weights trained on sd-1.5 with depth conditioning",
|
||||
description="Uses depth information in the image to control the depth in the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["depth"],
|
||||
)
|
||||
normal_bae_sd1 = StarterModel(
|
||||
name="normal_bae",
|
||||
name="Lighting Detection (Normals)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_normalbae",
|
||||
description="ControlNet weights trained on sd-1.5 with normalbae image conditioning",
|
||||
description="Uses detected lighting information to guide the lighting of the composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["normal_bae"],
|
||||
)
|
||||
seg_sd1 = StarterModel(
|
||||
name="seg",
|
||||
name="Segmentation Map",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_seg",
|
||||
description="ControlNet weights trained on sd-1.5 with seg image conditioning",
|
||||
description="Uses segmentation maps to guide the structure of the composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["seg"],
|
||||
)
|
||||
lineart_sd1 = StarterModel(
|
||||
name="lineart",
|
||||
name="Lineart",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_lineart",
|
||||
description="ControlNet weights trained on sd-1.5 with lineart image conditioning",
|
||||
description="Uses lineart detection to guide the lighting of the composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["lineart"],
|
||||
)
|
||||
lineart_anime_sd1 = StarterModel(
|
||||
name="lineart_anime",
|
||||
name="Lineart Anime",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15s2_lineart_anime",
|
||||
description="ControlNet weights trained on sd-1.5 with anime image conditioning",
|
||||
description="Uses anime lineart detection to guide the lighting of the composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["lineart_anime"],
|
||||
)
|
||||
openpose_sd1 = StarterModel(
|
||||
name="openpose",
|
||||
name="Pose Detection (openpose)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_openpose",
|
||||
description="ControlNet weights trained on sd-1.5 with openpose image conditioning",
|
||||
description="Uses pose information to control the pose of human characters in the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["openpose"],
|
||||
)
|
||||
scribble_sd1 = StarterModel(
|
||||
name="scribble",
|
||||
name="Contour Detection (scribble)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_scribble",
|
||||
description="ControlNet weights trained on sd-1.5 with scribble image conditioning",
|
||||
description="Uses edges, contours, or line art in the image to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["scribble"],
|
||||
)
|
||||
softedge_sd1 = StarterModel(
|
||||
name="softedge",
|
||||
name="Soft Edge Detection (softedge)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11p_sd15_softedge",
|
||||
description="ControlNet weights trained on sd-1.5 with soft edge conditioning",
|
||||
description="Uses a soft edge detection map to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["softedge"],
|
||||
)
|
||||
shuffle_sd1 = StarterModel(
|
||||
name="shuffle",
|
||||
name="Remix (shuffle)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11e_sd15_shuffle",
|
||||
description="ControlNet weights trained on sd-1.5 with shuffle image conditioning",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["shuffle"],
|
||||
)
|
||||
tile_sd1 = StarterModel(
|
||||
name="tile",
|
||||
name="Tile",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11f1e_sd15_tile",
|
||||
description="ControlNet weights trained on sd-1.5 with tiled image conditioning",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
ip2p_sd1 = StarterModel(
|
||||
name="ip2p",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="lllyasviel/control_v11e_sd15_ip2p",
|
||||
description="ControlNet weights trained on sd-1.5 with ip2p conditioning.",
|
||||
description="Uses image data to replicate exact colors/structure in the resulting generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["tile"],
|
||||
)
|
||||
canny_sdxl = StarterModel(
|
||||
name="canny-sdxl",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-canny-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with canny conditioning, by Xinsir.",
|
||||
description="Uses detected edges in the image to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["canny-sdxl"],
|
||||
)
|
||||
depth_sdxl = StarterModel(
|
||||
name="depth-sdxl",
|
||||
name="Depth Map",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="diffusers/controlNet-depth-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with depth conditioning.",
|
||||
description="Uses depth information in the image to control the depth in the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["depth-sdxl"],
|
||||
)
|
||||
softedge_sdxl = StarterModel(
|
||||
name="softedge-dexined-sdxl",
|
||||
name="Soft Edge Detection (softedge)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="SargeZT/controlNet-sd-xl-1.0-softedge-dexined",
|
||||
description="ControlNet weights trained on sdxl-1.0 with dexined soft edge preprocessing.",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
depth_zoe_16_sdxl = StarterModel(
|
||||
name="depth-16bit-zoe-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="SargeZT/controlNet-sd-xl-1.0-depth-16bit-zoe",
|
||||
description="ControlNet weights trained on sdxl-1.0 with Zoe's preprocessor (16 bits).",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
depth_zoe_32_sdxl = StarterModel(
|
||||
name="depth-zoe-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="diffusers/controlNet-zoe-depth-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with Zoe's preprocessor (32 bits).",
|
||||
description="Uses a soft edge detection map to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["softedge-dexined-sdxl"],
|
||||
)
|
||||
openpose_sdxl = StarterModel(
|
||||
name="openpose-sdxl",
|
||||
name="Pose Detection (openpose)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-openpose-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 compatible with the DWPose processor by Xinsir.",
|
||||
description="Uses pose information to control the pose of human characters in the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["openpose-sdxl", "controlnet-openpose-sdxl"],
|
||||
)
|
||||
scribble_sdxl = StarterModel(
|
||||
name="scribble-sdxl",
|
||||
name="Contour Detection (scribble)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-scribble-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 compatible with various lineart processors and black/white sketches by Xinsir.",
|
||||
description="Uses edges, contours, or line art in the image to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["scribble-sdxl", "controlnet-scribble-sdxl"],
|
||||
)
|
||||
tile_sdxl = StarterModel(
|
||||
name="tile-sdxl",
|
||||
name="Tile",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-tile-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with tiled image conditioning",
|
||||
description="Uses image data to replicate exact colors/structure in the resulting generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["tile-sdxl"],
|
||||
)
|
||||
union_cnet_sdxl = StarterModel(
|
||||
name="Multi-Guidance Detection (Union Pro)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="InvokeAI/Xinsir-SDXL_Controlnet_Union",
|
||||
description="A unified ControlNet for SDXL model that supports 10+ control types",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
union_cnet_flux = StarterModel(
|
||||
@@ -462,60 +475,52 @@ union_cnet_flux = StarterModel(
|
||||
# endregion
|
||||
# region T2I Adapter
|
||||
t2i_canny_sd1 = StarterModel(
|
||||
name="canny-sd15",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_canny_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with canny conditioning.",
|
||||
description="Uses detected edges in the image to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["canny-sd15"],
|
||||
)
|
||||
t2i_sketch_sd1 = StarterModel(
|
||||
name="sketch-sd15",
|
||||
name="Sketch",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_sketch_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with sketch conditioning.",
|
||||
description="Uses a sketch to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["sketch-sd15"],
|
||||
)
|
||||
t2i_depth_sd1 = StarterModel(
|
||||
name="depth-sd15",
|
||||
name="Depth Map",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_depth_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with depth conditioning.",
|
||||
type=ModelType.T2IAdapter,
|
||||
)
|
||||
t2i_zoe_depth_sd1 = StarterModel(
|
||||
name="zoedepth-sd15",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_zoedepth_sd15v1",
|
||||
description="T2I Adapter weights trained on sd-1.5 with zoe depth conditioning.",
|
||||
description="Uses depth information in the image to control the depth in the generation.",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["depth-sd15"],
|
||||
)
|
||||
t2i_canny_sdxl = StarterModel(
|
||||
name="canny-sdxl",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-canny-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with canny conditioning.",
|
||||
type=ModelType.T2IAdapter,
|
||||
)
|
||||
t2i_zoe_depth_sdxl = StarterModel(
|
||||
name="zoedepth-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-depth-zoe-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with zoe depth conditioning.",
|
||||
description="Uses detected edges in the image to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["canny-sdxl"],
|
||||
)
|
||||
t2i_lineart_sdxl = StarterModel(
|
||||
name="lineart-sdxl",
|
||||
name="Lineart",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-lineart-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with lineart conditioning.",
|
||||
description="Uses lineart detection to guide the lighting of the composition.",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["lineart-sdxl"],
|
||||
)
|
||||
t2i_sketch_sdxl = StarterModel(
|
||||
name="sketch-sdxl",
|
||||
name="Sketch",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-sketch-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with sketch conditioning.",
|
||||
description="Uses a sketch to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["sketch-sdxl"],
|
||||
)
|
||||
# endregion
|
||||
# region SpandrelImageToImage
|
||||
@@ -600,22 +605,18 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
softedge_sd1,
|
||||
shuffle_sd1,
|
||||
tile_sd1,
|
||||
ip2p_sd1,
|
||||
canny_sdxl,
|
||||
depth_sdxl,
|
||||
softedge_sdxl,
|
||||
depth_zoe_16_sdxl,
|
||||
depth_zoe_32_sdxl,
|
||||
openpose_sdxl,
|
||||
scribble_sdxl,
|
||||
tile_sdxl,
|
||||
union_cnet_sdxl,
|
||||
union_cnet_flux,
|
||||
t2i_canny_sd1,
|
||||
t2i_sketch_sd1,
|
||||
t2i_depth_sd1,
|
||||
t2i_zoe_depth_sd1,
|
||||
t2i_canny_sdxl,
|
||||
t2i_zoe_depth_sdxl,
|
||||
t2i_lineart_sdxl,
|
||||
t2i_sketch_sdxl,
|
||||
realesrgan_x4,
|
||||
@@ -646,7 +647,6 @@ sd1_bundle: list[StarterModel] = [
|
||||
softedge_sd1,
|
||||
shuffle_sd1,
|
||||
tile_sd1,
|
||||
ip2p_sd1,
|
||||
swinir,
|
||||
]
|
||||
|
||||
@@ -657,8 +657,6 @@ sdxl_bundle: list[StarterModel] = [
|
||||
canny_sdxl,
|
||||
depth_sdxl,
|
||||
softedge_sdxl,
|
||||
depth_zoe_16_sdxl,
|
||||
depth_zoe_32_sdxl,
|
||||
openpose_sdxl,
|
||||
scribble_sdxl,
|
||||
tile_sdxl,
|
||||
|
||||
@@ -1,891 +0,0 @@
|
||||
# This file was originally copied from:
|
||||
# https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/mmditx.py
|
||||
|
||||
|
||||
### This file contains impls for MM-DiT, the core model component of SD3
|
||||
|
||||
import math
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from einops import rearrange, repeat
|
||||
|
||||
from invokeai.backend.sd3.other_impls import Mlp, attention
|
||||
|
||||
|
||||
class PatchEmbed(torch.nn.Module):
|
||||
"""2D Image to Patch Embedding"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
img_size: Optional[int] = 224,
|
||||
patch_size: int = 16,
|
||||
in_chans: int = 3,
|
||||
embed_dim: int = 768,
|
||||
flatten: bool = True,
|
||||
bias: bool = True,
|
||||
strict_img_size: bool = True,
|
||||
dynamic_img_pad: bool = False,
|
||||
dtype: torch.dtype | None = None,
|
||||
device: torch.device | None = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.patch_size = (patch_size, patch_size)
|
||||
if img_size is not None:
|
||||
self.img_size = (img_size, img_size)
|
||||
self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size, strict=False)])
|
||||
self.num_patches = self.grid_size[0] * self.grid_size[1]
|
||||
else:
|
||||
self.img_size = None
|
||||
self.grid_size = None
|
||||
self.num_patches = None
|
||||
|
||||
# flatten spatial dim and transpose to channels last, kept for bwd compat
|
||||
self.flatten = flatten
|
||||
self.strict_img_size = strict_img_size
|
||||
self.dynamic_img_pad = dynamic_img_pad
|
||||
|
||||
self.proj = torch.nn.Conv2d(
|
||||
in_chans,
|
||||
embed_dim,
|
||||
kernel_size=patch_size,
|
||||
stride=patch_size,
|
||||
bias=bias,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
x = self.proj(x)
|
||||
if self.flatten:
|
||||
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
|
||||
return x
|
||||
|
||||
|
||||
def modulate(x: torch.Tensor, shift: torch.Tensor | None, scale: torch.Tensor) -> torch.Tensor:
|
||||
if shift is None:
|
||||
shift = torch.zeros_like(scale)
|
||||
return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
|
||||
|
||||
|
||||
#################################################################################
|
||||
# Sine/Cosine Positional Embedding Functions #
|
||||
#################################################################################
|
||||
|
||||
|
||||
def get_2d_sincos_pos_embed(
|
||||
embed_dim: int,
|
||||
grid_size: int,
|
||||
cls_token: bool = False,
|
||||
extra_tokens: int = 0,
|
||||
scaling_factor: Optional[float] = None,
|
||||
offset: Optional[float] = None,
|
||||
):
|
||||
"""
|
||||
grid_size: int of the grid height and width
|
||||
return:
|
||||
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
||||
"""
|
||||
grid_h = np.arange(grid_size, dtype=np.float32)
|
||||
grid_w = np.arange(grid_size, dtype=np.float32)
|
||||
grid = np.meshgrid(grid_w, grid_h) # here w goes first
|
||||
grid = np.stack(grid, axis=0)
|
||||
if scaling_factor is not None:
|
||||
grid = grid / scaling_factor
|
||||
if offset is not None:
|
||||
grid = grid - offset
|
||||
grid = grid.reshape([2, 1, grid_size, grid_size])
|
||||
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
|
||||
if cls_token and extra_tokens > 0:
|
||||
pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
|
||||
return pos_embed
|
||||
|
||||
|
||||
def get_2d_sincos_pos_embed_from_grid(embed_dim: int, grid):
|
||||
assert embed_dim % 2 == 0
|
||||
# use half of dimensions to encode grid_h
|
||||
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
|
||||
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
|
||||
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
|
||||
return emb
|
||||
|
||||
|
||||
def get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos):
|
||||
"""
|
||||
embed_dim: output dimension for each position
|
||||
pos: a list of positions to be encoded: size (M,)
|
||||
out: (M, D)
|
||||
"""
|
||||
assert embed_dim % 2 == 0
|
||||
omega = np.arange(embed_dim // 2, dtype=np.float64)
|
||||
omega /= embed_dim / 2.0
|
||||
omega = 1.0 / 10000**omega # (D/2,)
|
||||
pos = pos.reshape(-1) # (M,)
|
||||
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
|
||||
emb_sin = np.sin(out) # (M, D/2)
|
||||
emb_cos = np.cos(out) # (M, D/2)
|
||||
return np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
|
||||
|
||||
|
||||
#################################################################################
|
||||
# Embedding Layers for Timesteps and Class Labels #
|
||||
#################################################################################
|
||||
|
||||
|
||||
class TimestepEmbedder(torch.nn.Module):
|
||||
"""Embeds scalar timesteps into vector representations."""
|
||||
|
||||
def __init__(self, hidden_size, frequency_embedding_size=256, dtype=None, device=None):
|
||||
super().__init__()
|
||||
self.mlp = torch.nn.Sequential(
|
||||
torch.nn.Linear(
|
||||
frequency_embedding_size,
|
||||
hidden_size,
|
||||
bias=True,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
),
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(hidden_size, hidden_size, bias=True, dtype=dtype, device=device),
|
||||
)
|
||||
self.frequency_embedding_size = frequency_embedding_size
|
||||
|
||||
@staticmethod
|
||||
def timestep_embedding(t, dim, max_period=10000):
|
||||
"""
|
||||
Create sinusoidal timestep embeddings.
|
||||
:param t: a 1-D Tensor of N indices, one per batch element.
|
||||
These may be fractional.
|
||||
:param dim: the dimension of the output.
|
||||
:param max_period: controls the minimum frequency of the embeddings.
|
||||
:return: an (N, D) Tensor of positional embeddings.
|
||||
"""
|
||||
half = dim // 2
|
||||
freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
|
||||
device=t.device
|
||||
)
|
||||
args = t[:, None].float() * freqs[None]
|
||||
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
|
||||
if dim % 2:
|
||||
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
|
||||
if torch.is_floating_point(t):
|
||||
embedding = embedding.to(dtype=t.dtype)
|
||||
return embedding
|
||||
|
||||
def forward(self, t, dtype, **kwargs):
|
||||
t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype)
|
||||
t_emb = self.mlp(t_freq)
|
||||
return t_emb
|
||||
|
||||
|
||||
class VectorEmbedder(torch.nn.Module):
|
||||
"""Embeds a flat vector of dimension input_dim"""
|
||||
|
||||
def __init__(self, input_dim: int, hidden_size: int, dtype=None, device=None):
|
||||
super().__init__()
|
||||
self.mlp = torch.nn.Sequential(
|
||||
torch.nn.Linear(input_dim, hidden_size, bias=True, dtype=dtype, device=device),
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(hidden_size, hidden_size, bias=True, dtype=dtype, device=device),
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
return self.mlp(x)
|
||||
|
||||
|
||||
#################################################################################
|
||||
# Core DiT Model #
|
||||
#################################################################################
|
||||
|
||||
|
||||
def split_qkv(qkv, head_dim):
|
||||
qkv = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, -1, head_dim).movedim(2, 0)
|
||||
return qkv[0], qkv[1], qkv[2]
|
||||
|
||||
|
||||
def optimized_attention(qkv, num_heads):
|
||||
return attention(qkv[0], qkv[1], qkv[2], num_heads)
|
||||
|
||||
|
||||
class SelfAttention(torch.nn.Module):
|
||||
ATTENTION_MODES = ("xformers", "torch", "torch-hb", "math", "debug")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
num_heads: int = 8,
|
||||
qkv_bias: bool = False,
|
||||
qk_scale: Optional[float] = None,
|
||||
attn_mode: str = "xformers",
|
||||
pre_only: bool = False,
|
||||
qk_norm: Optional[str] = None,
|
||||
rmsnorm: bool = False,
|
||||
dtype=None,
|
||||
device=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
self.head_dim = dim // num_heads
|
||||
|
||||
self.qkv = torch.nn.Linear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device)
|
||||
if not pre_only:
|
||||
self.proj = torch.nn.Linear(dim, dim, dtype=dtype, device=device)
|
||||
assert attn_mode in self.ATTENTION_MODES
|
||||
self.attn_mode = attn_mode
|
||||
self.pre_only = pre_only
|
||||
|
||||
if qk_norm == "rms":
|
||||
self.ln_q = RMSNorm(
|
||||
self.head_dim,
|
||||
elementwise_affine=True,
|
||||
eps=1.0e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
self.ln_k = RMSNorm(
|
||||
self.head_dim,
|
||||
elementwise_affine=True,
|
||||
eps=1.0e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
elif qk_norm == "ln":
|
||||
self.ln_q = torch.nn.LayerNorm(
|
||||
self.head_dim,
|
||||
elementwise_affine=True,
|
||||
eps=1.0e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
self.ln_k = torch.nn.LayerNorm(
|
||||
self.head_dim,
|
||||
elementwise_affine=True,
|
||||
eps=1.0e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
elif qk_norm is None:
|
||||
self.ln_q = torch.nn.Identity()
|
||||
self.ln_k = torch.nn.Identity()
|
||||
else:
|
||||
raise ValueError(qk_norm)
|
||||
|
||||
def pre_attention(self, x: torch.Tensor):
|
||||
B, L, C = x.shape
|
||||
qkv = self.qkv(x)
|
||||
q, k, v = split_qkv(qkv, self.head_dim)
|
||||
q = self.ln_q(q).reshape(q.shape[0], q.shape[1], -1)
|
||||
k = self.ln_k(k).reshape(q.shape[0], q.shape[1], -1)
|
||||
return (q, k, v)
|
||||
|
||||
def post_attention(self, x: torch.Tensor) -> torch.Tensor:
|
||||
assert not self.pre_only
|
||||
x = self.proj(x)
|
||||
return x
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
(q, k, v) = self.pre_attention(x)
|
||||
x = attention(q, k, v, self.num_heads)
|
||||
x = self.post_attention(x)
|
||||
return x
|
||||
|
||||
|
||||
class RMSNorm(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
elementwise_affine: bool = False,
|
||||
eps: float = 1e-6,
|
||||
device=None,
|
||||
dtype=None,
|
||||
):
|
||||
"""
|
||||
Initialize the RMSNorm normalization layer.
|
||||
Args:
|
||||
dim (int): The dimension of the input tensor.
|
||||
eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.
|
||||
Attributes:
|
||||
eps (float): A small value added to the denominator for numerical stability.
|
||||
weight (torch.nn.Parameter): Learnable scaling parameter.
|
||||
"""
|
||||
super().__init__()
|
||||
self.eps = eps
|
||||
self.learnable_scale = elementwise_affine
|
||||
if self.learnable_scale:
|
||||
self.weight = torch.nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
|
||||
else:
|
||||
self.register_parameter("weight", None)
|
||||
|
||||
def _norm(self, x):
|
||||
"""
|
||||
Apply the RMSNorm normalization to the input tensor.
|
||||
Args:
|
||||
x (torch.Tensor): The input tensor.
|
||||
Returns:
|
||||
torch.Tensor: The normalized tensor.
|
||||
"""
|
||||
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
|
||||
|
||||
def forward(self, x):
|
||||
"""
|
||||
Forward pass through the RMSNorm layer.
|
||||
Args:
|
||||
x (torch.Tensor): The input tensor.
|
||||
Returns:
|
||||
torch.Tensor: The output tensor after applying RMSNorm.
|
||||
"""
|
||||
x = self._norm(x)
|
||||
if self.learnable_scale:
|
||||
return x * self.weight.to(device=x.device, dtype=x.dtype)
|
||||
else:
|
||||
return x
|
||||
|
||||
|
||||
class SwiGLUFeedForward(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
hidden_dim: int,
|
||||
multiple_of: int,
|
||||
ffn_dim_multiplier: Optional[float] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the FeedForward module.
|
||||
|
||||
Args:
|
||||
dim (int): Input dimension.
|
||||
hidden_dim (int): Hidden dimension of the feedforward layer.
|
||||
multiple_of (int): Value to ensure hidden dimension is a multiple of this value.
|
||||
ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.
|
||||
|
||||
Attributes:
|
||||
w1 (ColumnParallelLinear): Linear transformation for the first layer.
|
||||
w2 (RowParallelLinear): Linear transformation for the second layer.
|
||||
w3 (ColumnParallelLinear): Linear transformation for the third layer.
|
||||
|
||||
"""
|
||||
super().__init__()
|
||||
hidden_dim = int(2 * hidden_dim / 3)
|
||||
# custom dim factor multiplier
|
||||
if ffn_dim_multiplier is not None:
|
||||
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
|
||||
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
|
||||
|
||||
self.w1 = torch.nn.Linear(dim, hidden_dim, bias=False)
|
||||
self.w2 = torch.nn.Linear(hidden_dim, dim, bias=False)
|
||||
self.w3 = torch.nn.Linear(dim, hidden_dim, bias=False)
|
||||
|
||||
def forward(self, x):
|
||||
return self.w2(torch.nn.functional.silu(self.w1(x)) * self.w3(x))
|
||||
|
||||
|
||||
class DismantledBlock(torch.nn.Module):
|
||||
"""A DiT block with gated adaptive layer norm (adaLN) conditioning."""
|
||||
|
||||
ATTENTION_MODES = ("xformers", "torch", "torch-hb", "math", "debug")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size: int,
|
||||
num_heads: int,
|
||||
mlp_ratio: float = 4.0,
|
||||
attn_mode: str = "xformers",
|
||||
qkv_bias: bool = False,
|
||||
pre_only: bool = False,
|
||||
rmsnorm: bool = False,
|
||||
scale_mod_only: bool = False,
|
||||
swiglu: bool = False,
|
||||
qk_norm: Optional[str] = None,
|
||||
x_block_self_attn: bool = False,
|
||||
dtype=None,
|
||||
device=None,
|
||||
**block_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
assert attn_mode in self.ATTENTION_MODES
|
||||
if not rmsnorm:
|
||||
self.norm1 = torch.nn.LayerNorm(
|
||||
hidden_size,
|
||||
elementwise_affine=False,
|
||||
eps=1e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
self.norm1 = RMSNorm(hidden_size, elementwise_affine=False, eps=1e-6)
|
||||
self.attn = SelfAttention(
|
||||
dim=hidden_size,
|
||||
num_heads=num_heads,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=pre_only,
|
||||
qk_norm=qk_norm,
|
||||
rmsnorm=rmsnorm,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
if x_block_self_attn:
|
||||
assert not pre_only
|
||||
assert not scale_mod_only
|
||||
self.x_block_self_attn = True
|
||||
self.attn2 = SelfAttention(
|
||||
dim=hidden_size,
|
||||
num_heads=num_heads,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=False,
|
||||
qk_norm=qk_norm,
|
||||
rmsnorm=rmsnorm,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
self.x_block_self_attn = False
|
||||
if not pre_only:
|
||||
if not rmsnorm:
|
||||
self.norm2 = torch.nn.LayerNorm(
|
||||
hidden_size,
|
||||
elementwise_affine=False,
|
||||
eps=1e-6,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
self.norm2 = RMSNorm(hidden_size, elementwise_affine=False, eps=1e-6)
|
||||
mlp_hidden_dim = int(hidden_size * mlp_ratio)
|
||||
if not pre_only:
|
||||
if not swiglu:
|
||||
self.mlp = Mlp(
|
||||
in_features=hidden_size,
|
||||
hidden_features=mlp_hidden_dim,
|
||||
act_layer=torch.nn.GELU(approximate="tanh"),
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
self.mlp = SwiGLUFeedForward(dim=hidden_size, hidden_dim=mlp_hidden_dim, multiple_of=256)
|
||||
self.scale_mod_only = scale_mod_only
|
||||
if x_block_self_attn:
|
||||
assert not pre_only
|
||||
assert not scale_mod_only
|
||||
n_mods = 9
|
||||
elif not scale_mod_only:
|
||||
n_mods = 6 if not pre_only else 2
|
||||
else:
|
||||
n_mods = 4 if not pre_only else 1
|
||||
self.adaLN_modulation = torch.nn.Sequential(
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(hidden_size, n_mods * hidden_size, bias=True, dtype=dtype, device=device),
|
||||
)
|
||||
self.pre_only = pre_only
|
||||
|
||||
def pre_attention(self, x: torch.Tensor, c: torch.Tensor):
|
||||
assert x is not None, "pre_attention called with None input"
|
||||
if not self.pre_only:
|
||||
if not self.scale_mod_only:
|
||||
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(
|
||||
6, dim=1
|
||||
)
|
||||
else:
|
||||
shift_msa = None
|
||||
shift_mlp = None
|
||||
scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(4, dim=1)
|
||||
qkv = self.attn.pre_attention(modulate(self.norm1(x), shift_msa, scale_msa))
|
||||
return qkv, (x, gate_msa, shift_mlp, scale_mlp, gate_mlp)
|
||||
else:
|
||||
if not self.scale_mod_only:
|
||||
shift_msa, scale_msa = self.adaLN_modulation(c).chunk(2, dim=1)
|
||||
else:
|
||||
shift_msa = None
|
||||
scale_msa = self.adaLN_modulation(c)
|
||||
qkv = self.attn.pre_attention(modulate(self.norm1(x), shift_msa, scale_msa))
|
||||
return qkv, None
|
||||
|
||||
def post_attention(
|
||||
self,
|
||||
attn: torch.Tensor,
|
||||
x: torch.Tensor,
|
||||
gate_msa: torch.Tensor,
|
||||
shift_mlp: torch.Tensor,
|
||||
scale_mlp: torch.Tensor,
|
||||
gate_mlp: torch.Tensor,
|
||||
) -> torch.Tensor:
|
||||
assert not self.pre_only
|
||||
x = x + gate_msa.unsqueeze(1) * self.attn.post_attention(attn)
|
||||
x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
|
||||
return x
|
||||
|
||||
def pre_attention_x(
|
||||
self, x: torch.Tensor, c: torch.Tensor
|
||||
) -> tuple[
|
||||
tuple[torch.Tensor, torch.Tensor, torch.Tensor],
|
||||
tuple[torch.Tensor, torch.Tensor, torch.Tensor],
|
||||
tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
|
||||
]:
|
||||
assert self.x_block_self_attn
|
||||
(
|
||||
shift_msa,
|
||||
scale_msa,
|
||||
gate_msa,
|
||||
shift_mlp,
|
||||
scale_mlp,
|
||||
gate_mlp,
|
||||
shift_msa2,
|
||||
scale_msa2,
|
||||
gate_msa2,
|
||||
) = self.adaLN_modulation(c).chunk(9, dim=1)
|
||||
x_norm = self.norm1(x)
|
||||
qkv = self.attn.pre_attention(modulate(x_norm, shift_msa, scale_msa))
|
||||
qkv2 = self.attn2.pre_attention(modulate(x_norm, shift_msa2, scale_msa2))
|
||||
return (
|
||||
qkv,
|
||||
qkv2,
|
||||
(
|
||||
x,
|
||||
gate_msa,
|
||||
shift_mlp,
|
||||
scale_mlp,
|
||||
gate_mlp,
|
||||
gate_msa2,
|
||||
),
|
||||
)
|
||||
|
||||
def post_attention_x(
|
||||
self,
|
||||
attn: torch.Tensor,
|
||||
attn2: torch.Tensor,
|
||||
x: torch.Tensor,
|
||||
gate_msa: torch.Tensor,
|
||||
shift_mlp: torch.Tensor,
|
||||
scale_mlp: torch.Tensor,
|
||||
gate_mlp: torch.Tensor,
|
||||
gate_msa2: torch.Tensor,
|
||||
attn1_dropout: float = 0.0,
|
||||
):
|
||||
assert not self.pre_only
|
||||
if attn1_dropout > 0.0:
|
||||
# Use torch.bernoulli to implement dropout, only dropout the batch dimension
|
||||
attn1_dropout = torch.bernoulli(torch.full((attn.size(0), 1, 1), 1 - attn1_dropout, device=attn.device))
|
||||
attn_ = gate_msa.unsqueeze(1) * self.attn.post_attention(attn) * attn1_dropout
|
||||
else:
|
||||
attn_ = gate_msa.unsqueeze(1) * self.attn.post_attention(attn)
|
||||
x = x + attn_
|
||||
attn2_ = gate_msa2.unsqueeze(1) * self.attn2.post_attention(attn2)
|
||||
x = x + attn2_
|
||||
mlp_ = gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
|
||||
x = x + mlp_
|
||||
return x, (gate_msa, gate_msa2, gate_mlp, attn_, attn2_)
|
||||
|
||||
def forward(self, x: torch.Tensor, c: torch.Tensor):
|
||||
assert not self.pre_only
|
||||
if self.x_block_self_attn:
|
||||
(q, k, v), (q2, k2, v2), intermediates = self.pre_attention_x(x, c)
|
||||
attn = attention(q, k, v, self.attn.num_heads)
|
||||
attn2 = attention(q2, k2, v2, self.attn2.num_heads)
|
||||
return self.post_attention_x(attn, attn2, *intermediates)
|
||||
else:
|
||||
(q, k, v), intermediates = self.pre_attention(x, c)
|
||||
attn = attention(q, k, v, self.attn.num_heads)
|
||||
return self.post_attention(attn, *intermediates)
|
||||
|
||||
|
||||
def block_mixing(
    context: torch.Tensor, x: torch.Tensor, context_block: DismantledBlock, x_block: DismantledBlock, c: torch.Tensor
):
    assert context is not None, "block_mixing called with None context"
    context_qkv, context_intermediates = context_block.pre_attention(context, c)

    if x_block.x_block_self_attn:
        x_qkv, x_qkv2, x_intermediates = x_block.pre_attention_x(x, c)
    else:
        x_qkv, x_intermediates = x_block.pre_attention(x, c)

    o: list[torch.Tensor] = []
    for t in range(3):
        o.append(torch.cat((context_qkv[t], x_qkv[t]), dim=1))
    q, k, v = tuple(o)

    attn = attention(q, k, v, x_block.attn.num_heads)
    context_attn, x_attn = (
        attn[:, : context_qkv[0].shape[1]],
        attn[:, context_qkv[0].shape[1] :],
    )

    if not context_block.pre_only:
        context = context_block.post_attention(context_attn, *context_intermediates)
    else:
        context = None

    if x_block.x_block_self_attn:
        x_q2, x_k2, x_v2 = x_qkv2
        attn2 = attention(x_q2, x_k2, x_v2, x_block.attn2.num_heads)
        # Fold the extra self-attention branch back into x; post_attention_x above
        # returns (x, intermediates), so the intermediates are discarded here.
        x, _ = x_block.post_attention_x(x_attn, attn2, *x_intermediates)
    else:
        x = x_block.post_attention(x_attn, *x_intermediates)

    return context, x
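# Illustrative note (not part of the original file): block_mixing concatenates the
# context (text) tokens and the latent (image) tokens along the sequence dimension,
# runs a single joint attention pass over the combined sequence, then splits the
# result back by the context length before each stream's post-attention path.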
|
||||
|
||||
|
||||
class JointBlock(torch.nn.Module):
    """Just a small wrapper to serve as an FSDP unit."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        pre_only = kwargs.pop("pre_only")
        qk_norm = kwargs.pop("qk_norm", None)
        x_block_self_attn = kwargs.pop("x_block_self_attn", False)
        self.context_block = DismantledBlock(*args, pre_only=pre_only, qk_norm=qk_norm, **kwargs)
        self.x_block = DismantledBlock(
            *args,
            pre_only=False,
            qk_norm=qk_norm,
            x_block_self_attn=x_block_self_attn,
            **kwargs,
        )

    def forward(self, *args, **kwargs):
        return block_mixing(*args, context_block=self.context_block, x_block=self.x_block, **kwargs)
|
||||
|
||||
|
||||
class FinalLayer(torch.nn.Module):
|
||||
"""
|
||||
The final layer of DiT.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size: int,
|
||||
patch_size: int,
|
||||
out_channels: int,
|
||||
total_out_channels: Optional[int] = None,
|
||||
dtype: Optional[torch.dtype] = None,
|
||||
device: Optional[torch.device] = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.norm_final = torch.nn.LayerNorm(
|
||||
hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device
|
||||
)
|
||||
self.linear = (
|
||||
torch.nn.Linear(
|
||||
hidden_size,
|
||||
patch_size * patch_size * out_channels,
|
||||
bias=True,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
if (total_out_channels is None)
|
||||
else torch.nn.Linear(hidden_size, total_out_channels, bias=True, dtype=dtype, device=device)
|
||||
)
|
||||
self.adaLN_modulation = torch.nn.Sequential(
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device),
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
|
||||
shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
|
||||
x = modulate(self.norm_final(x), shift, scale)
|
||||
x = self.linear(x)
|
||||
return x
|
||||
|
||||
|
||||
class MMDiTX(torch.nn.Module):
|
||||
"""Diffusion model with a Transformer backbone."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
input_size: int | None = 32,
|
||||
patch_size: int = 2,
|
||||
in_channels: int = 4,
|
||||
depth: int = 28,
|
||||
mlp_ratio: float = 4.0,
|
||||
learn_sigma: bool = False,
|
||||
adm_in_channels: Optional[int] = None,
|
||||
context_embedder_config: Optional[Dict] = None,
|
||||
register_length: int = 0,
|
||||
attn_mode: str = "torch",
|
||||
rmsnorm: bool = False,
|
||||
scale_mod_only: bool = False,
|
||||
swiglu: bool = False,
|
||||
out_channels: Optional[int] = None,
|
||||
pos_embed_scaling_factor: Optional[float] = None,
|
||||
pos_embed_offset: Optional[float] = None,
|
||||
pos_embed_max_size: Optional[int] = None,
|
||||
num_patches: Optional[int] = None,
|
||||
qk_norm: Optional[str] = None,
|
||||
x_block_self_attn_layers: Optional[List[int]] = None,
|
||||
qkv_bias: bool = True,
|
||||
dtype: Optional[torch.dtype] = None,
|
||||
device: Optional[torch.device] = None,
|
||||
verbose: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
if verbose:
|
||||
print(
|
||||
f"mmdit initializing with: {input_size=}, {patch_size=}, {in_channels=}, {depth=}, {mlp_ratio=}, {learn_sigma=}, {adm_in_channels=}, {context_embedder_config=}, {register_length=}, {attn_mode=}, {rmsnorm=}, {scale_mod_only=}, {swiglu=}, {out_channels=}, {pos_embed_scaling_factor=}, {pos_embed_offset=}, {pos_embed_max_size=}, {num_patches=}, {qk_norm=}, {qkv_bias=}, {dtype=}, {device=}"
|
||||
)
|
||||
self.dtype = dtype
|
||||
self.learn_sigma = learn_sigma
|
||||
self.in_channels = in_channels
|
||||
default_out_channels = in_channels * 2 if learn_sigma else in_channels
|
||||
self.out_channels = out_channels if out_channels is not None else default_out_channels
|
||||
self.patch_size = patch_size
|
||||
self.pos_embed_scaling_factor = pos_embed_scaling_factor
|
||||
self.pos_embed_offset = pos_embed_offset
|
||||
self.pos_embed_max_size = pos_embed_max_size
|
||||
self.x_block_self_attn_layers = x_block_self_attn_layers or []
|
||||
|
||||
        # Fixed head size of 64: hidden_size and num_heads below are both derived from depth.
|
||||
hidden_size = 64 * depth
|
||||
num_heads = depth
|
||||
|
||||
self.num_heads = num_heads
|
||||
|
||||
self.x_embedder = PatchEmbed(
|
||||
input_size,
|
||||
patch_size,
|
||||
in_channels,
|
||||
hidden_size,
|
||||
bias=True,
|
||||
strict_img_size=self.pos_embed_max_size is None,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
self.t_embedder = TimestepEmbedder(hidden_size, dtype=dtype, device=device)
|
||||
|
||||
if adm_in_channels is not None:
|
||||
assert isinstance(adm_in_channels, int)
|
||||
self.y_embedder = VectorEmbedder(adm_in_channels, hidden_size, dtype=dtype, device=device)
|
||||
|
||||
self.context_embedder = torch.nn.Identity()
|
||||
if context_embedder_config is not None:
|
||||
if context_embedder_config["target"] == "torch.nn.Linear":
|
||||
self.context_embedder = torch.nn.Linear(**context_embedder_config["params"], dtype=dtype, device=device)
|
||||
|
||||
self.register_length = register_length
|
||||
if self.register_length > 0:
|
||||
self.register = torch.nn.Parameter(torch.randn(1, register_length, hidden_size, dtype=dtype, device=device))
|
||||
|
||||
# num_patches = self.x_embedder.num_patches
|
||||
# Will use fixed sin-cos embedding:
|
||||
# just use a buffer already
|
||||
if num_patches is not None:
|
||||
self.register_buffer(
|
||||
"pos_embed",
|
||||
torch.zeros(1, num_patches, hidden_size, dtype=dtype, device=device),
|
||||
)
|
||||
else:
|
||||
self.pos_embed = None
|
||||
|
||||
self.joint_blocks = torch.nn.ModuleList(
|
||||
[
|
||||
JointBlock(
|
||||
hidden_size,
|
||||
num_heads,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=i == depth - 1,
|
||||
rmsnorm=rmsnorm,
|
||||
scale_mod_only=scale_mod_only,
|
||||
swiglu=swiglu,
|
||||
qk_norm=qk_norm,
|
||||
x_block_self_attn=(i in self.x_block_self_attn_layers),
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
for i in range(depth)
|
||||
]
|
||||
)
|
||||
|
||||
self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels, dtype=dtype, device=device)
|
||||
|
||||
def cropped_pos_embed(self, hw: torch.Size) -> torch.Tensor:
|
||||
assert self.pos_embed_max_size is not None
|
||||
p = self.x_embedder.patch_size[0]
|
||||
h, w = hw
|
||||
# patched size
|
||||
h = h // p
|
||||
w = w // p
|
||||
assert h <= self.pos_embed_max_size, (h, self.pos_embed_max_size)
|
||||
assert w <= self.pos_embed_max_size, (w, self.pos_embed_max_size)
|
||||
top = (self.pos_embed_max_size - h) // 2
|
||||
left = (self.pos_embed_max_size - w) // 2
|
||||
spatial_pos_embed: torch.Tensor = rearrange(
|
||||
self.pos_embed,
|
||||
"1 (h w) c -> 1 h w c",
|
||||
h=self.pos_embed_max_size,
|
||||
w=self.pos_embed_max_size,
|
||||
) # type: ignore Type checking does not correctly infer the type of the self.pos_embed buffer.
|
||||
spatial_pos_embed = spatial_pos_embed[:, top : top + h, left : left + w, :]
|
||||
spatial_pos_embed = rearrange(spatial_pos_embed, "1 h w c -> 1 (h w) c")
|
||||
return spatial_pos_embed
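    # Illustrative note (not part of the original file): the positional embedding is
    # stored as a pos_embed_max_size x pos_embed_max_size grid and center-cropped to
    # the current latent size. For example, if pos_embed_max_size were 192 and the
    # latent were 128x128 with patch_size 2, then h = w = 64 and
    # top = left = (192 - 64) // 2 = 64, i.e. a centered 64x64 window of the grid.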
|
||||
|
||||
def unpatchify(self, x: torch.Tensor, hw: Optional[torch.Size] = None) -> torch.Tensor:
|
||||
"""
|
||||
x: (N, T, patch_size**2 * C)
|
||||
        imgs: (N, C, H, W)
|
||||
"""
|
||||
c = self.out_channels
|
||||
p = self.x_embedder.patch_size[0]
|
||||
if hw is None:
|
||||
h = w = int(x.shape[1] ** 0.5)
|
||||
else:
|
||||
h, w = hw
|
||||
h = h // p
|
||||
w = w // p
|
||||
assert h * w == x.shape[1]
|
||||
|
||||
x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
|
||||
x = torch.einsum("nhwpqc->nchpwq", x)
|
||||
imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
|
||||
return imgs
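    # Illustrative note (not part of the original file): with patch_size p = 2 and
    # out_channels c = 16, a 128x128 latent is split into h = w = 64 patches, so the
    # transformer sees T = 4096 tokens of size p * p * c = 64, and unpatchify folds
    # them back into an (N, 16, 128, 128) tensor.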
|
||||
|
||||
def forward_core_with_concat(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
c_mod: torch.Tensor,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
if self.register_length > 0:
|
||||
context = torch.cat(
|
||||
(
|
||||
repeat(self.register, "1 ... -> b ...", b=x.shape[0]),
|
||||
context if context is not None else torch.Tensor([]).type_as(x),
|
||||
),
|
||||
1,
|
||||
)
|
||||
|
||||
# context is B, L', D
|
||||
# x is B, L, D
|
||||
for block in self.joint_blocks:
|
||||
context, x = block(context, x, c=c_mod)
|
||||
|
||||
x = self.final_layer(x, c_mod) # (N, T, patch_size ** 2 * out_channels)
|
||||
return x
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
t: torch.Tensor,
|
||||
y: Optional[torch.Tensor] = None,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Forward pass of DiT.
|
||||
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
|
||||
t: (N,) tensor of diffusion timesteps
|
||||
y: (N,) tensor of class labels
|
||||
"""
|
||||
hw = x.shape[-2:]
|
||||
x = self.x_embedder(x) + self.cropped_pos_embed(hw)
|
||||
c = self.t_embedder(t, dtype=x.dtype) # (N, D)
|
||||
if y is not None:
|
||||
y = self.y_embedder(y) # (N, D)
|
||||
c = c + y # (N, D)
|
||||
|
||||
context = self.context_embedder(context)
|
||||
|
||||
x = self.forward_core_with_concat(x, c, context)
|
||||
|
||||
x = self.unpatchify(x, hw=hw) # (N, out_channels, H, W)
|
||||
return x
|
||||
@@ -1,795 +0,0 @@
|
||||
# This file was originally copied from:
|
||||
# https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/other_impls.py
|
||||
|
||||
### This file contains impls for underlying related models (CLIP, T5, etc)
|
||||
|
||||
import math
|
||||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
from transformers import CLIPTokenizer, T5TokenizerFast
|
||||
|
||||
#################################################################################################
|
||||
### Core/Utility
|
||||
#################################################################################################
|
||||
|
||||
|
||||
def attention(
|
||||
q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask: Optional[torch.Tensor] = None
|
||||
) -> torch.Tensor:
|
||||
"""Convenience wrapper around a basic attention operation"""
|
||||
b, _, dim_head = q.shape
|
||||
dim_head //= heads
|
||||
q, k, v = map(lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), (q, k, v))
|
||||
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
|
||||
return out.transpose(1, 2).reshape(b, -1, heads * dim_head)
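# Shape convention (illustrative, not part of the original file): q, k and v arrive
# flattened as (batch, seq_len, heads * dim_head); the wrapper reshapes them to
# (batch, heads, seq_len, dim_head) for scaled_dot_product_attention and folds the
# heads back afterwards. For example, with heads=8:
#   q = k = v = torch.randn(2, 77, 512)
#   attention(q, k, v, heads=8).shape == (2, 77, 512)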
|
||||
|
||||
|
||||
class Mlp(torch.nn.Module):
|
||||
"""MLP as used in Vision Transformer, MLP-Mixer and related networks"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
in_features: int,
|
||||
hidden_features: Optional[int] = None,
|
||||
out_features: Optional[int] = None,
|
||||
act_layer: Callable[[torch.Tensor], torch.Tensor] | None = None,
|
||||
bias: bool = True,
|
||||
dtype: Optional[torch.dtype] = None,
|
||||
device: Optional[torch.device] = None,
|
||||
):
|
||||
super().__init__()
|
||||
out_features = out_features or in_features
|
||||
hidden_features = hidden_features or in_features
|
||||
if act_layer is None:
|
||||
act_layer = torch.nn.functional.gelu
|
||||
|
||||
self.fc1 = torch.nn.Linear(in_features, hidden_features, bias=bias, dtype=dtype, device=device)
|
||||
self.act = act_layer
|
||||
self.fc2 = torch.nn.Linear(hidden_features, out_features, bias=bias, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
x = self.fc1(x)
|
||||
x = self.act(x)
|
||||
x = self.fc2(x)
|
||||
return x
|
||||
|
||||
|
||||
#################################################################################################
|
||||
### CLIP
|
||||
#################################################################################################
|
||||
|
||||
|
||||
class CLIPAttention(torch.nn.Module):
|
||||
def __init__(self, embed_dim, heads, dtype, device):
|
||||
super().__init__()
|
||||
self.heads = heads
|
||||
self.q_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
|
||||
self.k_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
|
||||
self.v_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
|
||||
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, x, mask=None):
|
||||
q = self.q_proj(x)
|
||||
k = self.k_proj(x)
|
||||
v = self.v_proj(x)
|
||||
out = attention(q, k, v, self.heads, mask)
|
||||
return self.out_proj(out)
|
||||
|
||||
|
||||
ACTIVATIONS = {
|
||||
"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a),
|
||||
"gelu": torch.nn.functional.gelu,
|
||||
}
|
||||
|
||||
|
||||
class CLIPLayer(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim,
|
||||
heads,
|
||||
intermediate_size,
|
||||
intermediate_activation,
|
||||
dtype,
|
||||
device,
|
||||
):
|
||||
super().__init__()
|
||||
self.layer_norm1 = torch.nn.LayerNorm(embed_dim, dtype=dtype, device=device)
|
||||
self.self_attn = CLIPAttention(embed_dim, heads, dtype, device)
|
||||
self.layer_norm2 = torch.nn.LayerNorm(embed_dim, dtype=dtype, device=device)
|
||||
# self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device)
|
||||
self.mlp = Mlp(
|
||||
embed_dim,
|
||||
intermediate_size,
|
||||
embed_dim,
|
||||
act_layer=ACTIVATIONS[intermediate_activation],
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
|
||||
def forward(self, x, mask=None):
|
||||
x += self.self_attn(self.layer_norm1(x), mask)
|
||||
x += self.mlp(self.layer_norm2(x))
|
||||
return x
|
||||
|
||||
|
||||
class CLIPEncoder(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
num_layers,
|
||||
embed_dim,
|
||||
heads,
|
||||
intermediate_size,
|
||||
intermediate_activation,
|
||||
dtype,
|
||||
device,
|
||||
):
|
||||
super().__init__()
|
||||
self.layers = torch.nn.ModuleList(
|
||||
[
|
||||
CLIPLayer(
|
||||
embed_dim,
|
||||
heads,
|
||||
intermediate_size,
|
||||
intermediate_activation,
|
||||
dtype,
|
||||
device,
|
||||
)
|
||||
for i in range(num_layers)
|
||||
]
|
||||
)
|
||||
|
||||
def forward(self, x, mask=None, intermediate_output=None):
|
||||
if intermediate_output is not None:
|
||||
if intermediate_output < 0:
|
||||
intermediate_output = len(self.layers) + intermediate_output
|
||||
intermediate = None
|
||||
for i, l in enumerate(self.layers):
|
||||
x = l(x, mask)
|
||||
if i == intermediate_output:
|
||||
intermediate = x.clone()
|
||||
return x, intermediate
|
||||
|
||||
|
||||
class CLIPEmbeddings(torch.nn.Module):
|
||||
def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None):
|
||||
super().__init__()
|
||||
self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device)
|
||||
self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, input_tokens):
|
||||
return self.token_embedding(input_tokens) + self.position_embedding.weight
|
||||
|
||||
|
||||
class CLIPTextModel_(torch.nn.Module):
|
||||
def __init__(self, config_dict, dtype, device):
|
||||
num_layers = config_dict["num_hidden_layers"]
|
||||
embed_dim = config_dict["hidden_size"]
|
||||
heads = config_dict["num_attention_heads"]
|
||||
intermediate_size = config_dict["intermediate_size"]
|
||||
intermediate_activation = config_dict["hidden_act"]
|
||||
super().__init__()
|
||||
self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device)
|
||||
self.encoder = CLIPEncoder(
|
||||
num_layers,
|
||||
embed_dim,
|
||||
heads,
|
||||
intermediate_size,
|
||||
intermediate_activation,
|
||||
dtype,
|
||||
device,
|
||||
)
|
||||
self.final_layer_norm = torch.nn.LayerNorm(embed_dim, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, input_tokens, intermediate_output=None, final_layer_norm_intermediate=True):
|
||||
x = self.embeddings(input_tokens)
|
||||
causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
|
||||
x, i = self.encoder(x, mask=causal_mask, intermediate_output=intermediate_output)
|
||||
x = self.final_layer_norm(x)
|
||||
if i is not None and final_layer_norm_intermediate:
|
||||
i = self.final_layer_norm(i)
|
||||
pooled_output = x[
|
||||
torch.arange(x.shape[0], device=x.device),
|
||||
input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),
|
||||
]
|
||||
return x, i, pooled_output
|
||||
|
||||
|
||||
class CLIPTextModel(torch.nn.Module):
|
||||
def __init__(self, config_dict, dtype, device):
|
||||
super().__init__()
|
||||
self.num_layers = config_dict["num_hidden_layers"]
|
||||
self.text_model = CLIPTextModel_(config_dict, dtype, device)
|
||||
embed_dim = config_dict["hidden_size"]
|
||||
self.text_projection = torch.nn.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device)
|
||||
self.text_projection.weight.copy_(torch.eye(embed_dim))
|
||||
self.dtype = dtype
|
||||
|
||||
def get_input_embeddings(self):
|
||||
return self.text_model.embeddings.token_embedding
|
||||
|
||||
def set_input_embeddings(self, embeddings):
|
||||
self.text_model.embeddings.token_embedding = embeddings
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
x = self.text_model(*args, **kwargs)
|
||||
out = self.text_projection(x[2])
|
||||
return (x[0], x[1], out, x[2])
|
||||
|
||||
|
||||
def parse_parentheses(string):
|
||||
result = []
|
||||
current_item = ""
|
||||
nesting_level = 0
|
||||
for char in string:
|
||||
if char == "(":
|
||||
if nesting_level == 0:
|
||||
if current_item:
|
||||
result.append(current_item)
|
||||
current_item = "("
|
||||
else:
|
||||
current_item = "("
|
||||
else:
|
||||
current_item += char
|
||||
nesting_level += 1
|
||||
elif char == ")":
|
||||
nesting_level -= 1
|
||||
if nesting_level == 0:
|
||||
result.append(current_item + ")")
|
||||
current_item = ""
|
||||
else:
|
||||
current_item += char
|
||||
else:
|
||||
current_item += char
|
||||
if current_item:
|
||||
result.append(current_item)
|
||||
return result
|
||||
|
||||
|
||||
def token_weights(string, current_weight):
|
||||
a = parse_parentheses(string)
|
||||
out = []
|
||||
for x in a:
|
||||
weight = current_weight
|
||||
if len(x) >= 2 and x[-1] == ")" and x[0] == "(":
|
||||
x = x[1:-1]
|
||||
xx = x.rfind(":")
|
||||
weight *= 1.1
|
||||
if xx > 0:
|
||||
try:
|
||||
weight = float(x[xx + 1 :])
|
||||
x = x[:xx]
|
||||
                except ValueError:  # float() failed; keep the 1.1-multiplied default weight
|
||||
pass
|
||||
out += token_weights(x, weight)
|
||||
else:
|
||||
out += [(x, current_weight)]
|
||||
return out
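# Worked example (illustrative, not part of the original file):
#   token_weights("a (red:1.3) car", 1.0)
#   -> [("a ", 1.0), ("red", 1.3), (" car", 1.0)]
# Parenthesised spans multiply the running weight by 1.1; an explicit ":<number>"
# suffix replaces that multiplier, and nesting compounds the 1.1 factor.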
|
||||
|
||||
|
||||
def escape_important(text):
|
||||
text = text.replace("\\)", "\0\1")
|
||||
text = text.replace("\\(", "\0\2")
|
||||
return text
|
||||
|
||||
|
||||
def unescape_important(text):
|
||||
text = text.replace("\0\1", ")")
|
||||
text = text.replace("\0\2", "(")
|
||||
return text
|
||||
|
||||
|
||||
class SDTokenizer:
|
||||
def __init__(
|
||||
self,
|
||||
max_length=77,
|
||||
pad_with_end=True,
|
||||
tokenizer=None,
|
||||
has_start_token=True,
|
||||
pad_to_max_length=True,
|
||||
min_length=None,
|
||||
extra_padding_token=None,
|
||||
):
|
||||
self.tokenizer = tokenizer
|
||||
self.max_length = max_length
|
||||
self.min_length = min_length
|
||||
|
||||
empty = self.tokenizer("")["input_ids"]
|
||||
if has_start_token:
|
||||
self.tokens_start = 1
|
||||
self.start_token = empty[0]
|
||||
self.end_token = empty[1]
|
||||
else:
|
||||
self.tokens_start = 0
|
||||
self.start_token = None
|
||||
self.end_token = empty[0]
|
||||
self.pad_with_end = pad_with_end
|
||||
self.pad_to_max_length = pad_to_max_length
|
||||
self.extra_padding_token = extra_padding_token
|
||||
|
||||
vocab = self.tokenizer.get_vocab()
|
||||
self.inv_vocab = {v: k for k, v in vocab.items()}
|
||||
self.max_word_length = 8
|
||||
|
||||
def tokenize_with_weights(self, text: str, return_word_ids=False):
|
||||
"""
|
||||
Tokenize the text, with weight values - presume 1.0 for all and ignore other features here.
|
||||
        The details aren't relevant for a reference impl, and the weights themselves have only a weak effect on SD3.
|
||||
"""
|
||||
if self.pad_with_end:
|
||||
pad_token = self.end_token
|
||||
else:
|
||||
pad_token = 0
|
||||
|
||||
text = escape_important(text)
|
||||
parsed_weights = token_weights(text, 1.0)
|
||||
|
||||
# tokenize words
|
||||
tokens = []
|
||||
for weighted_segment, weight in parsed_weights:
|
||||
to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(" ")
|
||||
to_tokenize = [x for x in to_tokenize if x != ""]
|
||||
for word in to_tokenize:
|
||||
# parse word
|
||||
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start : -1]])
|
||||
|
||||
# reshape token array to CLIP input size
|
||||
batched_tokens = []
|
||||
batch = []
|
||||
if self.start_token is not None:
|
||||
batch.append((self.start_token, 1.0, 0))
|
||||
batched_tokens.append(batch)
|
||||
for i, t_group in enumerate(tokens):
|
||||
# determine if we're going to try and keep the tokens in a single batch
|
||||
is_large = len(t_group) >= self.max_word_length
|
||||
|
||||
while len(t_group) > 0:
|
||||
if len(t_group) + len(batch) > self.max_length - 1:
|
||||
remaining_length = self.max_length - len(batch) - 1
|
||||
# break word in two and add end token
|
||||
if is_large:
|
||||
batch.extend([(t, w, i + 1) for t, w in t_group[:remaining_length]])
|
||||
batch.append((self.end_token, 1.0, 0))
|
||||
t_group = t_group[remaining_length:]
|
||||
# add end token and pad
|
||||
else:
|
||||
batch.append((self.end_token, 1.0, 0))
|
||||
if self.pad_to_max_length:
|
||||
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
|
||||
# start new batch
|
||||
batch = []
|
||||
if self.start_token is not None:
|
||||
batch.append((self.start_token, 1.0, 0))
|
||||
batched_tokens.append(batch)
|
||||
else:
|
||||
batch.extend([(t, w, i + 1) for t, w in t_group])
|
||||
t_group = []
|
||||
|
||||
        # pad with the extra padding token first, before adding the end token
|
||||
if self.extra_padding_token is not None:
|
||||
batch.extend([(self.extra_padding_token, 1.0, 0)] * (self.min_length - len(batch) - 1))
|
||||
# fill last batch
|
||||
batch.append((self.end_token, 1.0, 0))
|
||||
if self.pad_to_max_length:
|
||||
batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch)))
|
||||
if self.min_length is not None and len(batch) < self.min_length:
|
||||
batch.extend([(pad_token, 1.0, 0)] * (self.min_length - len(batch)))
|
||||
|
||||
if not return_word_ids:
|
||||
batched_tokens = [[(t, w) for t, w, _ in x] for x in batched_tokens]
|
||||
|
||||
return batched_tokens
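    # Output structure (illustrative, not part of the original file): a list of
    # batches, each a list of (token_id, weight) pairs, e.g.
    #   [[(start, 1.0), (tok_a, 1.0), (tok_b, 1.3), (end, 1.0), (pad, 1.0), ...]]
    # With return_word_ids=True each entry is a (token_id, weight, word_index)
    # triple, where word_index 0 marks the start/end/padding tokens.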
|
||||
|
||||
def untokenize(self, token_weight_pair):
|
||||
return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
|
||||
|
||||
|
||||
class SDXLClipGTokenizer(SDTokenizer):
|
||||
def __init__(self, tokenizer):
|
||||
super().__init__(pad_with_end=False, tokenizer=tokenizer)
|
||||
|
||||
|
||||
class SD3Tokenizer:
|
||||
def __init__(self):
|
||||
clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
||||
self.clip_l = SDTokenizer(tokenizer=clip_tokenizer)
|
||||
self.clip_g = SDXLClipGTokenizer(clip_tokenizer)
|
||||
self.t5xxl = T5XXLTokenizer()
|
||||
|
||||
def tokenize_with_weights(self, text: str):
|
||||
out = {}
|
||||
out["l"] = self.clip_l.tokenize_with_weights(text)
|
||||
out["g"] = self.clip_g.tokenize_with_weights(text)
|
||||
out["t5xxl"] = self.t5xxl.tokenize_with_weights(text[:226])
|
||||
return out
|
||||
|
||||
|
||||
class ClipTokenWeightEncoder:
|
||||
def encode_token_weights(self, token_weight_pairs):
|
||||
tokens = list(map(lambda a: a[0], token_weight_pairs[0]))
|
||||
out, pooled = self([tokens])
|
||||
if pooled is not None:
|
||||
first_pooled = pooled[0:1].cpu()
|
||||
else:
|
||||
first_pooled = pooled
|
||||
output = [out[0:1]]
|
||||
return torch.cat(output, dim=-2).cpu(), first_pooled
|
||||
|
||||
|
||||
class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
|
||||
"""Uses the CLIP transformer encoder for text (from huggingface)"""
|
||||
|
||||
LAYERS = ["last", "pooled", "hidden"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
device="cpu",
|
||||
max_length=77,
|
||||
layer="last",
|
||||
layer_idx=None,
|
||||
textmodel_json_config=None,
|
||||
dtype=None,
|
||||
model_class=CLIPTextModel,
|
||||
special_tokens={"start": 49406, "end": 49407, "pad": 49407},
|
||||
layer_norm_hidden_state=True,
|
||||
return_projected_pooled=True,
|
||||
):
|
||||
super().__init__()
|
||||
assert layer in self.LAYERS
|
||||
self.transformer = model_class(textmodel_json_config, dtype, device)
|
||||
self.num_layers = self.transformer.num_layers
|
||||
self.max_length = max_length
|
||||
self.transformer = self.transformer.eval()
|
||||
for param in self.parameters():
|
||||
param.requires_grad = False
|
||||
self.layer = layer
|
||||
self.layer_idx = None
|
||||
self.special_tokens = special_tokens
|
||||
self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055))
|
||||
self.layer_norm_hidden_state = layer_norm_hidden_state
|
||||
self.return_projected_pooled = return_projected_pooled
|
||||
if layer == "hidden":
|
||||
assert layer_idx is not None
|
||||
assert abs(layer_idx) < self.num_layers
|
||||
self.set_clip_options({"layer": layer_idx})
|
||||
self.options_default = (
|
||||
self.layer,
|
||||
self.layer_idx,
|
||||
self.return_projected_pooled,
|
||||
)
|
||||
|
||||
def set_clip_options(self, options):
|
||||
layer_idx = options.get("layer", self.layer_idx)
|
||||
self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled)
|
||||
if layer_idx is None or abs(layer_idx) > self.num_layers:
|
||||
self.layer = "last"
|
||||
else:
|
||||
self.layer = "hidden"
|
||||
self.layer_idx = layer_idx
|
||||
|
||||
def forward(self, tokens):
|
||||
backup_embeds = self.transformer.get_input_embeddings()
|
||||
device = backup_embeds.weight.device
|
||||
tokens = torch.LongTensor(tokens).to(device)
|
||||
outputs = self.transformer(
|
||||
tokens,
|
||||
intermediate_output=self.layer_idx,
|
||||
final_layer_norm_intermediate=self.layer_norm_hidden_state,
|
||||
)
|
||||
self.transformer.set_input_embeddings(backup_embeds)
|
||||
if self.layer == "last":
|
||||
z = outputs[0]
|
||||
else:
|
||||
z = outputs[1]
|
||||
pooled_output = None
|
||||
if len(outputs) >= 3:
|
||||
if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None:
|
||||
pooled_output = outputs[3].float()
|
||||
elif outputs[2] is not None:
|
||||
pooled_output = outputs[2].float()
|
||||
return z.float(), pooled_output
|
||||
|
||||
|
||||
class SDXLClipG(SDClipModel):
|
||||
"""Wraps the CLIP-G model into the SD-CLIP-Model interface"""
|
||||
|
||||
def __init__(self, config, device="cpu", layer="penultimate", layer_idx=None, dtype=None):
|
||||
if layer == "penultimate":
|
||||
layer = "hidden"
|
||||
layer_idx = -2
|
||||
super().__init__(
|
||||
device=device,
|
||||
layer=layer,
|
||||
layer_idx=layer_idx,
|
||||
textmodel_json_config=config,
|
||||
dtype=dtype,
|
||||
special_tokens={"start": 49406, "end": 49407, "pad": 0},
|
||||
layer_norm_hidden_state=False,
|
||||
)
|
||||
|
||||
|
||||
class T5XXLModel(SDClipModel):
|
||||
"""Wraps the T5-XXL model into the SD-CLIP-Model interface for convenience"""
|
||||
|
||||
def __init__(self, config, device="cpu", layer="last", layer_idx=None, dtype=None):
|
||||
super().__init__(
|
||||
device=device,
|
||||
layer=layer,
|
||||
layer_idx=layer_idx,
|
||||
textmodel_json_config=config,
|
||||
dtype=dtype,
|
||||
special_tokens={"end": 1, "pad": 0},
|
||||
model_class=T5,
|
||||
)
|
||||
|
||||
|
||||
#################################################################################################
|
||||
### T5 implementation, for the T5-XXL text encoder portion, largely pulled from upstream impl
|
||||
#################################################################################################
|
||||
|
||||
|
||||
class T5XXLTokenizer(SDTokenizer):
|
||||
"""Wraps the T5 Tokenizer from HF into the SDTokenizer interface"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
pad_with_end=False,
|
||||
tokenizer=T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl"),
|
||||
has_start_token=False,
|
||||
pad_to_max_length=False,
|
||||
max_length=99999999,
|
||||
min_length=77,
|
||||
)
|
||||
|
||||
|
||||
class T5LayerNorm(torch.nn.Module):
|
||||
def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None):
|
||||
super().__init__()
|
||||
self.weight = torch.nn.Parameter(torch.ones(hidden_size, dtype=dtype, device=device))
|
||||
self.variance_epsilon = eps
|
||||
|
||||
def forward(self, x):
|
||||
variance = x.pow(2).mean(-1, keepdim=True)
|
||||
x = x * torch.rsqrt(variance + self.variance_epsilon)
|
||||
return self.weight.to(device=x.device, dtype=x.dtype) * x
|
||||
|
||||
|
||||
class T5DenseGatedActDense(torch.nn.Module):
|
||||
def __init__(self, model_dim, ff_dim, dtype, device):
|
||||
super().__init__()
|
||||
self.wi_0 = torch.nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device)
|
||||
self.wi_1 = torch.nn.Linear(model_dim, ff_dim, bias=False, dtype=dtype, device=device)
|
||||
self.wo = torch.nn.Linear(ff_dim, model_dim, bias=False, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, x):
|
||||
hidden_gelu = torch.nn.functional.gelu(self.wi_0(x), approximate="tanh")
|
||||
hidden_linear = self.wi_1(x)
|
||||
x = hidden_gelu * hidden_linear
|
||||
x = self.wo(x)
|
||||
return x
|
||||
|
||||
|
||||
class T5LayerFF(torch.nn.Module):
|
||||
def __init__(self, model_dim, ff_dim, dtype, device):
|
||||
super().__init__()
|
||||
self.DenseReluDense = T5DenseGatedActDense(model_dim, ff_dim, dtype, device)
|
||||
self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, x):
|
||||
forwarded_states = self.layer_norm(x)
|
||||
forwarded_states = self.DenseReluDense(forwarded_states)
|
||||
x += forwarded_states
|
||||
return x
|
||||
|
||||
|
||||
class T5Attention(torch.nn.Module):
|
||||
def __init__(self, model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device):
|
||||
super().__init__()
|
||||
# Mesh TensorFlow initialization to avoid scaling before softmax
|
||||
self.q = torch.nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
|
||||
self.k = torch.nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
|
||||
self.v = torch.nn.Linear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
|
||||
self.o = torch.nn.Linear(inner_dim, model_dim, bias=False, dtype=dtype, device=device)
|
||||
self.num_heads = num_heads
|
||||
self.relative_attention_bias = None
|
||||
if relative_attention_bias:
|
||||
self.relative_attention_num_buckets = 32
|
||||
self.relative_attention_max_distance = 128
|
||||
self.relative_attention_bias = torch.nn.Embedding(
|
||||
self.relative_attention_num_buckets, self.num_heads, device=device
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
|
||||
"""
|
||||
Adapted from Mesh Tensorflow:
|
||||
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
|
||||
|
||||
Translate relative position to a bucket number for relative attention. The relative position is defined as
|
||||
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
|
||||
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
|
||||
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
|
||||
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
|
||||
This should allow for more graceful generalization to longer sequences than the model has been trained on
|
||||
|
||||
Args:
|
||||
relative_position: an int32 Tensor
|
||||
bidirectional: a boolean - whether the attention is bidirectional
|
||||
num_buckets: an integer
|
||||
max_distance: an integer
|
||||
|
||||
Returns:
|
||||
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
|
||||
"""
|
||||
relative_buckets = 0
|
||||
if bidirectional:
|
||||
num_buckets //= 2
|
||||
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
|
||||
relative_position = torch.abs(relative_position)
|
||||
else:
|
||||
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
|
||||
# now relative_position is in the range [0, inf)
|
||||
# half of the buckets are for exact increments in positions
|
||||
max_exact = num_buckets // 2
|
||||
is_small = relative_position < max_exact
|
||||
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
|
||||
relative_position_if_large = max_exact + (
|
||||
torch.log(relative_position.float() / max_exact)
|
||||
/ math.log(max_distance / max_exact)
|
||||
* (num_buckets - max_exact)
|
||||
).to(torch.long)
|
||||
relative_position_if_large = torch.min(
|
||||
relative_position_if_large,
|
||||
torch.full_like(relative_position_if_large, num_buckets - 1),
|
||||
)
|
||||
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
|
||||
return relative_buckets
|
||||
|
||||
def compute_bias(self, query_length, key_length, device):
|
||||
"""Compute binned relative position bias"""
|
||||
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
|
||||
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
|
||||
relative_position = memory_position - context_position # shape (query_length, key_length)
|
||||
relative_position_bucket = self._relative_position_bucket(
|
||||
relative_position, # shape (query_length, key_length)
|
||||
bidirectional=True,
|
||||
num_buckets=self.relative_attention_num_buckets,
|
||||
max_distance=self.relative_attention_max_distance,
|
||||
)
|
||||
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
|
||||
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
|
||||
return values
|
||||
|
||||
def forward(self, x, past_bias=None):
|
||||
q = self.q(x)
|
||||
k = self.k(x)
|
||||
v = self.v(x)
|
||||
if self.relative_attention_bias is not None:
|
||||
past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device)
|
||||
        mask = past_bias  # may be None; attention() accepts attn_mask=None
|
||||
out = attention(q, k * ((k.shape[-1] / self.num_heads) ** 0.5), v, self.num_heads, mask)
|
||||
return self.o(out), past_bias
|
||||
|
||||
|
||||
class T5LayerSelfAttention(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
model_dim,
|
||||
inner_dim,
|
||||
ff_dim,
|
||||
num_heads,
|
||||
relative_attention_bias,
|
||||
dtype,
|
||||
device,
|
||||
):
|
||||
super().__init__()
|
||||
self.SelfAttention = T5Attention(model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device)
|
||||
self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, x, past_bias=None):
|
||||
output, past_bias = self.SelfAttention(self.layer_norm(x), past_bias=past_bias)
|
||||
x += output
|
||||
return x, past_bias
|
||||
|
||||
|
||||
class T5Block(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
model_dim,
|
||||
inner_dim,
|
||||
ff_dim,
|
||||
num_heads,
|
||||
relative_attention_bias,
|
||||
dtype,
|
||||
device,
|
||||
):
|
||||
super().__init__()
|
||||
self.layer = torch.nn.ModuleList()
|
||||
self.layer.append(
|
||||
T5LayerSelfAttention(
|
||||
model_dim,
|
||||
inner_dim,
|
||||
ff_dim,
|
||||
num_heads,
|
||||
relative_attention_bias,
|
||||
dtype,
|
||||
device,
|
||||
)
|
||||
)
|
||||
self.layer.append(T5LayerFF(model_dim, ff_dim, dtype, device))
|
||||
|
||||
def forward(self, x, past_bias=None):
|
||||
x, past_bias = self.layer[0](x, past_bias)
|
||||
x = self.layer[-1](x)
|
||||
return x, past_bias
|
||||
|
||||
|
||||
class T5Stack(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
num_layers,
|
||||
model_dim,
|
||||
inner_dim,
|
||||
ff_dim,
|
||||
num_heads,
|
||||
vocab_size,
|
||||
dtype,
|
||||
device,
|
||||
):
|
||||
super().__init__()
|
||||
self.embed_tokens = torch.nn.Embedding(vocab_size, model_dim, device=device)
|
||||
self.block = torch.nn.ModuleList(
|
||||
[
|
||||
T5Block(
|
||||
model_dim,
|
||||
inner_dim,
|
||||
ff_dim,
|
||||
num_heads,
|
||||
relative_attention_bias=(i == 0),
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
for i in range(num_layers)
|
||||
]
|
||||
)
|
||||
self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
|
||||
|
||||
def forward(self, input_ids, intermediate_output=None, final_layer_norm_intermediate=True):
|
||||
intermediate = None
|
||||
x = self.embed_tokens(input_ids)
|
||||
past_bias = None
|
||||
for i, l in enumerate(self.block):
|
||||
x, past_bias = l(x, past_bias)
|
||||
if i == intermediate_output:
|
||||
intermediate = x.clone()
|
||||
x = self.final_layer_norm(x)
|
||||
if intermediate is not None and final_layer_norm_intermediate:
|
||||
intermediate = self.final_layer_norm(intermediate)
|
||||
return x, intermediate
|
||||
|
||||
|
||||
class T5(torch.nn.Module):
|
||||
def __init__(self, config_dict, dtype, device):
|
||||
super().__init__()
|
||||
self.num_layers = config_dict["num_layers"]
|
||||
self.encoder = T5Stack(
|
||||
self.num_layers,
|
||||
config_dict["d_model"],
|
||||
config_dict["d_model"],
|
||||
config_dict["d_ff"],
|
||||
config_dict["num_heads"],
|
||||
config_dict["vocab_size"],
|
||||
dtype,
|
||||
device,
|
||||
)
|
||||
self.dtype = dtype
|
||||
|
||||
def get_input_embeddings(self):
|
||||
return self.encoder.embed_tokens
|
||||
|
||||
def set_input_embeddings(self, embeddings):
|
||||
self.encoder.embed_tokens = embeddings
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
return self.encoder(*args, **kwargs)
|
||||
@@ -1,609 +0,0 @@
|
||||
# This file was originally copied from:
|
||||
# https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/sd3_impls.py
|
||||
|
||||
|
||||
### Impls of the SD3 core diffusion model and VAE
|
||||
|
||||
import math
|
||||
import re
|
||||
|
||||
import einops
|
||||
import torch
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.backend.sd3.mmditx import MMDiTX
|
||||
|
||||
#################################################################################################
|
||||
### MMDiT Model Wrapping
|
||||
#################################################################################################
|
||||
|
||||
|
||||
class ModelSamplingDiscreteFlow(torch.nn.Module):
|
||||
"""Helper for sampler scheduling (ie timestep/sigma calculations) for Discrete Flow models"""
|
||||
|
||||
def __init__(self, shift: float = 1.0):
|
||||
super().__init__()
|
||||
self.shift = shift
|
||||
timesteps = 1000
|
||||
ts = self.sigma(torch.arange(1, timesteps + 1, 1))
|
||||
self.register_buffer("sigmas", ts)
|
||||
|
||||
@property
|
||||
def sigma_min(self):
|
||||
return self.sigmas[0]
|
||||
|
||||
@property
|
||||
def sigma_max(self):
|
||||
return self.sigmas[-1]
|
||||
|
||||
def timestep(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return sigma * 1000
|
||||
|
||||
def sigma(self, timestep: torch.Tensor):
|
||||
timestep = timestep / 1000.0
|
||||
if self.shift == 1.0:
|
||||
return timestep
|
||||
return self.shift * timestep / (1 + (self.shift - 1) * timestep)
|
||||
|
||||
def calculate_denoised(
|
||||
self, sigma: torch.Tensor, model_output: torch.Tensor, model_input: torch.Tensor
|
||||
) -> torch.Tensor:
|
||||
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
|
||||
return model_input - model_output * sigma
|
||||
|
||||
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
|
||||
return sigma * noise + (1.0 - sigma) * latent_image
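    # Illustrative note (not part of the original file): the shift warps the flow
    # schedule, sigma(t) = shift * (t / 1000) / (1 + (shift - 1) * (t / 1000)).
    # For example, with shift = 3.0 a timestep of 500 gives
    # sigma = 3.0 * 0.5 / (1 + 2.0 * 0.5) = 0.75, i.e. the midpoint of the schedule
    # is pushed towards the noisier end; shift = 1.0 leaves it linear.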
|
||||
|
||||
|
||||
class BaseModel(torch.nn.Module):
|
||||
"""Wrapper around the core MM-DiT model"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
shift=1.0,
|
||||
device=None,
|
||||
dtype=torch.float32,
|
||||
file=None,
|
||||
prefix="",
|
||||
verbose=False,
|
||||
):
|
||||
super().__init__()
|
||||
# Important configuration values can be quickly determined by checking shapes in the source file
|
||||
# Some of these will vary between models (eg 2B vs 8B primarily differ in their depth, but also other details change)
|
||||
patch_size = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[2]
|
||||
depth = file.get_tensor(f"{prefix}x_embedder.proj.weight").shape[0] // 64
|
||||
num_patches = file.get_tensor(f"{prefix}pos_embed").shape[1]
|
||||
pos_embed_max_size = round(math.sqrt(num_patches))
|
||||
adm_in_channels = file.get_tensor(f"{prefix}y_embedder.mlp.0.weight").shape[1]
|
||||
context_shape = file.get_tensor(f"{prefix}context_embedder.weight").shape
|
||||
qk_norm = "rms" if f"{prefix}joint_blocks.0.context_block.attn.ln_k.weight" in file.keys() else None
|
||||
x_block_self_attn_layers = sorted(
|
||||
[
|
||||
int(key.split(".x_block.attn2.ln_k.weight")[0].split(".")[-1])
|
||||
for key in list(filter(re.compile(".*.x_block.attn2.ln_k.weight").match, file.keys()))
|
||||
]
|
||||
)
|
||||
|
||||
context_embedder_config = {
|
||||
"target": "torch.nn.Linear",
|
||||
"params": {
|
||||
"in_features": context_shape[1],
|
||||
"out_features": context_shape[0],
|
||||
},
|
||||
}
|
||||
self.diffusion_model = MMDiTX(
|
||||
input_size=None,
|
||||
pos_embed_scaling_factor=None,
|
||||
pos_embed_offset=None,
|
||||
pos_embed_max_size=pos_embed_max_size,
|
||||
patch_size=patch_size,
|
||||
in_channels=16,
|
||||
depth=depth,
|
||||
num_patches=num_patches,
|
||||
adm_in_channels=adm_in_channels,
|
||||
context_embedder_config=context_embedder_config,
|
||||
qk_norm=qk_norm,
|
||||
x_block_self_attn_layers=x_block_self_attn_layers,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
verbose=verbose,
|
||||
)
|
||||
self.model_sampling = ModelSamplingDiscreteFlow(shift=shift)
|
||||
|
||||
def apply_model(
|
||||
self, x: torch.Tensor, sigma: float, c_crossattn: torch.Tensor | None = None, y: torch.Tensor | None = None
|
||||
):
|
||||
dtype = self.get_dtype()
|
||||
timestep = self.model_sampling.timestep(sigma).float()
|
||||
model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float()
|
||||
return self.model_sampling.calculate_denoised(sigma, model_output, x)
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
return self.apply_model(*args, **kwargs)
|
||||
|
||||
def get_dtype(self):
|
||||
return self.diffusion_model.dtype
|
||||
|
||||
|
||||
class CFGDenoiser(torch.nn.Module):
|
||||
"""Helper for applying CFG Scaling to diffusion outputs"""
|
||||
|
||||
def __init__(self, model):
|
||||
super().__init__()
|
||||
self.model = model
|
||||
|
||||
def forward(self, x, timestep, cond, uncond, cond_scale):
|
||||
# Run cond and uncond in a batch together
|
||||
batched = self.model.apply_model(
|
||||
torch.cat([x, x]),
|
||||
torch.cat([timestep, timestep]),
|
||||
c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]),
|
||||
y=torch.cat([cond["y"], uncond["y"]]),
|
||||
)
|
||||
# Then split and apply CFG Scaling
|
||||
pos_out, neg_out = batched.chunk(2)
|
||||
scaled = neg_out + (pos_out - neg_out) * cond_scale
|
||||
return scaled
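    # Illustrative note (not part of the original file): the combination above is the
    # usual classifier-free guidance formula, scaled = uncond + (cond - uncond) * scale.
    # cond_scale = 1.0 reproduces the conditional prediction exactly, while larger
    # values extrapolate further away from the unconditional prediction.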
|
||||
|
||||
|
||||
class SD3LatentFormat:
|
||||
"""Latents are slightly shifted from center - this class must be called after VAE Decode to correct for the shift"""
|
||||
|
||||
def __init__(self):
|
||||
self.scale_factor = 1.5305
|
||||
self.shift_factor = 0.0609
|
||||
|
||||
def process_in(self, latent):
|
||||
return (latent - self.shift_factor) * self.scale_factor
|
||||
|
||||
def process_out(self, latent):
|
||||
return (latent / self.scale_factor) + self.shift_factor
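    # Illustrative note (not part of the original file): process_in maps VAE-encoded
    # latents into the range the diffusion model was trained on,
    # (z - 0.0609) * 1.5305, and process_out inverts it before decoding, so
    # process_out(process_in(z)) == z up to floating point error.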
|
||||
|
||||
def decode_latent_to_preview(self, x0):
|
||||
"""Quick RGB approximate preview of sd3 latents"""
|
||||
factors = torch.tensor(
|
||||
[
|
||||
[-0.0645, 0.0177, 0.1052],
|
||||
[0.0028, 0.0312, 0.0650],
|
||||
[0.1848, 0.0762, 0.0360],
|
||||
[0.0944, 0.0360, 0.0889],
|
||||
[0.0897, 0.0506, -0.0364],
|
||||
[-0.0020, 0.1203, 0.0284],
|
||||
[0.0855, 0.0118, 0.0283],
|
||||
[-0.0539, 0.0658, 0.1047],
|
||||
[-0.0057, 0.0116, 0.0700],
|
||||
[-0.0412, 0.0281, -0.0039],
|
||||
[0.1106, 0.1171, 0.1220],
|
||||
[-0.0248, 0.0682, -0.0481],
|
||||
[0.0815, 0.0846, 0.1207],
|
||||
[-0.0120, -0.0055, -0.0867],
|
||||
[-0.0749, -0.0634, -0.0456],
|
||||
[-0.1418, -0.1457, -0.1259],
|
||||
],
|
||||
device="cpu",
|
||||
)
|
||||
latent_image = x0[0].permute(1, 2, 0).cpu() @ factors
|
||||
|
||||
latents_ubyte = (
|
||||
((latent_image + 1) / 2)
|
||||
.clamp(0, 1) # change scale from -1..1 to 0..1
|
||||
.mul(0xFF) # to 0..255
|
||||
.byte()
|
||||
).cpu()
|
||||
|
||||
return Image.fromarray(latents_ubyte.numpy())
|
||||
|
||||
|
||||
#################################################################################################
|
||||
### Samplers
|
||||
#################################################################################################
|
||||
|
||||
|
||||
def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    return x[(...,) + (None,) * dims_to_append]


def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    return (x - denoised) / append_dims(sigma, x.ndim)
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
@torch.autocast("cuda", dtype=torch.float16)
|
||||
def sample_euler(model, x, sigmas, extra_args=None):
|
||||
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
|
||||
extra_args = {} if extra_args is None else extra_args
|
||||
s_in = x.new_ones([x.shape[0]])
|
||||
for i in tqdm(range(len(sigmas) - 1)):
|
||||
sigma_hat = sigmas[i]
|
||||
denoised = model(x, sigma_hat * s_in, **extra_args)
|
||||
d = to_d(x, sigma_hat, denoised)
|
||||
dt = sigmas[i + 1] - sigma_hat
|
||||
# Euler method
|
||||
x = x + d * dt
|
||||
return x
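# Hypothetical usage sketch (not from the original file; the schedule construction
# below is only an assumption for illustration): given a BaseModel `model`,
# conditioning dicts `cond` / `uncond` and an initial noise tensor, a run might look like
#   denoiser = CFGDenoiser(model)
#   sigmas = ...  # a decreasing schedule drawn from model.model_sampling.sigmas
#   x = sample_euler(denoiser, noise * sigmas[0], sigmas,
#                    extra_args={"cond": cond, "uncond": uncond, "cond_scale": 7.0})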
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
@torch.autocast("cuda", dtype=torch.float16)
|
||||
def sample_dpmpp_2m(model, x, sigmas, extra_args=None):
|
||||
"""DPM-Solver++(2M)."""
|
||||
extra_args = {} if extra_args is None else extra_args
|
||||
s_in = x.new_ones([x.shape[0]])
|
||||
sigma_fn = lambda t: t.neg().exp()
|
||||
t_fn = lambda sigma: sigma.log().neg()
|
||||
old_denoised = None
|
||||
for i in tqdm(range(len(sigmas) - 1)):
|
||||
denoised = model(x, sigmas[i] * s_in, **extra_args)
|
||||
t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
|
||||
h = t_next - t
|
||||
if old_denoised is None or sigmas[i + 1] == 0:
|
||||
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
|
||||
else:
|
||||
h_last = t - t_fn(sigmas[i - 1])
|
||||
r = h_last / h
|
||||
denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
|
||||
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
|
||||
old_denoised = denoised
|
||||
return x
|
||||
|
||||
|
||||
#################################################################################################
|
||||
### VAE
|
||||
#################################################################################################
|
||||
|
||||
|
||||
def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=None):
|
||||
return torch.nn.GroupNorm(
|
||||
num_groups=num_groups,
|
||||
num_channels=in_channels,
|
||||
eps=1e-6,
|
||||
affine=True,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
|
||||
|
||||
class ResnetBlock(torch.nn.Module):
|
||||
def __init__(self, *, in_channels, out_channels=None, dtype=torch.float32, device=None):
|
||||
super().__init__()
|
||||
self.in_channels = in_channels
|
||||
out_channels = in_channels if out_channels is None else out_channels
|
||||
self.out_channels = out_channels
|
||||
|
||||
self.norm1 = Normalize(in_channels, dtype=dtype, device=device)
|
||||
self.conv1 = torch.nn.Conv2d(
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size=3,
|
||||
stride=1,
|
||||
padding=1,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
self.norm2 = Normalize(out_channels, dtype=dtype, device=device)
|
||||
self.conv2 = torch.nn.Conv2d(
|
||||
out_channels,
|
||||
out_channels,
|
||||
kernel_size=3,
|
||||
stride=1,
|
||||
padding=1,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
if self.in_channels != self.out_channels:
|
||||
self.nin_shortcut = torch.nn.Conv2d(
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
self.nin_shortcut = None
|
||||
self.swish = torch.nn.SiLU(inplace=True)
|
||||
|
||||
def forward(self, x):
|
||||
hidden = x
|
||||
        hidden = self.norm1(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv1(hidden)
        hidden = self.norm2(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv2(hidden)
        if self.in_channels != self.out_channels:
            x = self.nin_shortcut(x)
        return x + hidden


class AttnBlock(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        self.q = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            dtype=dtype,
            device=device,
        )
        self.k = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            dtype=dtype,
            device=device,
        )
        self.v = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            dtype=dtype,
            device=device,
        )
        self.proj_out = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            dtype=dtype,
            device=device,
        )

    def forward(self, x):
        hidden = self.norm(x)
        q = self.q(hidden)
        k = self.k(hidden)
        v = self.v(hidden)
        b, c, h, w = q.shape
        q, k, v = map(
            lambda x: einops.rearrange(x, "b c h w -> b 1 (h w) c").contiguous(),
            (q, k, v),
        )
        hidden = torch.nn.functional.scaled_dot_product_attention(q, k, v)  # scale is dim ** -0.5 per default
        hidden = einops.rearrange(hidden, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
        hidden = self.proj_out(hidden)
        return x + hidden


class Downsample(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.conv = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=3,
            stride=2,
            padding=0,
            dtype=dtype,
            device=device,
        )

    def forward(self, x):
        pad = (0, 1, 0, 1)
        x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
        x = self.conv(x)
        return x


class Upsample(torch.nn.Module):
    def __init__(self, in_channels, dtype=torch.float32, device=None):
        super().__init__()
        self.conv = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            dtype=dtype,
            device=device,
        )

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        x = self.conv(x)
        return x


class VAEEncoder(torch.nn.Module):
    def __init__(
        self,
        ch=128,
        ch_mult=(1, 2, 4, 4),
        num_res_blocks=2,
        in_channels=3,
        z_channels=16,
        dtype=torch.float32,
        device=None,
    ):
        super().__init__()
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        # downsampling
        self.conv_in = torch.nn.Conv2d(
            in_channels,
            ch,
            kernel_size=3,
            stride=1,
            padding=1,
            dtype=dtype,
            device=device,
        )
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = torch.nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = torch.nn.ModuleList()
            attn = torch.nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(num_res_blocks):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        dtype=dtype,
                        device=device,
                    )
                )
                block_in = block_out
            down = torch.nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, dtype=dtype, device=device)
            self.down.append(down)
        # middle
        self.mid = torch.nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        # end
        self.norm_out = Normalize(block_in, dtype=dtype, device=device)
        self.conv_out = torch.nn.Conv2d(
            block_in,
            2 * z_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            dtype=dtype,
            device=device,
        )
        self.swish = torch.nn.SiLU(inplace=True)

    def forward(self, x):
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = self.swish(h)
        h = self.conv_out(h)
        return h


class VAEDecoder(torch.nn.Module):
    def __init__(
        self,
        ch=128,
        out_ch=3,
        ch_mult=(1, 2, 4, 4),
        num_res_blocks=2,
        resolution=256,
        z_channels=16,
        dtype=torch.float32,
        device=None,
    ):
        super().__init__()
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        # z to block_in
        self.conv_in = torch.nn.Conv2d(
            z_channels,
            block_in,
            kernel_size=3,
            stride=1,
            padding=1,
            dtype=dtype,
            device=device,
        )
        # middle
        self.mid = torch.nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
        # upsampling
        self.up = torch.nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = torch.nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(
                    ResnetBlock(
                        in_channels=block_in,
                        out_channels=block_out,
                        dtype=dtype,
                        device=device,
                    )
                )
                block_in = block_out
            up = torch.nn.Module()
            up.block = block
            if i_level != 0:
                up.upsample = Upsample(block_in, dtype=dtype, device=device)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in, dtype=dtype, device=device)
        self.conv_out = torch.nn.Conv2d(
            block_in,
            out_ch,
            kernel_size=3,
            stride=1,
            padding=1,
            dtype=dtype,
            device=device,
        )
        self.swish = torch.nn.SiLU(inplace=True)

    def forward(self, z):
        # z to block_in
        hidden = self.conv_in(z)
        # middle
        hidden = self.mid.block_1(hidden)
        hidden = self.mid.attn_1(hidden)
        hidden = self.mid.block_2(hidden)
        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                hidden = self.up[i_level].block[i_block](hidden)
            if i_level != 0:
                hidden = self.up[i_level].upsample(hidden)
        # end
        hidden = self.norm_out(hidden)
        hidden = self.swish(hidden)
        hidden = self.conv_out(hidden)
        return hidden


class SDVAE(torch.nn.Module):
    def __init__(self, dtype=torch.float32, device=None):
        super().__init__()
        self.encoder = VAEEncoder(dtype=dtype, device=device)
        self.decoder = VAEDecoder(dtype=dtype, device=device)

    @torch.autocast("cuda", dtype=torch.float16)
    def decode(self, latent):
        return self.decoder(latent)

    @torch.autocast("cuda", dtype=torch.float16)
    def encode(self, image):
        hidden = self.encoder(image)
        mean, logvar = torch.chunk(hidden, 2, dim=1)
        logvar = torch.clamp(logvar, -30.0, 20.0)
        std = torch.exp(0.5 * logvar)
        return mean + std * torch.randn_like(mean)
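Editor's note: `SDVAE.encode` above returns a single reparameterized sample from the encoder's posterior rather than the (mean, logvar) pair. A minimal standalone sketch of just that step, assuming an encoder output with `2 * z_channels` channels as produced by `VAEEncoder.conv_out` (the tensor shape is illustrative only):

import torch

# Sketch: `enc_out` stands in for VAEEncoder(x), shape [B, 2 * z_channels, H / 8, W / 8].
enc_out = torch.randn(1, 32, 64, 64)
mean, logvar = torch.chunk(enc_out, 2, dim=1)  # split channels into mean and log-variance
logvar = torch.clamp(logvar, -30.0, 20.0)      # same clamp as SDVAE.encode
std = torch.exp(0.5 * logvar)
latent = mean + std * torch.randn_like(mean)   # reparameterization: sample = mean + std * eps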
@@ -1,426 +0,0 @@
# This file was originally copied from:
# https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/sd3_infer.py

# NOTE: Must have folder `models` with the following files:
# - `clip_g.safetensors` (openclip bigG, same as SDXL)
# - `clip_l.safetensors` (OpenAI CLIP-L, same as SDXL)
# - `t5xxl.safetensors` (google T5-v1.1-XXL)
# - `sd3_medium.safetensors` (or whichever main MMDiT model file)
# Also can have
# - `sd3_vae.safetensors` (holds the VAE separately if needed)

import datetime
import math
import os

import fire
import numpy as np
import sd3_impls
import torch
from other_impls import SD3Tokenizer, SDClipModel, SDXLClipG, T5XXLModel
from PIL import Image
from safetensors import safe_open
from sd3_impls import SDVAE, BaseModel, CFGDenoiser, SD3LatentFormat
from tqdm import tqdm

#################################################################################################
### Wrappers for model parts
#################################################################################################


def load_into(f, model, prefix, device, dtype=None):
    """Just a debugging-friendly hack to apply the weights in a safetensors file to the pytorch module."""
    for key in f.keys():
        if key.startswith(prefix) and not key.startswith("loss."):
            path = key[len(prefix) :].split(".")
            obj = model
            for p in path:
                if obj is list:
                    obj = obj[int(p)]
                else:
                    obj = getattr(obj, p, None)
                    if obj is None:
                        print(f"Skipping key '{key}' in safetensors file as '{p}' does not exist in python model")
                        break
            if obj is None:
                continue
            try:
                tensor = f.get_tensor(key).to(device=device)
                if dtype is not None:
                    tensor = tensor.to(dtype=dtype)
                obj.requires_grad_(False)
                obj.set_(tensor)
            except Exception as e:
                print(f"Failed to load key '{key}' in safetensors file: {e}")
                raise e


CLIPG_CONFIG = {
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "intermediate_size": 5120,
    "num_attention_heads": 20,
    "num_hidden_layers": 32,
}


class ClipG:
    def __init__(self):
        with safe_open("models/clip_g.safetensors", framework="pt", device="cpu") as f:
            self.model = SDXLClipG(CLIPG_CONFIG, device="cpu", dtype=torch.float32)
            load_into(f, self.model.transformer, "", "cpu", torch.float32)


CLIPL_CONFIG = {
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "intermediate_size": 3072,
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
}


class ClipL:
    def __init__(self):
        with safe_open("models/clip_l.safetensors", framework="pt", device="cpu") as f:
            self.model = SDClipModel(
                layer="hidden",
                layer_idx=-2,
                device="cpu",
                dtype=torch.float32,
                layer_norm_hidden_state=False,
                return_projected_pooled=False,
                textmodel_json_config=CLIPL_CONFIG,
            )
            load_into(f, self.model.transformer, "", "cpu", torch.float32)


T5_CONFIG = {
    "d_ff": 10240,
    "d_model": 4096,
    "num_heads": 64,
    "num_layers": 24,
    "vocab_size": 32128,
}


class T5XXL:
    def __init__(self):
        with safe_open("models/t5xxl.safetensors", framework="pt", device="cpu") as f:
            self.model = T5XXLModel(T5_CONFIG, device="cpu", dtype=torch.float32)
            load_into(f, self.model.transformer, "", "cpu", torch.float32)


class SD3:
    def __init__(self, model, shift, verbose=False):
        with safe_open(model, framework="pt", device="cpu") as f:
            self.model = BaseModel(
                shift=shift,
                file=f,
                prefix="model.diffusion_model.",
                device="cpu",
                dtype=torch.float16,
                verbose=verbose,
            ).eval()
            load_into(f, self.model, "model.", "cpu", torch.float16)


class VAE:
    def __init__(self, model):
        with safe_open(model, framework="pt", device="cpu") as f:
            self.model = SDVAE(device="cpu", dtype=torch.float16).eval().cpu()
            prefix = ""
            if any(k.startswith("first_stage_model.") for k in f.keys()):
                prefix = "first_stage_model."
            load_into(f, self.model, prefix, "cpu", torch.float16)


#################################################################################################
### Main inference logic
#################################################################################################


# Note: Sigma shift value, publicly released models use 3.0
SHIFT = 3.0
# Naturally, adjust to the width/height of the model you have
WIDTH = 1024
HEIGHT = 1024
# Pick your prompt
PROMPT = "a photo of a cat"
# Most models prefer the range of 4-5, but still work well around 7
CFG_SCALE = 4.5
# Different models want different step counts but most will be good at 50, albeit that's slow to run
# sd3_medium is quite decent at 28 steps
STEPS = 40
# Seed
SEED = 23
# SEEDTYPE = "fixed"
SEEDTYPE = "rand"
# SEEDTYPE = "roll"
# Actual model file path
# MODEL = "models/sd3_medium.safetensors"
# MODEL = "models/sd3.5_large_turbo.safetensors"
MODEL = "models/sd3.5_large.safetensors"
# VAE model file path, or set None to use the same model file
VAEFile = None  # "models/sd3_vae.safetensors"
# Optional init image file path
INIT_IMAGE = None
# If init_image is given, this is the percentage of denoising steps to run (1.0 = full denoise, 0.0 = no denoise at all)
DENOISE = 0.6
# Output file path
OUTDIR = "outputs"
# SAMPLER
# SAMPLER = "euler"
SAMPLER = "dpmpp_2m"


class SD3Inferencer:
    def print(self, txt):
        if self.verbose:
            print(txt)

    def load(self, model=MODEL, vae=VAEFile, shift=SHIFT, verbose=False):
        self.verbose = verbose
        print("Loading tokenizers...")
        # NOTE: if you need a reference impl for a high performance CLIP tokenizer instead of just using the HF transformers one,
        # check https://github.com/Stability-AI/StableSwarmUI/blob/master/src/Utils/CliplikeTokenizer.cs
        # (T5 tokenizer is different though)
        self.tokenizer = SD3Tokenizer()
        print("Loading OpenAI CLIP L...")
        self.clip_l = ClipL()
        print("Loading OpenCLIP bigG...")
        self.clip_g = ClipG()
        print("Loading Google T5-v1-XXL...")
        self.t5xxl = T5XXL()
        print(f"Loading SD3 model {os.path.basename(model)}...")
        self.sd3 = SD3(model, shift, verbose)
        print("Loading VAE model...")
        self.vae = VAE(vae or model)
        print("Models loaded.")

    def get_empty_latent(self, width, height):
        self.print("Prep an empty latent...")
        return torch.ones(1, 16, height // 8, width // 8, device="cpu") * 0.0609

    def get_sigmas(self, sampling, steps):
        start = sampling.timestep(sampling.sigma_max)
        end = sampling.timestep(sampling.sigma_min)
        timesteps = torch.linspace(start, end, steps)
        sigs = []
        for x in range(len(timesteps)):
            ts = timesteps[x]
            sigs.append(sampling.sigma(ts))
        sigs += [0.0]
        return torch.FloatTensor(sigs)

    def get_noise(self, seed, latent):
        generator = torch.manual_seed(seed)
        self.print(f"dtype = {latent.dtype}, layout = {latent.layout}, device = {latent.device}")
        return torch.randn(
            latent.size(),
            dtype=torch.float32,
            layout=latent.layout,
            generator=generator,
            device="cpu",
        ).to(latent.dtype)

    def get_cond(self, prompt):
        self.print("Encode prompt...")
        tokens = self.tokenizer.tokenize_with_weights(prompt)
        l_out, l_pooled = self.clip_l.model.encode_token_weights(tokens["l"])
        g_out, g_pooled = self.clip_g.model.encode_token_weights(tokens["g"])
        t5_out, t5_pooled = self.t5xxl.model.encode_token_weights(tokens["t5xxl"])
        lg_out = torch.cat([l_out, g_out], dim=-1)
        lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1]))
        return torch.cat([lg_out, t5_out], dim=-2), torch.cat((l_pooled, g_pooled), dim=-1)

    def max_denoise(self, sigmas):
        max_sigma = float(self.sd3.model.model_sampling.sigma_max)
        sigma = float(sigmas[0])
        return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma

    def fix_cond(self, cond):
        cond, pooled = (cond[0].half().cuda(), cond[1].half().cuda())
        return {"c_crossattn": cond, "y": pooled}

    def do_sampling(
        self,
        latent,
        seed,
        conditioning,
        neg_cond,
        steps,
        cfg_scale,
        sampler="dpmpp_2m",
        denoise=1.0,
    ) -> torch.Tensor:
        self.print("Sampling...")
        latent = latent.half().cuda()
        self.sd3.model = self.sd3.model.cuda()
        noise = self.get_noise(seed, latent).cuda()
        sigmas = self.get_sigmas(self.sd3.model.model_sampling, steps).cuda()
        sigmas = sigmas[int(steps * (1 - denoise)) :]
        conditioning = self.fix_cond(conditioning)
        neg_cond = self.fix_cond(neg_cond)
        extra_args = {"cond": conditioning, "uncond": neg_cond, "cond_scale": cfg_scale}
        noise_scaled = self.sd3.model.model_sampling.noise_scaling(sigmas[0], noise, latent, self.max_denoise(sigmas))
        sample_fn = getattr(sd3_impls, f"sample_{sampler}")
        latent = sample_fn(CFGDenoiser(self.sd3.model), noise_scaled, sigmas, extra_args=extra_args)
        latent = SD3LatentFormat().process_out(latent)
        self.sd3.model = self.sd3.model.cpu()
        self.print("Sampling done")
        return latent

    def vae_encode(self, image) -> torch.Tensor:
        self.print("Encoding image to latent...")
        image = image.convert("RGB")
        image_np = np.array(image).astype(np.float32) / 255.0
        image_np = np.moveaxis(image_np, 2, 0)
        batch_images = np.expand_dims(image_np, axis=0).repeat(1, axis=0)
        image_torch = torch.from_numpy(batch_images)
        image_torch = 2.0 * image_torch - 1.0
        image_torch = image_torch.cuda()
        self.vae.model = self.vae.model.cuda()
        latent = self.vae.model.encode(image_torch).cpu()
        self.vae.model = self.vae.model.cpu()
        self.print("Encoded")
        return latent

    def vae_decode(self, latent) -> Image.Image:
        self.print("Decoding latent to image...")
        latent = latent.cuda()
        self.vae.model = self.vae.model.cuda()
        image = self.vae.model.decode(latent)
        image = image.float()
        self.vae.model = self.vae.model.cpu()
        image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)[0]
        decoded_np = 255.0 * np.moveaxis(image.cpu().numpy(), 0, 2)
        decoded_np = decoded_np.astype(np.uint8)
        out_image = Image.fromarray(decoded_np)
        self.print("Decoded")
        return out_image

    def gen_image(
        self,
        prompts=[PROMPT],
        width=WIDTH,
        height=HEIGHT,
        steps=STEPS,
        cfg_scale=CFG_SCALE,
        sampler=SAMPLER,
        seed=SEED,
        seed_type=SEEDTYPE,
        out_dir=OUTDIR,
        init_image=INIT_IMAGE,
        denoise=DENOISE,
    ):
        latent = self.get_empty_latent(width, height)
        if init_image:
            image_data = Image.open(init_image)
            image_data = image_data.resize((width, height), Image.LANCZOS)
            latent = self.vae_encode(image_data)
            latent = SD3LatentFormat().process_in(latent)
        neg_cond = self.get_cond("")
        seed_num = None
        pbar = tqdm(enumerate(prompts), total=len(prompts), position=0, leave=True)
        for i, prompt in pbar:
            if seed_type == "roll":
                seed_num = seed if seed_num is None else seed_num + 1
            elif seed_type == "rand":
                seed_num = torch.randint(0, 100000, (1,)).item()
            else:  # fixed
                seed_num = seed
            conditioning = self.get_cond(prompt)
            sampled_latent = self.do_sampling(
                latent,
                seed_num,
                conditioning,
                neg_cond,
                steps,
                cfg_scale,
                sampler,
                denoise if init_image else 1.0,
            )
            image = self.vae_decode(sampled_latent)
            save_path = os.path.join(out_dir, f"{i:06d}.png")
            self.print(f"Will save to {save_path}")
            image.save(save_path)
        self.print("Done")


CONFIGS = {
    "sd3_medium": {
        "shift": 1.0,
        "cfg": 5.0,
        "steps": 50,
        "sampler": "dpmpp_2m",
    },
    "sd3.5_large": {
        "shift": 3.0,
        "cfg": 4.5,
        "steps": 40,
        "sampler": "dpmpp_2m",
    },
    "sd3.5_large_turbo": {"shift": 3.0, "cfg": 1.0, "steps": 4, "sampler": "euler"},
}


@torch.no_grad()
def main(
    prompt=PROMPT,
    model=MODEL,
    out_dir=OUTDIR,
    postfix=None,
    seed=SEED,
    seed_type=SEEDTYPE,
    sampler=None,
    steps=None,
    cfg=None,
    shift=None,
    width=WIDTH,
    height=HEIGHT,
    vae=VAEFile,
    init_image=INIT_IMAGE,
    denoise=DENOISE,
    verbose=False,
):
    steps = steps or CONFIGS[os.path.splitext(os.path.basename(model))[0]]["steps"]
    cfg = cfg or CONFIGS[os.path.splitext(os.path.basename(model))[0]]["cfg"]
    shift = shift or CONFIGS[os.path.splitext(os.path.basename(model))[0]]["shift"]
    sampler = sampler or CONFIGS[os.path.splitext(os.path.basename(model))[0]]["sampler"]

    inferencer = SD3Inferencer()
    inferencer.load(model, vae, shift, verbose)

    if isinstance(prompt, str):
        if os.path.splitext(prompt)[-1] == ".txt":
            with open(prompt, "r") as f:
                prompts = [l.strip() for l in f.readlines()]
        else:
            prompts = [prompt]

    out_dir = os.path.join(
        out_dir,
        os.path.splitext(os.path.basename(model))[0],
        os.path.splitext(os.path.basename(prompt))[0][:50]
        + (postfix or datetime.datetime.now().strftime("_%Y-%m-%dT%H-%M-%S")),
    )
    print(f"Saving images to {out_dir}")
    os.makedirs(out_dir, exist_ok=False)

    inferencer.gen_image(
        prompts,
        width,
        height,
        steps,
        cfg,
        sampler,
        seed,
        seed_type,
        out_dir,
        init_image,
        denoise,
    )


fire.Fire(main)
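Editor's note: because the script ends with `fire.Fire(main)`, each keyword argument of `main` also doubles as a command-line flag. The same flow can be driven programmatically; a minimal sketch, assuming a CUDA device and the `models/` files listed at the top of the script (paths and settings are placeholders, not project defaults beyond what the constants above show):

inferencer = SD3Inferencer()
inferencer.load(model="models/sd3.5_large.safetensors", vae=None, shift=3.0, verbose=True)
inferencer.gen_image(
    prompts=["a photo of a cat"],
    width=1024,
    height=1024,
    steps=40,
    cfg_scale=4.5,
    sampler="dpmpp_2m",
    seed=23,
    seed_type="fixed",
    out_dir="outputs",
)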
@@ -1,72 +0,0 @@
from dataclasses import dataclass
from typing import Literal, TypedDict

import torch

from invokeai.backend.sd3.mmditx import MMDiTX
from invokeai.backend.sd3.sd3_impls import ModelSamplingDiscreteFlow


class ContextEmbedderConfig(TypedDict):
    target: Literal["torch.nn.Linear"]
    params: dict[str, int]


@dataclass
class Sd3MMDiTXParams:
    patch_size: int
    depth: int
    num_patches: int
    pos_embed_max_size: int
    adm_in_channels: int
    context_shape: tuple[int, int]
    qk_norm: Literal["rms", None]
    x_block_self_attn_layers: list[int]
    context_embedder_config: ContextEmbedderConfig


class Sd3MMDiTX(torch.nn.Module):
    """This class is based closely on
    https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/sd3_impls.py#L53
    but has more standard model loading semantics.
    """

    def __init__(
        self,
        params: Sd3MMDiTXParams,
        shift: float = 1.0,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        verbose: bool = False,
    ):
        super().__init__()
        self.diffusion_model = MMDiTX(
            input_size=None,
            pos_embed_scaling_factor=None,
            pos_embed_offset=None,
            pos_embed_max_size=params.pos_embed_max_size,
            patch_size=params.patch_size,
            in_channels=16,
            depth=params.depth,
            num_patches=params.num_patches,
            adm_in_channels=params.adm_in_channels,
            context_embedder_config=params.context_embedder_config,
            qk_norm=params.qk_norm,
            x_block_self_attn_layers=params.x_block_self_attn_layers,
            device=device,
            dtype=dtype,
            verbose=verbose,
        )
        self.model_sampling = ModelSamplingDiscreteFlow(shift=shift)

    def apply_model(self, x: torch.Tensor, sigma: torch.Tensor, c_crossattn: torch.Tensor, y: torch.Tensor):
        dtype = self.get_dtype()
        timestep = self.model_sampling.timestep(sigma).float()
        model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float()
        return self.model_sampling.calculate_denoised(sigma, model_output, x)

    def forward(self, x: torch.Tensor, sigma: float, c_crossattn: torch.Tensor, y: torch.Tensor):
        return self.apply_model(x=x, sigma=sigma, c_crossattn=c_crossattn, y=y)

    def get_dtype(self):
        return self.diffusion_model.dtype
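Editor's note: `apply_model` returns the denoised prediction for the current sigma, so a k-diffusion-style sampler can drive it directly. A schematic Euler loop is sketched below purely for illustration; it is not the sampler used by the reference script (that comes from `sd3_impls`), and it assumes `model` is an `Sd3MMDiTX`, `sigmas` is a decreasing schedule ending at 0, and `c_crossattn`/`y` are conditioning tensors prepared as in the inference script above. The reference script also mixes noise with an init latent via `model_sampling.noise_scaling`; here the start state is simply noise scaled by the first sigma.

import torch

def euler_sample_sketch(model, noise, sigmas, c_crossattn, y):
    # Start from noise scaled by the first sigma (simplified; see note above).
    x = noise * sigmas[0]
    for i in range(len(sigmas) - 1):
        sigma = sigmas[i] * torch.ones(x.shape[0], device=x.device)
        denoised = model.apply_model(x, sigma, c_crossattn=c_crossattn, y=y)
        d = (x - denoised) / sigmas[i]           # current direction estimate
        x = x + d * (sigmas[i + 1] - sigmas[i])  # Euler step toward the next sigma
    return x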
@@ -1,70 +0,0 @@
import math
import re
from typing import Any, Dict

from invokeai.backend.sd3.sd3_mmditx import ContextEmbedderConfig, Sd3MMDiTXParams


def is_sd3_checkpoint(sd: Dict[str, Any]) -> bool:
    """Is the state dict for an SD3 checkpoint like this one?:
    https://huggingface.co/stabilityai/stable-diffusion-3.5-large/blob/main/sd3.5_large.safetensors

    Note that this checkpoint format contains both the VAE and the MMDiTX model.

    This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision.
    """
    # If all of the expected keys are present, then this is very likely a SD3 checkpoint.
    expected_keys = {
        # VAE decoder and encoder keys.
        "first_stage_model.decoder.conv_in.bias",
        "first_stage_model.decoder.conv_in.weight",
        "first_stage_model.encoder.conv_in.bias",
        "first_stage_model.encoder.conv_in.weight",
        # MMDiTX keys.
        "model.diffusion_model.final_layer.linear.bias",
        "model.diffusion_model.final_layer.linear.weight",
        "model.diffusion_model.joint_blocks.0.context_block.attn.ln_k.weight",
        "model.diffusion_model.joint_blocks.0.context_block.attn.ln_q.weight",
    }

    return expected_keys.issubset(sd.keys())


def infer_sd3_mmditx_params(sd: Dict[str, Any], prefix: str = "model.diffusion_model.") -> Sd3MMDiTXParams:
    """Infer the MMDiTX model parameters from the state dict.

    This logic is based on:
    https://github.com/Stability-AI/sd3.5/blob/19bf11c4e1e37324c5aa5a61f010d4127848a09c/sd3_impls.py#L68-L88
    """
    patch_size = sd[f"{prefix}x_embedder.proj.weight"].shape[2]
    depth = sd[f"{prefix}x_embedder.proj.weight"].shape[0] // 64
    num_patches = sd[f"{prefix}pos_embed"].shape[1]
    pos_embed_max_size = round(math.sqrt(num_patches))
    adm_in_channels = sd[f"{prefix}y_embedder.mlp.0.weight"].shape[1]
    context_shape = sd[f"{prefix}context_embedder.weight"].shape
    qk_norm = "rms" if f"{prefix}joint_blocks.0.context_block.attn.ln_k.weight" in sd else None
    x_block_self_attn_layers = sorted(
        [
            int(key.split(".x_block.attn2.ln_k.weight")[0].split(".")[-1])
            for key in list(filter(re.compile(".*.x_block.attn2.ln_k.weight").match, sd.keys()))
        ]
    )

    context_embedder_config: ContextEmbedderConfig = {
        "target": "torch.nn.Linear",
        "params": {
            "in_features": context_shape[1],
            "out_features": context_shape[0],
        },
    }
    return Sd3MMDiTXParams(
        patch_size=patch_size,
        depth=depth,
        num_patches=num_patches,
        pos_embed_max_size=pos_embed_max_size,
        adm_in_channels=adm_in_channels,
        context_shape=context_shape,
        qk_norm=qk_norm,
        x_block_self_attn_layers=x_block_self_attn_layers,
        context_embedder_config=context_embedder_config,
    )
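Editor's note: a hedged usage sketch of the two helpers above. The checkpoint path is a placeholder, and `load_file` is the standard safetensors loader rather than anything defined in this PR; `Sd3MMDiTX` is the wrapper shown earlier.

import torch
from safetensors.torch import load_file

sd = load_file("path/to/sd3.5_large.safetensors")  # placeholder path
if is_sd3_checkpoint(sd):
    # Reads shapes such as x_embedder.proj.weight and pos_embed to size the model.
    params = infer_sd3_mmditx_params(sd)
    model = Sd3MMDiTX(params, shift=3.0, dtype=torch.float16)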
@@ -58,7 +58,7 @@
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.1.0",
    "@invoke-ai/ui-library": "^0.0.42",
    "@invoke-ai/ui-library": "^0.0.43",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",

invokeai/frontend/web/pnpm-lock.yaml (generated, 14 lines changed)
@@ -24,8 +24,8 @@ dependencies:
      specifier: ^5.1.0
      version: 5.1.0
    '@invoke-ai/ui-library':
      specifier: ^0.0.42
      version: 0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
      specifier: ^0.0.43
      version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
    '@nanostores/react':
      specifier: ^0.7.3
      version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
@@ -1696,20 +1696,20 @@ packages:
      prettier: 3.3.3
    dev: true

  /@invoke-ai/ui-library@0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-OuDXRipBO5mu+Nv4qN8cd8MiwiGBdq6h4PirVgPI9/ltbdcIzePgUJ0dJns26lflHSTRWW38I16wl4YTw3mNWA==}
  /@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
      react-dom: ^18.2.0
    dependencies:
      '@chakra-ui/anatomy': 2.2.2
      '@chakra-ui/anatomy': 2.3.4
      '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
      '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
      '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
      '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
      '@fontsource-variable/inter': 5.1.0
@@ -94,6 +94,7 @@
    "close": "Close",
    "copy": "Copy",
    "copyError": "$t(gallery.copy) Error",
    "clipboard": "Clipboard",
    "on": "On",
    "off": "Off",
    "or": "or",
@@ -1251,6 +1252,33 @@
      "heading": "Mask Adjustments",
      "paragraphs": ["Adjust the mask."]
    },
    "inpainting": {
      "heading": "Inpainting",
      "paragraphs": ["Controls which area is modified, guided by Denoising Strength."]
    },
    "rasterLayer": {
      "heading": "Raster Layer",
      "paragraphs": ["Pixel-based content of your canvas, used during image generation."]
    },
    "regionalGuidance": {
      "heading": "Regional Guidance",
      "paragraphs": ["Brush to guide where elements from global prompts should appear."]
    },
    "regionalGuidanceAndReferenceImage": {
      "heading": "Regional Guidance and Regional Reference Image",
      "paragraphs": [
        "For Regional Guidance, brush to guide where elements from global prompts should appear.",
        "For Regional Reference Image, brush to apply a reference image to specific areas."
      ]
    },
    "globalReferenceImage": {
      "heading": "Global Reference Image",
      "paragraphs": ["Applies a reference image to influence the entire generation."]
    },
    "regionalReferenceImage": {
      "heading": "Regional Reference Image",
      "paragraphs": ["Brush to apply a reference image to specific areas."]
    },
    "controlNet": {
      "heading": "ControlNet",
      "paragraphs": [
@@ -1688,8 +1716,18 @@
    "layer_other": "Layers",
    "layer_withCount_one": "Layer ({{count}})",
    "layer_withCount_other": "Layers ({{count}})",
    "convertToControlLayer": "Convert to Control Layer",
    "convertToRasterLayer": "Convert to Raster Layer",
    "convertRasterLayerTo": "Convert $t(controlLayers.rasterLayer) To",
    "convertControlLayerTo": "Convert $t(controlLayers.controlLayer) To",
    "convertInpaintMaskTo": "Convert $t(controlLayers.inpaintMask) To",
    "convertRegionalGuidanceTo": "Convert $t(controlLayers.regionalGuidance) To",
    "copyRasterLayerTo": "Copy $t(controlLayers.rasterLayer) To",
    "copyControlLayerTo": "Copy $t(controlLayers.controlLayer) To",
    "copyInpaintMaskTo": "Copy $t(controlLayers.inpaintMask) To",
    "copyRegionalGuidanceTo": "Copy $t(controlLayers.regionalGuidance) To",
    "newRasterLayer": "New $t(controlLayers.rasterLayer)",
    "newControlLayer": "New $t(controlLayers.controlLayer)",
    "newInpaintMask": "New $t(controlLayers.inpaintMask)",
    "newRegionalGuidance": "New $t(controlLayers.regionalGuidance)",
    "transparency": "Transparency",
    "enableTransparencyEffect": "Enable Transparency Effect",
    "disableTransparencyEffect": "Disable Transparency Effect",
@@ -1845,11 +1883,11 @@
    "segment": {
      "autoMask": "Auto Mask",
      "pointType": "Point Type",
      "foreground": "Foreground",
      "background": "Background",
      "include": "Include",
      "exclude": "Exclude",
      "neutral": "Neutral",
      "reset": "Reset",
      "apply": "Apply",
      "saveAs": "Save As",
      "cancel": "Cancel",
      "process": "Process"
    },
@@ -26,5 +26,9 @@ export const IconMenuItem = ({ tooltip, icon, ...props }: Props) => {
};

export const IconMenuItemGroup = ({ children }: { children: ReactNode }) => {
  return <Flex gap={2}>{children}</Flex>;
  return (
    <Flex gap={2} justifyContent="space-between">
      {children}
    </Flex>
  );
};
@@ -23,8 +23,10 @@ export type Feature =
  | 'dynamicPrompts'
  | 'dynamicPromptsMaxPrompts'
  | 'dynamicPromptsSeedBehaviour'
  | 'globalReferenceImage'
  | 'imageFit'
  | 'infillMethod'
  | 'inpainting'
  | 'ipAdapterMethod'
  | 'lora'
  | 'loraWeight'
@@ -46,6 +48,7 @@ export type Feature =
  | 'paramVAEPrecision'
  | 'paramWidth'
  | 'patchmatchDownScaleSize'
  | 'rasterLayer'
  | 'refinerModel'
  | 'refinerNegativeAestheticScore'
  | 'refinerPositiveAestheticScore'
@@ -53,6 +56,9 @@ export type Feature =
  | 'refinerStart'
  | 'refinerSteps'
  | 'refinerCfgScale'
  | 'regionalGuidance'
  | 'regionalGuidanceAndReferenceImage'
  | 'regionalReferenceImage'
  | 'scaleBeforeProcessing'
  | 'seamlessTilingXAxis'
  | 'seamlessTilingYAxis'
@@ -76,6 +82,24 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
  clipSkip: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings',
  },
  inpainting: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000096702-inpainting-outpainting-and-bounding-box',
  },
  rasterLayer: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000094998-raster-layers-and-initial-images',
  },
  regionalGuidance: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000165024-regional-guidance-layers',
  },
  regionalGuidanceAndReferenceImage: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000165024-regional-guidance-layers',
  },
  globalReferenceImage: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000159340-global-and-regional-reference-images-ip-adapters-',
  },
  regionalReferenceImage: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000159340-global-and-regional-reference-images-ip-adapters-',
  },
  controlNet: {
    href: 'https://support.invoke.ai/support/solutions/articles/151000105880',
  },
@@ -127,8 +127,6 @@ export const buildUseDisclosure = (defaultIsOpen: boolean): [() => UseDisclosure
 *
 * Hook to manage a boolean state. Use this for a local boolean state.
 * @param defaultIsOpen Initial state of the disclosure
 *
 * @knipignore
 */
export const useDisclosure = (defaultIsOpen: boolean): UseDisclosure => {
  const [isOpen, set] = useState(defaultIsOpen);
@@ -16,6 +16,7 @@ type UseGroupedModelComboboxArg<T extends AnyModelConfig> = {
  getIsDisabled?: (model: T) => boolean;
  isLoading?: boolean;
  groupByType?: boolean;
  showDescriptions?: boolean;
};

type UseGroupedModelComboboxReturn = {
@@ -37,7 +38,15 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
): UseGroupedModelComboboxReturn => {
  const { t } = useTranslation();
  const base = useAppSelector(selectBaseWithSDXLFallback);
  const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
  const {
    modelConfigs,
    selectedModel,
    getIsDisabled,
    onChange,
    isLoading,
    groupByType = false,
    showDescriptions = false,
  } = arg;
  const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
    if (!modelConfigs) {
      return [];
@@ -51,6 +60,7 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
        options: val.map((model) => ({
          label: model.name,
          value: model.key,
          description: (showDescriptions && model.description) || undefined,
          isDisabled: getIsDisabled ? getIsDisabled(model) : false,
        })),
      });
@@ -60,7 +70,7 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
    );
    _options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base) ? -1 : 1));
    return _options;
  }, [modelConfigs, groupByType, getIsDisabled, base]);
  }, [modelConfigs, groupByType, getIsDisabled, base, showDescriptions]);

  const value = useMemo(
    () =>
||||
161
invokeai/frontend/web/src/common/hooks/useSubMenu.tsx
Normal file
161
invokeai/frontend/web/src/common/hooks/useSubMenu.tsx
Normal file
@@ -0,0 +1,161 @@
|
||||
import type { MenuButtonProps, MenuItemProps, MenuListProps, MenuProps } from '@invoke-ai/ui-library';
|
||||
import { Box, Flex, Icon, Text } from '@invoke-ai/ui-library';
|
||||
import { useDisclosure } from 'common/hooks/useBoolean';
|
||||
import type { FocusEventHandler, PointerEvent, RefObject } from 'react';
|
||||
import { useCallback, useEffect, useRef } from 'react';
|
||||
import { PiCaretRightBold } from 'react-icons/pi';
|
||||
import { useDebouncedCallback } from 'use-debounce';
|
||||
|
||||
const offset: [number, number] = [0, 8];
|
||||
|
||||
type UseSubMenuReturn = {
|
||||
parentMenuItemProps: Partial<MenuItemProps>;
|
||||
menuProps: Partial<MenuProps>;
|
||||
menuButtonProps: Partial<MenuButtonProps>;
|
||||
menuListProps: Partial<MenuListProps> & { ref: RefObject<HTMLDivElement> };
|
||||
};
|
||||
|
||||
/**
|
||||
* A hook that provides the necessary props to create a sub-menu within a menu.
|
||||
*
|
||||
* The sub-menu should be wrapped inside a parent `MenuItem` component.
|
||||
*
|
||||
* Use SubMenuButtonContent to render a button with a label and a right caret icon.
|
||||
*
|
||||
* TODO(psyche): Add keyboard handling for sub-menu.
|
||||
*
|
||||
* @example
|
||||
* ```tsx
|
||||
* const SubMenuExample = () => {
|
||||
* const subMenu = useSubMenu();
|
||||
* return (
|
||||
* <Menu>
|
||||
* <MenuButton>Open Parent Menu</MenuButton>
|
||||
* <MenuList>
|
||||
* <MenuItem>Parent Item 1</MenuItem>
|
||||
* <MenuItem>Parent Item 2</MenuItem>
|
||||
* <MenuItem>Parent Item 3</MenuItem>
|
||||
* <MenuItem {...subMenu.parentMenuItemProps} icon={<PiImageBold />}>
|
||||
* <Menu {...subMenu.menuProps}>
|
||||
* <MenuButton {...subMenu.menuButtonProps}>
|
||||
* <SubMenuButtonContent label="Open Sub Menu" />
|
||||
* </MenuButton>
|
||||
* <MenuList {...subMenu.menuListProps}>
|
||||
* <MenuItem>Sub Item 1</MenuItem>
|
||||
* <MenuItem>Sub Item 2</MenuItem>
|
||||
* <MenuItem>Sub Item 3</MenuItem>
|
||||
* </MenuList>
|
||||
* </Menu>
|
||||
* </MenuItem>
|
||||
* </MenuList>
|
||||
* </Menu>
|
||||
* );
|
||||
* };
|
||||
* ```
|
||||
*/
|
||||
export const useSubMenu = (): UseSubMenuReturn => {
|
||||
const subMenu = useDisclosure(false);
|
||||
const menuListRef = useRef<HTMLDivElement>(null);
|
||||
const closeDebounced = useDebouncedCallback(subMenu.close, 300);
|
||||
const openAndCancelPendingClose = useCallback(() => {
|
||||
closeDebounced.cancel();
|
||||
subMenu.open();
|
||||
}, [closeDebounced, subMenu]);
|
||||
const toggleAndCancelPendingClose = useCallback(() => {
|
||||
if (subMenu.isOpen) {
|
||||
subMenu.close();
|
||||
return;
|
||||
} else {
|
||||
closeDebounced.cancel();
|
||||
subMenu.toggle();
|
||||
}
|
||||
}, [closeDebounced, subMenu]);
|
||||
const onBlurMenuList = useCallback<FocusEventHandler<HTMLDivElement>>(
|
||||
(e) => {
|
||||
// Don't trigger blur if focus is moving to a child element - e.g. from a sub-menu item to another sub-menu item
|
||||
if (e.currentTarget.contains(e.relatedTarget)) {
|
||||
closeDebounced.cancel();
|
||||
return;
|
||||
}
|
||||
subMenu.close();
|
||||
},
|
||||
[closeDebounced, subMenu]
|
||||
);
|
||||
|
||||
const onParentMenuItemPointerLeave = useCallback(
|
||||
(e: PointerEvent<HTMLButtonElement>) => {
|
||||
/**
|
||||
* The pointerleave event is triggered when the pen or touch device is lifted, which would close the sub-menu.
|
||||
* However, we want to keep the sub-menu open until the pen or touch device pressed some other element. This
|
||||
* will be handled in the useEffect below - just ignore the pointerleave event for pen and touch devices.
|
||||
*/
|
||||
if (e.pointerType === 'pen' || e.pointerType === 'touch') {
|
||||
return;
|
||||
}
|
||||
subMenu.close();
|
||||
},
|
||||
[subMenu]
|
||||
);
|
||||
|
||||
/**
|
||||
* When using a mouse, the pointerleave events close the menu. But when using a pen or touch device, we need to close
|
||||
* the sub-menu when the user taps outside of the menu list. So we need to listen for clicks outside of the menu list
|
||||
* and close the menu accordingly.
|
||||
*/
|
||||
useEffect(() => {
|
||||
const el = menuListRef.current;
|
||||
if (!el) {
|
||||
return;
|
||||
}
|
||||
const controller = new AbortController();
|
||||
window.addEventListener(
|
||||
'click',
|
||||
(e) => {
|
||||
if (menuListRef.current?.contains(e.target as Node)) {
|
||||
return;
|
||||
}
|
||||
subMenu.close();
|
||||
},
|
||||
{ signal: controller.signal }
|
||||
);
|
||||
return () => {
|
||||
controller.abort();
|
||||
};
|
||||
}, [subMenu]);
|
||||
|
||||
return {
|
||||
parentMenuItemProps: {
|
||||
onClick: toggleAndCancelPendingClose,
|
||||
onPointerEnter: openAndCancelPendingClose,
|
||||
onPointerLeave: onParentMenuItemPointerLeave,
|
||||
closeOnSelect: false,
|
||||
},
|
||||
menuProps: {
|
||||
isOpen: subMenu.isOpen,
|
||||
onClose: subMenu.close,
|
||||
placement: 'right',
|
||||
offset: offset,
|
||||
closeOnBlur: false,
|
||||
},
|
||||
menuButtonProps: {
|
||||
as: Box,
|
||||
width: 'full',
|
||||
height: 'full',
|
||||
},
|
||||
menuListProps: {
|
||||
ref: menuListRef,
|
||||
onPointerEnter: openAndCancelPendingClose,
|
||||
onPointerLeave: closeDebounced,
|
||||
onBlur: onBlurMenuList,
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
export const SubMenuButtonContent = ({ label }: { label: string }) => {
|
||||
return (
|
||||
<Flex w="full" h="full" flexDir="row" justifyContent="space-between" alignItems="center">
|
||||
<Text>{label}</Text>
|
||||
<Icon as={PiCaretRightBold} />
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
@@ -1,5 +1,6 @@
import { Button, Flex, Heading } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import {
  useAddControlLayer,
  useAddGlobalReferenceImage,
@@ -28,69 +29,80 @@ export const CanvasAddEntityButtons = memo(() => {
    <Flex position="relative" flexDir="column" gap={4} top="20%">
      <Flex flexDir="column" justifyContent="flex-start" gap={2}>
        <Heading size="xs">{t('controlLayers.global')}</Heading>
        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addGlobalReferenceImage}
        >
          {t('controlLayers.globalReferenceImage')}
        </Button>
        <InformationalPopover feature="globalReferenceImage">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addGlobalReferenceImage}
          >
            {t('controlLayers.globalReferenceImage')}
          </Button>
        </InformationalPopover>
      </Flex>
      <Flex flexDir="column" gap={2}>
        <Heading size="xs">{t('controlLayers.regional')}</Heading>
        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addInpaintMask}
        >
          {t('controlLayers.inpaintMask')}
        </Button>
        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addRegionalGuidance}
          isDisabled={isFLUX}
        >
          {t('controlLayers.regionalGuidance')}
        </Button>
        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addRegionalReferenceImage}
          isDisabled={isFLUX}
        >
          {t('controlLayers.regionalReferenceImage')}
        </Button>
        <InformationalPopover feature="inpainting">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addInpaintMask}
          >
            {t('controlLayers.inpaintMask')}
          </Button>
        </InformationalPopover>
        <InformationalPopover feature="regionalGuidance">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addRegionalGuidance}
            isDisabled={isFLUX}
          >
            {t('controlLayers.regionalGuidance')}
          </Button>
        </InformationalPopover>
        <InformationalPopover feature="regionalReferenceImage">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addRegionalReferenceImage}
            isDisabled={isFLUX}
          >
            {t('controlLayers.regionalReferenceImage')}
          </Button>
        </InformationalPopover>
      </Flex>
      <Flex flexDir="column" justifyContent="flex-start" gap={2}>
        <Heading size="xs">{t('controlLayers.layer_other')}</Heading>

        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addControlLayer}
        >
          {t('controlLayers.controlLayer')}
        </Button>
        <Button
          size="sm"
          variant="ghost"
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addRasterLayer}
        >
          {t('controlLayers.rasterLayer')}
        </Button>
        <InformationalPopover feature="controlNet">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addControlLayer}
          >
            {t('controlLayers.controlLayer')}
          </Button>
        </InformationalPopover>
        <InformationalPopover feature="rasterLayer">
          <Button
            size="sm"
            variant="ghost"
            justifyContent="flex-start"
            leftIcon={<PiPlusBold />}
            onClick={addRasterLayer}
          >
            {t('controlLayers.rasterLayer')}
          </Button>
        </InformationalPopover>
      </Flex>
    </Flex>
  </Flex>
@@ -1,4 +1,5 @@
import { MenuGroup, MenuItem } from '@invoke-ai/ui-library';
import { Menu, MenuButton, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { CanvasContextMenuItemsCropCanvasToBbox } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuItemsCropCanvasToBbox';
import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
import {
@@ -16,6 +17,8 @@ import { PiFloppyDiskBold } from 'react-icons/pi';

export const CanvasContextMenuGlobalMenuItems = memo(() => {
  const { t } = useTranslation();
  const saveSubMenu = useSubMenu();
  const newSubMenu = useSubMenu();
  const isBusy = useCanvasIsBusy();
  const saveCanvasToGallery = useSaveCanvasToGallery();
  const saveBboxToGallery = useSaveBboxToGallery();
@@ -28,27 +31,41 @@ export const CanvasContextMenuGlobalMenuItems = memo(() => {
    <>
      <MenuGroup title={t('controlLayers.canvasContextMenu.canvasGroup')}>
        <CanvasContextMenuItemsCropCanvasToBbox />
      </MenuGroup>
      <MenuGroup title={t('controlLayers.canvasContextMenu.saveToGalleryGroup')}>
        <MenuItem icon={<PiFloppyDiskBold />} isDisabled={isBusy} onClick={saveCanvasToGallery}>
          {t('controlLayers.canvasContextMenu.saveCanvasToGallery')}
      <MenuItem {...saveSubMenu.parentMenuItemProps} icon={<PiFloppyDiskBold />}>
        <Menu {...saveSubMenu.menuProps}>
          <MenuButton {...saveSubMenu.menuButtonProps}>
            <SubMenuButtonContent label={t('controlLayers.canvasContextMenu.saveToGalleryGroup')} />
          </MenuButton>
          <MenuList {...saveSubMenu.menuListProps}>
            <MenuItem icon={<PiFloppyDiskBold />} isDisabled={isBusy} onClick={saveCanvasToGallery}>
              {t('controlLayers.canvasContextMenu.saveCanvasToGallery')}
            </MenuItem>
            <MenuItem icon={<PiFloppyDiskBold />} isDisabled={isBusy} onClick={saveBboxToGallery}>
              {t('controlLayers.canvasContextMenu.saveBboxToGallery')}
            </MenuItem>
          </MenuList>
        </Menu>
      </MenuItem>
        <MenuItem icon={<PiFloppyDiskBold />} isDisabled={isBusy} onClick={saveBboxToGallery}>
          {t('controlLayers.canvasContextMenu.saveBboxToGallery')}
        </MenuItem>
      </MenuGroup>
      <MenuGroup title={t('controlLayers.canvasContextMenu.bboxGroup')}>
        <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newGlobalReferenceImageFromBbox}>
          {t('controlLayers.canvasContextMenu.newGlobalReferenceImage')}
        </MenuItem>
        <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newRegionalReferenceImageFromBbox}>
          {t('controlLayers.canvasContextMenu.newRegionalReferenceImage')}
        </MenuItem>
        <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newControlLayerFromBbox}>
          {t('controlLayers.canvasContextMenu.newControlLayer')}
        </MenuItem>
        <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newRasterLayerFromBbox}>
          {t('controlLayers.canvasContextMenu.newRasterLayer')}
      <MenuItem {...newSubMenu.parentMenuItemProps} icon={<NewLayerIcon />}>
        <Menu {...newSubMenu.menuProps}>
          <MenuButton {...newSubMenu.menuButtonProps}>
            <SubMenuButtonContent label={t('controlLayers.canvasContextMenu.bboxGroup')} />
          </MenuButton>
          <MenuList {...newSubMenu.menuListProps}>
            <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newGlobalReferenceImageFromBbox}>
              {t('controlLayers.canvasContextMenu.newGlobalReferenceImage')}
            </MenuItem>
            <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newRegionalReferenceImageFromBbox}>
              {t('controlLayers.canvasContextMenu.newRegionalReferenceImage')}
            </MenuItem>
            <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newControlLayerFromBbox}>
              {t('controlLayers.canvasContextMenu.newControlLayer')}
            </MenuItem>
            <MenuItem icon={<NewLayerIcon />} isDisabled={isBusy} onClick={newRasterLayerFromBbox}>
              {t('controlLayers.canvasContextMenu.newRasterLayer')}
            </MenuItem>
          </MenuList>
        </Menu>
      </MenuItem>
      </MenuGroup>
    </>
@@ -1,42 +1,40 @@
import { MenuGroup } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/common/CanvasEntityMenuItemsFilter';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsSegment } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSegment';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { ControlLayerMenuItems } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItems';
import { InpaintMaskMenuItems } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItems';
import { IPAdapterMenuItems } from 'features/controlLayers/components/IPAdapter/IPAdapterMenuItems';
import { RasterLayerMenuItems } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItems';
import { RegionalGuidanceMenuItems } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItems';
import {
  EntityIdentifierContext,
  useEntityIdentifierContext,
} from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useEntityTitle } from 'features/controlLayers/hooks/useEntityTitle';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import {
  isFilterableEntityIdentifier,
  isSaveableEntityIdentifier,
  isSegmentableEntityIdentifier,
  isTransformableEntityIdentifier,
} from 'features/controlLayers/store/types';
import { memo } from 'react';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

const CanvasContextMenuSelectedEntityMenuItemsContent = memo(() => {
  const entityIdentifier = useEntityIdentifierContext();
  const title = useEntityTitle(entityIdentifier);

  return (
    <MenuGroup title={title}>
      {isFilterableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsFilter />}
      {isTransformableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsTransform />}
      {isSegmentableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsSegment />}
      {isSaveableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsCopyToClipboard />}
      {isSaveableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsSave />}
      {isTransformableEntityIdentifier(entityIdentifier) && <CanvasEntityMenuItemsCropToBbox />}
      <CanvasEntityMenuItemsDelete />
    </MenuGroup>
  );
  if (entityIdentifier.type === 'raster_layer') {
    return <RasterLayerMenuItems />;
  }
  if (entityIdentifier.type === 'control_layer') {
    return <ControlLayerMenuItems />;
  }
  if (entityIdentifier.type === 'inpaint_mask') {
    return <InpaintMaskMenuItems />;
  }
  if (entityIdentifier.type === 'regional_guidance') {
    return <RegionalGuidanceMenuItems />;
  }
  if (entityIdentifier.type === 'reference_image') {
    return <IPAdapterMenuItems />;
  }

  assert<Equals<typeof entityIdentifier.type, never>>(false);
});

CanvasContextMenuSelectedEntityMenuItemsContent.displayName = 'CanvasContextMenuSelectedEntityMenuItemsContent';

export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {
@@ -1,5 +1,6 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListGlobalActionBarAddLayerMenu } from 'features/controlLayers/components/CanvasEntityList/EntityListGlobalActionBarAddLayerMenu';
import { EntityListSelectedEntityActionBarAutoMaskButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarAutoMaskButton';
import { EntityListSelectedEntityActionBarDuplicateButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarDuplicateButton';
import { EntityListSelectedEntityActionBarFill } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFill';
import { EntityListSelectedEntityActionBarFilterButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFilterButton';
@@ -16,6 +17,7 @@ export const EntityListSelectedEntityActionBar = memo(() => {
      <Spacer />
      <EntityListSelectedEntityActionBarFill />
      <Flex h="full">
        <EntityListSelectedEntityActionBarAutoMaskButton />
        <EntityListSelectedEntityActionBarFilterButton />
        <EntityListSelectedEntityActionBarTransformButton />
        <EntityListSelectedEntityActionBarSaveToAssetsButton />
@@ -0,0 +1,37 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntitySegmentAnything } from 'features/controlLayers/hooks/useEntitySegmentAnything';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { isSegmentableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMaskHappyBold } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarAutoMaskButton = memo(() => {
const { t } = useTranslation();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const segment = useEntitySegmentAnything(selectedEntityIdentifier);

if (!selectedEntityIdentifier) {
return null;
}

if (!isSegmentableEntityIdentifier(selectedEntityIdentifier)) {
return null;
}

return (
<IconButton
onClick={segment.start}
isDisabled={segment.isDisabled}
size="sm"
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.segment.autoMask')}
tooltip={t('controlLayers.segment.autoMask')}
icon={<PiMaskHappyBold />}
/>
);
});

EntityListSelectedEntityActionBarAutoMaskButton.displayName = 'EntityListSelectedEntityActionBarAutoMaskButton';
@@ -25,8 +25,8 @@ const MenuContent = () => {
return (
<CanvasManagerProviderGate>
<MenuList>
<CanvasContextMenuGlobalMenuItems />
<CanvasContextMenuSelectedEntityMenuItems />
<CanvasContextMenuGlobalMenuItems />
</MenuList>
</CanvasManagerProviderGate>
);

@@ -1,7 +1,6 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
@@ -9,7 +8,8 @@ import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/c
|
||||
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
|
||||
import { CanvasEntityMenuItemsSegment } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSegment';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { ControlLayerMenuItemsConvertControlToRaster } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsConvertControlToRaster';
|
||||
import { ControlLayerMenuItemsConvertToSubMenu } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsConvertToSubMenu';
|
||||
import { ControlLayerMenuItemsCopyToSubMenu } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsCopyToSubMenu';
|
||||
import { ControlLayerMenuItemsTransparencyEffect } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsTransparencyEffect';
|
||||
import { memo } from 'react';
|
||||
|
||||
@@ -25,12 +25,13 @@ export const ControlLayerMenuItems = memo(() => {
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<CanvasEntityMenuItemsFilter />
|
||||
<CanvasEntityMenuItemsSegment />
|
||||
<ControlLayerMenuItemsConvertControlToRaster />
|
||||
<ControlLayerMenuItemsTransparencyEffect />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<CanvasEntityMenuItemsSave />
|
||||
<MenuDivider />
|
||||
<ControlLayerMenuItemsConvertToSubMenu />
|
||||
<ControlLayerMenuItemsCopyToSubMenu />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiLightningBold } from 'react-icons/pi';
|
||||
|
||||
export const ControlLayerMenuItemsConvertControlToRaster = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const convertControlLayerToRasterLayer = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRasterLayer({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem onClick={convertControlLayerToRasterLayer} icon={<PiLightningBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.convertToRasterLayer')}
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
ControlLayerMenuItemsConvertControlToRaster.displayName = 'ControlLayerMenuItemsConvertControlToRaster';
|
||||
@@ -0,0 +1,56 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import {
|
||||
controlLayerConvertedToInpaintMask,
|
||||
controlLayerConvertedToRasterLayer,
|
||||
controlLayerConvertedToRegionalGuidance,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiSwapBold } from 'react-icons/pi';
|
||||
|
||||
export const ControlLayerMenuItemsConvertToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const convertToInpaintMask = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToInpaintMask({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const convertToRegionalGuidance = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRegionalGuidance({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const convertToRasterLayer = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRasterLayer({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiSwapBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.convertControlLayerTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<MenuItem onClick={convertToInpaintMask} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.inpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={convertToRegionalGuidance} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.regionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={convertToRasterLayer} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.rasterLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
ControlLayerMenuItemsConvertToSubMenu.displayName = 'ControlLayerMenuItemsConvertToSubMenu';
|
||||
@@ -0,0 +1,58 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import {
|
||||
controlLayerConvertedToInpaintMask,
|
||||
controlLayerConvertedToRasterLayer,
|
||||
controlLayerConvertedToRegionalGuidance,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiCopyBold } from 'react-icons/pi';
|
||||
|
||||
export const ControlLayerMenuItemsCopyToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const copyToInpaintMask = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToInpaintMask({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const copyToRegionalGuidance = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRegionalGuidance({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const copyToRasterLayer = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRasterLayer({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiCopyBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.copyControlLayerTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<MenuItem onClick={copyToInpaintMask} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newInpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={copyToRegionalGuidance} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newRegionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={copyToRasterLayer} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newRasterLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
ControlLayerMenuItemsCopyToSubMenu.displayName = 'ControlLayerMenuItemsCopyToSubMenu';
|
||||
@@ -0,0 +1,22 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiBoundingBoxBold } from 'react-icons/pi';
|
||||
|
||||
export const IPAdapterMenuItemPullBbox = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const entityIdentifier = useEntityIdentifierContext('reference_image');
|
||||
const pullBboxIntoIPAdapter = usePullBboxIntoGlobalReferenceImage(entityIdentifier);
|
||||
const isBusy = useCanvasIsBusy();
|
||||
|
||||
return (
|
||||
<MenuItem onClick={pullBboxIntoIPAdapter} icon={<PiBoundingBoxBold />} isDisabled={isBusy}>
|
||||
{t('controlLayers.pullBboxIntoReferenceImage')}
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
IPAdapterMenuItemPullBbox.displayName = 'IPAdapterMenuItemPullBbox';
|
||||
@@ -1,16 +1,22 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
import { IPAdapterMenuItemPullBbox } from 'features/controlLayers/components/IPAdapter/IPAdapterMenuItemPullBbox';
|
||||
import { memo } from 'react';
|
||||
|
||||
export const IPAdapterMenuItems = memo(() => {
|
||||
return (
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<IPAdapterMenuItemPullBbox />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/componen
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { InpaintMaskMenuItemsConvertToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsConvertToSubMenu';
|
||||
import { InpaintMaskMenuItemsCopyToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsCopyToSubMenu';
|
||||
import { memo } from 'react';
|
||||
|
||||
export const InpaintMaskMenuItems = memo(() => {
|
||||
@@ -19,6 +21,9 @@ export const InpaintMaskMenuItems = memo(() => {
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<MenuDivider />
|
||||
<InpaintMaskMenuItemsConvertToSubMenu />
|
||||
<InpaintMaskMenuItemsCopyToSubMenu />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { inpaintMaskConvertedToRegionalGuidance } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiSwapBold } from 'react-icons/pi';
|
||||
|
||||
export const InpaintMaskMenuItemsConvertToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const convertToRegionalGuidance = useCallback(() => {
|
||||
dispatch(inpaintMaskConvertedToRegionalGuidance({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiSwapBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.convertInpaintMaskTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<MenuItem onClick={convertToRegionalGuidance} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.regionalGuidance')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskMenuItemsConvertToSubMenu.displayName = 'InpaintMaskMenuItemsConvertToSubMenu';
|
||||
@@ -0,0 +1,40 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { inpaintMaskConvertedToRegionalGuidance } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiCopyBold } from 'react-icons/pi';
|
||||
|
||||
export const InpaintMaskMenuItemsCopyToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const copyToRegionalGuidance = useCallback(() => {
|
||||
dispatch(inpaintMaskConvertedToRegionalGuidance({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiCopyBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.copyInpaintMaskTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<MenuItem onClick={copyToRegionalGuidance} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newRegionalGuidance')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskMenuItemsCopyToSubMenu.displayName = 'InpaintMaskMenuItemsCopyToSubMenu';
|
||||
@@ -1,7 +1,6 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
@@ -9,7 +8,8 @@ import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/c
|
||||
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
|
||||
import { CanvasEntityMenuItemsSegment } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSegment';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { RasterLayerMenuItemsConvertRasterToControl } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsConvertRasterToControl';
|
||||
import { RasterLayerMenuItemsConvertToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsConvertToSubMenu';
|
||||
import { RasterLayerMenuItemsCopyToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsCopyToSubMenu';
|
||||
import { memo } from 'react';
|
||||
|
||||
export const RasterLayerMenuItems = memo(() => {
|
||||
@@ -24,11 +24,12 @@ export const RasterLayerMenuItems = memo(() => {
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<CanvasEntityMenuItemsFilter />
|
||||
<CanvasEntityMenuItemsSegment />
|
||||
<RasterLayerMenuItemsConvertRasterToControl />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<CanvasEntityMenuItemsSave />
|
||||
<MenuDivider />
|
||||
<RasterLayerMenuItemsConvertToSubMenu />
|
||||
<RasterLayerMenuItemsCopyToSubMenu />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { rasterLayerConvertedToControlLayer } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiLightningBold } from 'react-icons/pi';
|
||||
|
||||
export const RasterLayerMenuItemsConvertRasterToControl = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('raster_layer');
|
||||
const defaultControlAdapter = useAppSelector(selectDefaultControlAdapter);
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const onClick = useCallback(() => {
|
||||
dispatch(
|
||||
rasterLayerConvertedToControlLayer({
|
||||
entityIdentifier,
|
||||
overrides: {
|
||||
controlAdapter: defaultControlAdapter,
|
||||
},
|
||||
})
|
||||
);
|
||||
}, [defaultControlAdapter, dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem onClick={onClick} icon={<PiLightningBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.convertToControlLayer')}
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
RasterLayerMenuItemsConvertRasterToControl.displayName = 'RasterLayerMenuItemsConvertRasterToControl';
|
||||
@@ -0,0 +1,65 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import {
|
||||
rasterLayerConvertedToControlLayer,
|
||||
rasterLayerConvertedToInpaintMask,
|
||||
rasterLayerConvertedToRegionalGuidance,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiSwapBold } from 'react-icons/pi';
|
||||
|
||||
export const RasterLayerMenuItemsConvertToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('raster_layer');
|
||||
const defaultControlAdapter = useAppSelector(selectDefaultControlAdapter);
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const convertToInpaintMask = useCallback(() => {
|
||||
dispatch(rasterLayerConvertedToInpaintMask({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const convertToRegionalGuidance = useCallback(() => {
|
||||
dispatch(rasterLayerConvertedToRegionalGuidance({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const convertToControlLayer = useCallback(() => {
|
||||
dispatch(
|
||||
rasterLayerConvertedToControlLayer({
|
||||
entityIdentifier,
|
||||
replace: true,
|
||||
overrides: { controlAdapter: defaultControlAdapter },
|
||||
})
|
||||
);
|
||||
}, [defaultControlAdapter, dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiSwapBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.convertRasterLayerTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<MenuItem onClick={convertToInpaintMask} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.inpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={convertToRegionalGuidance} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.regionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={convertToControlLayer} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.controlLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
RasterLayerMenuItemsConvertToSubMenu.displayName = 'RasterLayerMenuItemsConvertToSubMenu';
|
||||
@@ -0,0 +1,66 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import {
|
||||
rasterLayerConvertedToControlLayer,
|
||||
rasterLayerConvertedToInpaintMask,
|
||||
rasterLayerConvertedToRegionalGuidance,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiCopyBold } from 'react-icons/pi';
|
||||
|
||||
export const RasterLayerMenuItemsCopyToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('raster_layer');
|
||||
const defaultControlAdapter = useAppSelector(selectDefaultControlAdapter);
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const copyToInpaintMask = useCallback(() => {
|
||||
dispatch(rasterLayerConvertedToInpaintMask({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const copyToRegionalGuidance = useCallback(() => {
|
||||
dispatch(rasterLayerConvertedToRegionalGuidance({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
const copyToControlLayer = useCallback(() => {
|
||||
dispatch(
|
||||
rasterLayerConvertedToControlLayer({
|
||||
entityIdentifier,
|
||||
overrides: { controlAdapter: defaultControlAdapter },
|
||||
})
|
||||
);
|
||||
}, [defaultControlAdapter, dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiCopyBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.copyRasterLayerTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<MenuItem onClick={copyToInpaintMask} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newInpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={copyToRegionalGuidance} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newRegionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={copyToControlLayer} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newControlLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
RasterLayerMenuItemsCopyToSubMenu.displayName = 'RasterLayerMenuItemsCopyToSubMenu';
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Flex, MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
@@ -6,16 +7,18 @@ import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/component
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { RegionalGuidanceMenuItemsAddPromptsAndIPAdapter } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItemsAddPromptsAndIPAdapter';
|
||||
import { RegionalGuidanceMenuItemsAutoNegative } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItemsAutoNegative';
|
||||
import { RegionalGuidanceMenuItemsConvertToSubMenu } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItemsConvertToSubMenu';
|
||||
import { RegionalGuidanceMenuItemsCopyToSubMenu } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItemsCopyToSubMenu';
|
||||
import { memo } from 'react';
|
||||
|
||||
export const RegionalGuidanceMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<Flex gap={2}>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</Flex>
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<RegionalGuidanceMenuItemsAddPromptsAndIPAdapter />
|
||||
<MenuDivider />
|
||||
@@ -23,6 +26,9 @@ export const RegionalGuidanceMenuItems = memo(() => {
|
||||
<RegionalGuidanceMenuItemsAutoNegative />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<MenuDivider />
|
||||
<RegionalGuidanceMenuItemsConvertToSubMenu />
|
||||
<RegionalGuidanceMenuItemsCopyToSubMenu />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { rgConvertedToInpaintMask } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiSwapBold } from 'react-icons/pi';
|
||||
|
||||
export const RegionalGuidanceMenuItemsConvertToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const convertToInpaintMask = useCallback(() => {
|
||||
dispatch(rgConvertedToInpaintMask({ entityIdentifier, replace: true }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiSwapBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.convertRegionalGuidanceTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<MenuItem onClick={convertToInpaintMask} icon={<PiSwapBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.inpaintMask')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
RegionalGuidanceMenuItemsConvertToSubMenu.displayName = 'RegionalGuidanceMenuItemsConvertToSubMenu';
|
||||
@@ -0,0 +1,40 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
|
||||
import { rgConvertedToInpaintMask } from 'features/controlLayers/store/canvasSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiCopyBold } from 'react-icons/pi';
|
||||
|
||||
export const RegionalGuidanceMenuItemsCopyToSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
|
||||
const isInteractable = useIsEntityInteractable(entityIdentifier);
|
||||
|
||||
const copyToInpaintMask = useCallback(() => {
|
||||
dispatch(rgConvertedToInpaintMask({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiCopyBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.copyRegionalGuidanceTo')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<MenuItem onClick={copyToInpaintMask} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.newInpaintMask')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
RegionalGuidanceMenuItemsCopyToSubMenu.displayName = 'RegionalGuidanceMenuItemsCopyToSubMenu';
|
||||
@@ -1,4 +1,14 @@
|
||||
import { Button, ButtonGroup, Flex, Heading, Spacer } from '@invoke-ai/ui-library';
|
||||
import {
|
||||
Button,
|
||||
ButtonGroup,
|
||||
Flex,
|
||||
Heading,
|
||||
Menu,
|
||||
MenuButton,
|
||||
MenuItem,
|
||||
MenuList,
|
||||
Spacer,
|
||||
} from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
|
||||
@@ -10,9 +20,9 @@ import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/kon
|
||||
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
|
||||
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { memo, useRef } from 'react';
|
||||
import { memo, useCallback, useRef } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiStarBold, PiXBold } from 'react-icons/pi';
|
||||
import { PiArrowsCounterClockwiseBold, PiFloppyDiskBold, PiStarBold, PiXBold } from 'react-icons/pi';
|
||||
|
||||
const SegmentAnythingContent = memo(
|
||||
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
|
||||
@@ -22,8 +32,25 @@ const SegmentAnythingContent = memo(
|
||||
const isCanvasFocused = useIsRegionFocused('canvas');
|
||||
const isProcessing = useStore(adapter.segmentAnything.$isProcessing);
|
||||
const hasPoints = useStore(adapter.segmentAnything.$hasPoints);
|
||||
const hasImageState = useStore(adapter.segmentAnything.$hasImageState);
|
||||
const autoProcess = useAppSelector(selectAutoProcess);
|
||||
|
||||
const saveAsInpaintMask = useCallback(() => {
|
||||
adapter.segmentAnything.saveAs('inpaint_mask');
|
||||
}, [adapter.segmentAnything]);
|
||||
|
||||
const saveAsRegionalGuidance = useCallback(() => {
|
||||
adapter.segmentAnything.saveAs('regional_guidance');
|
||||
}, [adapter.segmentAnything]);
|
||||
|
||||
const saveAsRasterLayer = useCallback(() => {
|
||||
adapter.segmentAnything.saveAs('raster_layer');
|
||||
}, [adapter.segmentAnything]);
|
||||
|
||||
const saveAsControlLayer = useCallback(() => {
|
||||
adapter.segmentAnything.saveAs('control_layer');
|
||||
}, [adapter.segmentAnything]);
|
||||
|
||||
useRegisteredHotkeys({
|
||||
id: 'applySegmentAnything',
|
||||
category: 'canvas',
|
||||
@@ -86,15 +113,32 @@ const SegmentAnythingContent = memo(
|
||||
>
|
||||
{t('controlLayers.segment.reset')}
|
||||
</Button>
|
||||
<Button
|
||||
leftIcon={<PiCheckBold />}
|
||||
onClick={adapter.segmentAnything.apply}
|
||||
isLoading={isProcessing}
|
||||
loadingText={t('controlLayers.segment.apply')}
|
||||
variant="ghost"
|
||||
>
|
||||
{t('controlLayers.segment.apply')}
|
||||
</Button>
|
||||
<Menu>
|
||||
<MenuButton
|
||||
as={Button}
|
||||
leftIcon={<PiFloppyDiskBold />}
|
||||
isLoading={isProcessing}
|
||||
loadingText={t('controlLayers.segment.saveAs')}
|
||||
variant="ghost"
|
||||
isDisabled={!hasImageState}
|
||||
>
|
||||
{t('controlLayers.segment.saveAs')}
|
||||
</MenuButton>
|
||||
<MenuList>
|
||||
<MenuItem isDisabled={!hasImageState} onClick={saveAsInpaintMask}>
|
||||
{t('controlLayers.inpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem isDisabled={!hasImageState} onClick={saveAsRegionalGuidance}>
|
||||
{t('controlLayers.regionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem isDisabled={!hasImageState} onClick={saveAsControlLayer}>
|
||||
{t('controlLayers.controlLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem isDisabled={!hasImageState} onClick={saveAsRasterLayer}>
|
||||
{t('controlLayers.rasterLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
<Button
|
||||
leftIcon={<PiXBold />}
|
||||
onClick={adapter.segmentAnything.cancel}
|
||||
|
||||
@@ -26,13 +26,10 @@ export const SegmentAnythingPointType = memo(
|
||||
<RadioGroup value={pointType} onChange={onChange} w="full" size="md">
|
||||
<Flex alignItems="center" w="full" gap={4} fontWeight="semibold" color="base.300">
|
||||
<Radio value="foreground">
|
||||
<Text>{t('controlLayers.segment.foreground')}</Text>
|
||||
<Text>{t('controlLayers.segment.include')}</Text>
|
||||
</Radio>
|
||||
<Radio value="background">
|
||||
<Text>{t('controlLayers.segment.background')}</Text>
|
||||
</Radio>
|
||||
<Radio value="neutral">
|
||||
<Text>{t('controlLayers.segment.neutral')}</Text>
|
||||
<Text>{t('controlLayers.segment.exclude')}</Text>
|
||||
</Radio>
|
||||
</Flex>
|
||||
</RadioGroup>
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import type { SystemStyleObject } from '@invoke-ai/ui-library';
|
||||
import { Button, Collapse, Flex, Icon, Spacer, Text } from '@invoke-ai/ui-library';
|
||||
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
|
||||
import { useBoolean } from 'common/hooks/useBoolean';
|
||||
import { CanvasEntityAddOfTypeButton } from 'features/controlLayers/components/common/CanvasEntityAddOfTypeButton';
|
||||
import { CanvasEntityMergeVisibleButton } from 'features/controlLayers/components/common/CanvasEntityMergeVisibleButton';
|
||||
import { CanvasEntityTypeIsHiddenToggle } from 'features/controlLayers/components/common/CanvasEntityTypeIsHiddenToggle';
|
||||
import { useEntityTypeInformationalPopover } from 'features/controlLayers/hooks/useEntityTypeInformationalPopover';
|
||||
import { useEntityTypeTitle } from 'features/controlLayers/hooks/useEntityTypeTitle';
|
||||
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import type { PropsWithChildren } from 'react';
|
||||
@@ -21,6 +23,7 @@ const _hover: SystemStyleObject = {
|
||||
|
||||
export const CanvasEntityGroupList = memo(({ isSelected, type, children }: Props) => {
|
||||
const title = useEntityTypeTitle(type);
|
||||
const informationalPopoverFeature = useEntityTypeInformationalPopover(type);
|
||||
const collapse = useBoolean(true);
|
||||
const canMergeVisible = useMemo(() => type === 'raster_layer' || type === 'inpaint_mask', [type]);
|
||||
const canHideAll = useMemo(() => type !== 'reference_image', [type]);
|
||||
@@ -47,15 +50,30 @@ export const CanvasEntityGroupList = memo(({ isSelected, type, children }: Props
|
||||
transitionProperty="common"
|
||||
transitionDuration="fast"
|
||||
/>
|
||||
<Text
|
||||
fontWeight="semibold"
|
||||
color={isSelected ? 'base.200' : 'base.500'}
|
||||
userSelect="none"
|
||||
transitionProperty="common"
|
||||
transitionDuration="fast"
|
||||
>
|
||||
{title}
|
||||
</Text>
|
||||
{informationalPopoverFeature ? (
|
||||
<InformationalPopover feature={informationalPopoverFeature}>
|
||||
<Text
|
||||
fontWeight="semibold"
|
||||
color={isSelected ? 'base.200' : 'base.500'}
|
||||
userSelect="none"
|
||||
transitionProperty="common"
|
||||
transitionDuration="fast"
|
||||
>
|
||||
{title}
|
||||
</Text>
|
||||
</InformationalPopover>
|
||||
) : (
|
||||
<Text
|
||||
fontWeight="semibold"
|
||||
color={isSelected ? 'base.200' : 'base.500'}
|
||||
userSelect="none"
|
||||
transitionProperty="common"
|
||||
transitionDuration="fast"
|
||||
>
|
||||
{title}
|
||||
</Text>
|
||||
)}
|
||||
|
||||
<Spacer />
|
||||
</Flex>
|
||||
{canMergeVisible && <CanvasEntityMergeVisibleButton type={type} />}
|
||||
|
||||
@@ -20,7 +20,7 @@ export const CanvasEntityMenuItemsCopyToClipboard = memo(() => {
|
||||
|
||||
return (
|
||||
<MenuItem onClick={onClick} icon={<PiCopyBold />} isDisabled={!isInteractable}>
|
||||
{t('controlLayers.copyToClipboard')}
|
||||
{t('common.clipboard')}
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -5,11 +5,13 @@ import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdap
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { isFilterableEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
|
||||
export const useEntityFilter = (entityIdentifier: CanvasEntityIdentifier | null) => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const adapter = useEntityAdapterSafe(entityIdentifier);
|
||||
const imageViewer = useImageViewer();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const isInteractable = useStore(adapter?.$isInteractable ?? $false);
|
||||
const isEmpty = useStore(adapter?.$isEmpty ?? $false);
|
||||
@@ -50,8 +52,9 @@ export const useEntityFilter = (entityIdentifier: CanvasEntityIdentifier | null)
|
||||
if (!adapter) {
|
||||
return;
|
||||
}
|
||||
imageViewer.close();
|
||||
adapter.filterer.start();
|
||||
}, [isDisabled, entityIdentifier, canvasManager]);
|
||||
}, [isDisabled, entityIdentifier, canvasManager, imageViewer]);
|
||||
|
||||
return { isDisabled, start } as const;
|
||||
};
|
||||
|
||||
@@ -5,11 +5,13 @@ import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdap
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { isSegmentableEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
|
||||
export const useEntitySegmentAnything = (entityIdentifier: CanvasEntityIdentifier | null) => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const adapter = useEntityAdapterSafe(entityIdentifier);
|
||||
const imageViewer = useImageViewer();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const isInteractable = useStore(adapter?.$isInteractable ?? $false);
|
||||
const isEmpty = useStore(adapter?.$isEmpty ?? $false);
|
||||
@@ -50,8 +52,9 @@ export const useEntitySegmentAnything = (entityIdentifier: CanvasEntityIdentifie
|
||||
if (!adapter) {
|
||||
return;
|
||||
}
|
||||
imageViewer.close();
|
||||
adapter.segmentAnything.start();
|
||||
}, [isDisabled, entityIdentifier, canvasManager]);
|
||||
}, [isDisabled, entityIdentifier, canvasManager, imageViewer]);
|
||||
|
||||
return { isDisabled, start } as const;
|
||||
};
|
||||
|
||||
@@ -5,11 +5,13 @@ import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdap
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { isTransformableEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
|
||||
export const useEntityTransform = (entityIdentifier: CanvasEntityIdentifier | null) => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const adapter = useEntityAdapterSafe(entityIdentifier);
|
||||
const imageViewer = useImageViewer();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const isInteractable = useStore(adapter?.$isInteractable ?? $false);
|
||||
const isEmpty = useStore(adapter?.$isEmpty ?? $false);
|
||||
@@ -67,10 +69,11 @@ export const useEntityTransform = (entityIdentifier: CanvasEntityIdentifier | nu
|
||||
if (!adapter) {
|
||||
return;
|
||||
}
|
||||
imageViewer.close();
|
||||
await adapter.transformer.startTransform({ silent: true });
|
||||
adapter.transformer.fitToBboxContain();
|
||||
await adapter.transformer.applyTransform();
|
||||
}, [canvasManager, entityIdentifier, isDisabled]);
|
||||
}, [canvasManager, entityIdentifier, imageViewer, isDisabled]);
|
||||
|
||||
return { isDisabled, start, fitToBbox } as const;
|
||||
};
|
||||
|
||||
@@ -0,0 +1,25 @@
import type { Feature } from 'common/components/InformationalPopover/constants';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { useMemo } from 'react';

export const useEntityTypeInformationalPopover = (type: CanvasEntityIdentifier['type']): Feature | undefined => {
const feature = useMemo(() => {
switch (type) {
case 'control_layer':
return 'controlNet';
case 'inpaint_mask':
return 'inpainting';
case 'raster_layer':
return 'rasterLayer';
case 'regional_guidance':
return 'regionalGuidanceAndReferenceImage';
case 'reference_image':
return 'globalReferenceImage';

default:
return undefined;
}
}, [type]);

return feature;
};
@@ -6,15 +6,22 @@ import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konv
|
||||
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
|
||||
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
|
||||
import { CanvasObjectImage } from 'features/controlLayers/konva/CanvasObject/CanvasObjectImage';
|
||||
import { addCoords, getKonvaNodeDebugAttrs, getPrefixedId, offsetCoord } from 'features/controlLayers/konva/util';
|
||||
import {
|
||||
addCoords,
|
||||
getKonvaNodeDebugAttrs,
|
||||
getPrefixedId,
|
||||
offsetCoord,
|
||||
roundCoord,
|
||||
} from 'features/controlLayers/konva/util';
|
||||
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
import type {
|
||||
CanvasEntityType,
|
||||
CanvasImageState,
|
||||
Coordinate,
|
||||
RgbaColor,
|
||||
SAMPoint,
|
||||
SAMPointLabel,
|
||||
SAMPointLabelString,
|
||||
SAMPointWithId,
|
||||
} from 'features/controlLayers/store/types';
|
||||
import { SAM_POINT_LABEL_NUMBER_TO_STRING } from 'features/controlLayers/store/types';
|
||||
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
|
||||
@@ -27,6 +34,9 @@ import { atom, computed } from 'nanostores';
|
||||
import type { Logger } from 'roarr';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
import stableHash from 'stable-hash';
|
||||
import type { Equals } from 'tsafe';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
type CanvasSegmentAnythingModuleConfig = {
|
||||
/**
|
||||
@@ -70,7 +80,7 @@ const DEFAULT_CONFIG: CanvasSegmentAnythingModuleConfig = {
|
||||
SAM_POINT_FOREGROUND_COLOR: { r: 50, g: 255, b: 0, a: 1 }, // light green
|
||||
SAM_POINT_BACKGROUND_COLOR: { r: 255, g: 0, b: 50, a: 1 }, // red-ish
|
||||
SAM_POINT_NEUTRAL_COLOR: { r: 0, g: 225, b: 255, a: 1 }, // cyan
|
||||
MASK_COLOR: { r: 0, g: 200, b: 200, a: 0.5 }, // cyan with 50% opacity
|
||||
MASK_COLOR: { r: 0, g: 225, b: 255, a: 1 }, // cyan
|
||||
PROCESS_DEBOUNCE_MS: 1000,
|
||||
};
|
||||
|
||||
@@ -85,6 +95,7 @@ const DEFAULT_CONFIG: CanvasSegmentAnythingModuleConfig = {
|
||||
type SAMPointState = {
|
||||
id: string;
|
||||
label: SAMPointLabel;
|
||||
coord: Coordinate;
|
||||
konva: {
|
||||
circle: Konva.Circle;
|
||||
};
|
||||
@@ -113,9 +124,9 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
|
||||
$isSegmenting = atom<boolean>(false);
|
||||
|
||||
/**
|
||||
* Whether the current set of points has been processed.
|
||||
* The hash of the last processed points. This is used to prevent re-processing the same points.
|
||||
*/
|
||||
$hasProcessed = atom<boolean>(false);
|
||||
$lastProcessedHash = atom<string>('');
|
||||
|
||||
/**
|
||||
* Whether the module is currently processing the points.
|
||||
@@ -144,10 +155,15 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
|
||||
/**
|
||||
* The ephemeral image state of the processed image. Only used while segmenting.
|
||||
*/
|
||||
imageState: CanvasImageState | null = null;
|
||||
$imageState = atom<CanvasImageState | null>(null);
|
||||
|
||||
/**
|
||||
* The current input points.
|
||||
* Whether the module has an image state. This is a computed value based on $imageState.
|
||||
*/
|
||||
$hasImageState = computed(this.$imageState, (imageState) => imageState !== null);
|
||||
|
||||
/**
|
||||
* The current input points. A listener is added to this atom to process the points when they change.
|
||||
*/
|
||||
$points = atom<SAMPointState[]>([]);
|
||||
|
||||
@@ -187,6 +203,10 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
|
||||
* It's rendered with a globalCompositeOperation of 'source-atop' to preview the mask as a semi-transparent overlay.
|
||||
*/
|
||||
compositingRect: Konva.Rect;
|
||||
/**
|
||||
* A tween for pulsing the mask group's opacity.
|
||||
*/
|
||||
maskTween: Konva.Tween | null;
|
||||
};
|
||||
|
||||
KONVA_CIRCLE_NAME = `${this.type}:circle`;
|
||||
@@ -209,7 +229,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
|
||||
this.konva = {
|
||||
group: new Konva.Group({ name: this.KONVA_GROUP_NAME }),
|
||||
pointGroup: new Konva.Group({ name: this.KONVA_POINT_GROUP_NAME }),
maskGroup: new Konva.Group({ name: this.KONVA_MASK_GROUP_NAME }),
maskGroup: new Konva.Group({ name: this.KONVA_MASK_GROUP_NAME, opacity: 0.6 }),
compositingRect: new Konva.Rect({
name: this.KONVA_COMPOSITING_RECT_NAME,
fill: rgbaColorToString(this.config.MASK_COLOR),
@@ -219,6 +239,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
perfectDrawEnabled: false,
visible: false,
}),
maskTween: null,
};

// Points should always be rendered above the mask group
@@ -250,10 +271,12 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
createPoint(coord: Coordinate, label: SAMPointLabel): SAMPointState {
const id = getPrefixedId('sam_point');

const roundedCoord = roundCoord(coord);

const circle = new Konva.Circle({
name: this.KONVA_CIRCLE_NAME,
x: Math.round(coord.x),
y: Math.round(coord.y),
x: roundedCoord.x,
y: roundedCoord.y,
radius: this.manager.stage.unscale(this.config.SAM_POINT_RADIUS), // We will scale this as the stage scale changes
fill: rgbaColorToString(this.getSAMPointColor(label)),
stroke: rgbaColorToString(this.config.SAM_POINT_BORDER_COLOR),
@@ -273,11 +296,12 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
// This event should not bubble up to the parent, stage or any other nodes
e.cancelBubble = true;
circle.destroy();
this.$points.set(this.$points.get().filter((point) => point.id !== id));
if (this.$points.get().length === 0) {

const newPoints = this.$points.get().filter((point) => point.id !== id);
if (newPoints.length === 0) {
this.resetEphemeralState();
} else {
this.$hasProcessed.set(false);
this.$points.set(newPoints);
}
});

@@ -286,25 +310,28 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
});

circle.on('dragend', () => {
const roundedCoord = roundCoord(circle.position());

this.log.trace({ ...roundedCoord, label: SAM_POINT_LABEL_NUMBER_TO_STRING[label] }, 'Moved SAM point');
this.$isDraggingPoint.set(false);
// Point has changed!
this.$hasProcessed.set(false);
this.$points.notify();
this.log.trace(
{ x: Math.round(circle.x()), y: Math.round(circle.y()), label: SAM_POINT_LABEL_NUMBER_TO_STRING[label] },
'Moved SAM point'
);

const newPoints = this.$points.get().map((point) => {
if (point.id === id) {
return { ...point, coord: roundedCoord };
}
return point;
});

this.$points.set(newPoints);
});

this.konva.pointGroup.add(circle);

this.log.trace(
{ x: Math.round(circle.x()), y: Math.round(circle.y()), label: SAM_POINT_LABEL_NUMBER_TO_STRING[label] },
'Created SAM point'
);
this.log.trace({ ...roundedCoord, label: SAM_POINT_LABEL_NUMBER_TO_STRING[label] }, 'Created SAM point');

return {
id,
coord: roundedCoord,
label,
konva: { circle },
};
@@ -327,14 +354,14 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
/**
* Gets the SAM points in the format expected by the segment-anything API. The x and y values are rounded to integers.
*/
getSAMPoints = (): SAMPoint[] => {
const points: SAMPoint[] = [];
getSAMPoints = (): SAMPointWithId[] => {
const points: SAMPointWithId[] = [];

for (const { konva, label } of this.$points.get()) {
for (const { id, coord, label } of this.$points.get()) {
points.push({
// Pull out and round the x and y values from Konva
x: Math.round(konva.circle.x()),
y: Math.round(konva.circle.y()),
id,
x: coord.x,
y: coord.y,
label,
});
}
@@ -381,10 +408,8 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {

// Create a SAM point at the normalized position
const point = this.createPoint(normalizedPoint, this.$pointType.get());
this.$points.set([...this.$points.get(), point]);

// Mark the module as having _not_ processed the points now that they have changed
this.$hasProcessed.set(false);
const newPoints = [...this.$points.get(), point];
this.$points.set(newPoints);
};

/**
@@ -421,6 +446,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
if (points.length === 0) {
return;
}

if (this.manager.stateApi.getSettings().autoProcess) {
this.process();
}
@@ -433,7 +459,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
if (this.$points.get().length === 0) {
return;
}
if (autoProcess && !this.$hasProcessed.get()) {
if (autoProcess) {
this.process();
}
})
@@ -500,6 +526,12 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
return;
}

const hash = stableHash(points);
if (hash === this.$lastProcessedHash.get()) {
this.log.trace('Already processed points');
return;
}

this.$isProcessing.set(true);

this.log.trace({ points }, 'Segmenting');
@@ -521,7 +553,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.abortController = controller;

// Build the graph for segmenting the image, using the rasterized image DTO
const { graph, outputNodeId } = this.buildGraph(rasterizeResult.value);
const { graph, outputNodeId } = this.buildGraph(rasterizeResult.value, points);

// Run the graph and get the segmented image output
const segmentResult = await withResultAsync(() =>
@@ -548,21 +580,27 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.log.trace({ imageDTO: segmentResult.value }, 'Segmented');

// Prepare the ephemeral image state
this.imageState = imageDTOToImageObject(segmentResult.value);
const imageState = imageDTOToImageObject(segmentResult.value);
this.$imageState.set(imageState);

// Destroy any existing masked image and create a new one
if (this.maskedImage) {
this.maskedImage.destroy();
}
this.maskedImage = new CanvasObjectImage(this.imageState, this);
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
this.konva.maskTween = null;
}

this.maskedImage = new CanvasObjectImage(imageState, this);

// Force update the masked image - after awaiting, the image will be rendered (in memory)
await this.maskedImage.update(this.imageState, true);
await this.maskedImage.update(imageState, true);

// Update the compositing rect to match the image size
this.konva.compositingRect.setAttrs({
width: this.imageState.image.width,
height: this.imageState.image.height,
width: imageState.image.width,
height: imageState.image.height,
visible: true,
});

@@ -574,12 +612,24 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
// Cache the group to ensure the mask is rendered correctly w/ opacity
this.konva.maskGroup.cache();

// Create a pulsing tween
this.konva.maskTween = new Konva.Tween({
node: this.konva.maskGroup,
duration: 1,
opacity: 0.4, // oscillate between this value and pre-tween opacity
yoyo: true,
repeat: Infinity,
easing: Konva.Easings.EaseOut,
});

// Start the pulsing effect
this.konva.maskTween.play();

this.$lastProcessedHash.set(hash);

// We are done processing (still segmenting though!)
this.$isProcessing.set(false);

// The current points have been processed
this.$hasProcessed.set(true);

// Clean up the abort controller as needed
if (!this.abortController.signal.aborted) {
this.abortController.abort();
@@ -596,11 +646,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
* Applies the segmented image to the entity.
*/
apply = () => {
if (!this.$hasProcessed.get()) {
this.log.error('Cannot apply unprocessed points');
return;
}
const imageState = this.imageState;
const imageState = this.$imageState.get();
if (!imageState) {
this.log.error('No image state to apply');
return;
@@ -627,6 +673,55 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.teardown();
};

/**
* Saves the segmented image as a new entity of the given type.
*/
saveAs = (type: Exclude<CanvasEntityType, 'reference_image'>) => {
const imageState = this.$imageState.get();
if (!imageState) {
this.log.error('No image state to save as');
return;
}
this.log.trace(`Saving as ${type}`);

// Clear the buffer - we are creating a new entity, so we don't want to keep the old one
this.parent.bufferRenderer.clearBuffer();

// Create the new entity with the masked image as its only object
const rect = this.parent.transformer.getRelativeRect();
const arg = {
overrides: {
objects: [imageState],
position: {
x: Math.round(rect.x),
y: Math.round(rect.y),
},
},
isSelected: true,
};

switch (type) {
case 'raster_layer':
this.manager.stateApi.addRasterLayer(arg);
break;
case 'control_layer':
this.manager.stateApi.addControlLayer(arg);
break;
case 'inpaint_mask':
this.manager.stateApi.addInpaintMask(arg);
break;
case 'regional_guidance':
this.manager.stateApi.addRegionalGuidance(arg);
break;
default:
assert<Equals<typeof type, never>>(false);
}

// Final cleanup and teardown, returning user to main canvas UI
this.resetEphemeralState();
this.teardown();
};

/**
* Resets the module (e.g. remove all points and the mask image).
*
@@ -686,12 +781,16 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
if (this.maskedImage) {
this.maskedImage.destroy();
}
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
this.konva.maskTween = null;
}

// Empty internal module state
this.$points.set([]);
this.imageState = null;
this.$imageState.set(null);
this.$pointType.set(1);
this.$hasProcessed.set(false);
this.$lastProcessedHash.set('');
this.$isProcessing.set(false);

// Reset non-ephemeral konva nodes
@@ -706,7 +805,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
/**
* Builds a graph for segmenting an image with the given image DTO.
*/
buildGraph = ({ image_name }: ImageDTO): { graph: Graph; outputNodeId: string } => {
buildGraph = ({ image_name }: ImageDTO, points: SAMPointWithId[]): { graph: Graph; outputNodeId: string } => {
const graph = new Graph(getPrefixedId('canvas_segment_anything'));

// TODO(psyche): When SAM2 is available in transformers, use it here
@@ -716,7 +815,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
type: 'segment_anything',
model: 'segment-anything-huge',
image: { image_name },
point_lists: [{ points: this.getSAMPoints() }],
point_lists: [{ points: points.map(({ x, y, label }) => ({ x, y, label })) }],
mask_filter: 'largest',
});

@@ -759,11 +858,11 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
label,
circle: getKonvaNodeDebugAttrs(konva.circle),
})),
imageState: deepClone(this.imageState),
imageState: deepClone(this.$imageState.get()),
maskedImage: this.maskedImage?.repr(),
config: deepClone(this.config),
$isSegmenting: this.$isSegmenting.get(),
$hasProcessed: this.$hasProcessed.get(),
$lastProcessedHash: this.$lastProcessedHash.get(),
$isProcessing: this.$isProcessing.get(),
$pointType: this.$pointType.get(),
$pointTypeString: this.$pointTypeString.get(),
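The hunks above replace the module's ad-hoc `$hasProcessed` flag with a `$lastProcessedHash` guard: the SAM points are hashed before processing, and processing is skipped when the hash matches the last successful run. A minimal sketch of that pattern, where a deterministic `hashPoints` serializer stands in for the `stableHash` call used in the diff (every other name here is illustrative, not part of the codebase):

type SAMPointLite = { x: number; y: number; label: number };

// Stand-in for stableHash: any deterministic serialization works for the guard.
const hashPoints = (points: SAMPointLite[]): string => JSON.stringify(points);

let lastProcessedHash = '';

const processIfChanged = async (points: SAMPointLite[], run: (points: SAMPointLite[]) => Promise<void>) => {
  const hash = hashPoints(points);
  if (hash === lastProcessedHash) {
    return; // same points as the last successful run - skip reprocessing
  }
  await run(points);
  lastProcessedHash = hash; // only record the hash once the run has completed
};

Because the hash is only stored after a successful run, an aborted or failed segmentation leaves the guard open and the next trigger processes again.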
@@ -17,12 +17,16 @@ import {
} from 'features/controlLayers/store/canvasSettingsSlice';
import {
bboxChangedFromCanvas,
controlLayerAdded,
entityBrushLineAdded,
entityEraserLineAdded,
entityMoved,
entityRasterized,
entityRectAdded,
entityReset,
inpaintMaskAdded,
rasterLayerAdded,
rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasStagingAreaSlice } from 'features/controlLayers/store/canvasStagingAreaSlice';
import {
@@ -51,6 +55,7 @@ import { getImageDTO } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO, S } from 'services/api/types';
import { QueueError } from 'services/events/errors';
import type { Param0 } from 'tsafe';
import { assert } from 'tsafe';

import type { CanvasEntityAdapter } from './CanvasEntity/types';
@@ -160,6 +165,34 @@ export class CanvasStateApiModule extends CanvasModuleBase {
this.store.dispatch(entityRectAdded(arg));
};

/**
* Adds a raster layer to the canvas, pushing state to redux.
*/
addRasterLayer = (arg: Param0<typeof rasterLayerAdded>) => {
this.store.dispatch(rasterLayerAdded(arg));
};

/**
* Adds a control layer to the canvas, pushing state to redux.
*/
addControlLayer = (arg: Param0<typeof controlLayerAdded>) => {
this.store.dispatch(controlLayerAdded(arg));
};

/**
* Adds an inpaint mask to the canvas, pushing state to redux.
*/
addInpaintMask = (arg: Param0<typeof inpaintMaskAdded>) => {
this.store.dispatch(inpaintMaskAdded(arg));
};

/**
* Adds regional guidance to the canvas, pushing state to redux.
*/
addRegionalGuidance = (arg: Param0<typeof rgAdded>) => {
this.store.dispatch(rgAdded(arg));
};

/**
* Rasterizes an entity, pushing state to redux.
*/
@@ -126,6 +126,13 @@ export const floorCoord = (coord: Coordinate): Coordinate => {
};
};

export const roundCoord = (coord: Coordinate): Coordinate => {
return {
x: Math.round(coord.x),
y: Math.round(coord.y),
};
};

/**
* Snaps a position to the edge of the given rect if within a threshold of the edge
* @param pos The position to snap
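The new `roundCoord` helper mirrors the existing `floorCoord` but rounds to the nearest integer, which is what the SAM point drag handlers above want. A quick illustration of the difference (values are made up):

roundCoord({ x: 10.6, y: 3.2 }); // -> { x: 11, y: 3 }
floorCoord({ x: 10.6, y: 3.2 }); // -> { x: 10, y: 3 }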
@@ -29,7 +29,7 @@ import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import { ASPECT_RATIO_MAP } from 'features/parameters/components/Bbox/constants';
import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import type { IRect } from 'konva/lib/types';
import { merge, omit } from 'lodash-es';
import { merge } from 'lodash-es';
import type { UndoableOptions } from 'redux-undo';
import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@@ -57,13 +57,13 @@ import type {
} from './types';
import { getEntityIdentifier, isRenderableEntity } from './types';
import {
converters,
getControlLayerState,
getInpaintMaskState,
getRasterLayerState,
getReferenceImageState,
getRegionalGuidanceState,
imageDTOToImageWithDims,
initialControlNet,
initialIPAdapter,
} from './util';

@@ -157,28 +157,25 @@ export const canvasSlice = createSlice({
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<{ newId: string; overrides?: Partial<CanvasControlLayerState> }, 'raster_layer'>
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasControlLayerState>; replace?: boolean },
'raster_layer'
>
>
) => {
const { entityIdentifier, newId, overrides } = action.payload;
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the raster layer to control layer
const controlLayerState: CanvasControlLayerState = {
...deepClone(layer),
id: newId,
type: 'control_layer',
controlAdapter: deepClone(initialControlNet),
withTransparencyEffect: true,
};
const controlLayerState = converters.rasterLayer.toControlLayer(newId, layer, overrides);

merge(controlLayerState, overrides);

// Remove the raster layer
state.rasterLayers.entities = state.rasterLayers.entities.filter((layer) => layer.id !== entityIdentifier.id);
if (replace) {
// Remove the raster layer
state.rasterLayers.entities = state.rasterLayers.entities.filter((layer) => layer.id !== entityIdentifier.id);
}

// Add the converted control layer
state.controlLayers.entities.push(controlLayerState);
@@ -186,11 +183,90 @@ export const canvasSlice = createSlice({
state.selectedEntityIdentifier = { type: controlLayerState.type, id: controlLayerState.id };
},
prepare: (
payload: EntityIdentifierPayload<{ overrides?: Partial<CanvasControlLayerState> } | undefined, 'raster_layer'>
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasControlLayerState>; replace?: boolean } | undefined,
'raster_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('control_layer') },
}),
},
rasterLayerConvertedToInpaintMask: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean },
'raster_layer'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the raster layer to inpaint mask
const inpaintMaskState = converters.rasterLayer.toInpaintMask(newId, layer, overrides);

if (replace) {
// Remove the raster layer
state.rasterLayers.entities = state.rasterLayers.entities.filter((layer) => layer.id !== entityIdentifier.id);
}

// Add the converted inpaint mask
state.inpaintMasks.entities.push(inpaintMaskState);

state.selectedEntityIdentifier = { type: inpaintMaskState.type, id: inpaintMaskState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean } | undefined,
'raster_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('inpaint_mask') },
}),
},
rasterLayerConvertedToRegionalGuidance: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean },
'raster_layer'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the raster layer to regional guidance
const regionalGuidanceState = converters.rasterLayer.toRegionalGuidance(newId, layer, overrides);

if (replace) {
// Remove the raster layer
state.rasterLayers.entities = state.rasterLayers.entities.filter((layer) => layer.id !== entityIdentifier.id);
}

// Add the converted regional guidance
state.regionalGuidance.entities.push(regionalGuidanceState);

state.selectedEntityIdentifier = { type: regionalGuidanceState.type, id: regionalGuidanceState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean } | undefined,
'raster_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
}),
},
//#region Control layers
controlLayerAdded: {
reducer: (
@@ -217,32 +293,125 @@ export const canvasSlice = createSlice({
state.selectedEntityIdentifier = { type: 'control_layer', id: data.id };
},
controlLayerConvertedToRasterLayer: {
reducer: (state, action: PayloadAction<EntityIdentifierPayload<{ newId: string }, 'control_layer'>>) => {
const { entityIdentifier, newId } = action.payload;
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasRasterLayerState>; replace?: boolean },
'control_layer'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the raster layer to control layer
const rasterLayerState: CanvasRasterLayerState = {
...omit(deepClone(layer), ['type', 'controlAdapter', 'withTransparencyEffect']),
id: newId,
type: 'raster_layer',
};
const rasterLayerState = converters.controlLayer.toRasterLayer(newId, layer, overrides);

// Remove the control layer
state.controlLayers.entities = state.controlLayers.entities.filter((layer) => layer.id !== entityIdentifier.id);
if (replace) {
// Remove the control layer
state.controlLayers.entities = state.controlLayers.entities.filter(
(layer) => layer.id !== entityIdentifier.id
);
}

// Add the new raster layer
state.rasterLayers.entities.push(rasterLayerState);

state.selectedEntityIdentifier = { type: rasterLayerState.type, id: rasterLayerState.id };
},
prepare: (payload: EntityIdentifierPayload<void, 'control_layer'>) => ({
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasRasterLayerState>; replace?: boolean } | undefined,
'control_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('raster_layer') },
}),
},
controlLayerConvertedToInpaintMask: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean },
'control_layer'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the control layer to inpaint mask
const inpaintMaskState = converters.controlLayer.toInpaintMask(newId, layer, overrides);

if (replace) {
// Remove the control layer
state.controlLayers.entities = state.controlLayers.entities.filter(
(layer) => layer.id !== entityIdentifier.id
);
}

// Add the new inpaint mask
state.inpaintMasks.entities.push(inpaintMaskState);

state.selectedEntityIdentifier = { type: inpaintMaskState.type, id: inpaintMaskState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean } | undefined,
'control_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('inpaint_mask') },
}),
},
controlLayerConvertedToRegionalGuidance: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean },
'control_layer'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the control layer to regional guidance
const regionalGuidanceState = converters.controlLayer.toRegionalGuidance(newId, layer, overrides);

if (replace) {
// Remove the control layer
state.controlLayers.entities = state.controlLayers.entities.filter(
(layer) => layer.id !== entityIdentifier.id
);
}

// Add the new regional guidance
state.regionalGuidance.entities.push(regionalGuidanceState);

state.selectedEntityIdentifier = { type: regionalGuidanceState.type, id: regionalGuidanceState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean } | undefined,
'control_layer'
>
) => ({
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
}),
},
controlLayerModelChanged: (
state,
action: PayloadAction<
@@ -447,6 +616,46 @@ export const canvasSlice = createSlice({
state.regionalGuidance.entities.push(data);
state.selectedEntityIdentifier = { type: 'regional_guidance', id: data.id };
},
rgConvertedToInpaintMask: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean },
'regional_guidance'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the regional guidance to inpaint mask
const inpaintMaskState = converters.regionalGuidance.toInpaintMask(newId, layer, overrides);

if (replace) {
// Remove the regional guidance
state.regionalGuidance.entities = state.regionalGuidance.entities.filter(
(layer) => layer.id !== entityIdentifier.id
);
}

// Add the new inpaint mask
state.inpaintMasks.entities.push(inpaintMaskState);

state.selectedEntityIdentifier = { type: inpaintMaskState.type, id: inpaintMaskState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasInpaintMaskState>; replace?: boolean } | undefined,
'regional_guidance'
>
) => ({
payload: { ...payload, newId: getPrefixedId('inpaint_mask') },
}),
},
rgPositivePromptChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ prompt: string | null }, 'regional_guidance'>>
@@ -644,6 +853,44 @@ export const canvasSlice = createSlice({
state.inpaintMasks.entities = [data];
state.selectedEntityIdentifier = { type: 'inpaint_mask', id: data.id };
},
inpaintMaskConvertedToRegionalGuidance: {
reducer: (
state,
action: PayloadAction<
EntityIdentifierPayload<
{ newId: string; overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean },
'inpaint_mask'
>
>
) => {
const { entityIdentifier, newId, overrides, replace } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}

// Convert the inpaint mask to regional guidance
const regionalGuidanceState = converters.inpaintMask.toRegionalGuidance(newId, layer, overrides);

if (replace) {
// Remove the inpaint mask
state.inpaintMasks.entities = state.inpaintMasks.entities.filter((layer) => layer.id !== entityIdentifier.id);
}

// Add the new regional guidance
state.regionalGuidance.entities.push(regionalGuidanceState);

state.selectedEntityIdentifier = { type: regionalGuidanceState.type, id: regionalGuidanceState.id };
},
prepare: (
payload: EntityIdentifierPayload<
{ overrides?: Partial<CanvasRegionalGuidanceState>; replace?: boolean } | undefined,
'inpaint_mask'
>
) => ({
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
}),
},
//#region BBox
bboxScaledWidthChanged: (state, action: PayloadAction<number>) => {
const gridSize = getGridSize(state.bbox.modelBase);
@@ -1210,10 +1457,14 @@ export const {
rasterLayerAdded,
// rasterLayerRecalled,
rasterLayerConvertedToControlLayer,
rasterLayerConvertedToInpaintMask,
rasterLayerConvertedToRegionalGuidance,
// Control layers
controlLayerAdded,
// controlLayerRecalled,
controlLayerConvertedToRasterLayer,
controlLayerConvertedToInpaintMask,
controlLayerConvertedToRegionalGuidance,
controlLayerModelChanged,
controlLayerControlModeChanged,
controlLayerWeightChanged,
@@ -1231,6 +1482,7 @@ export const {
// Regions
rgAdded,
// rgRecalled,
rgConvertedToInpaintMask,
rgPositivePromptChanged,
rgNegativePromptChanged,
rgAutoNegativeToggled,
@@ -1244,6 +1496,7 @@ export const {
rgIPAdapterCLIPVisionModelChanged,
// Inpaint mask
inpaintMaskAdded,
inpaintMaskConvertedToRegionalGuidance,
// inpaintMaskRecalled,
} = canvasSlice.actions;
@@ -131,7 +131,8 @@ const zSAMPoint = z.object({
y: z.number().int().gte(0),
label: zSAMPointLabel,
});
export type SAMPoint = z.infer<typeof zSAMPoint>;
type SAMPoint = z.infer<typeof zSAMPoint>;
export type SAMPointWithId = SAMPoint & { id: string };

const zRect = z.object({
x: z.number(),
@@ -184,3 +184,153 @@ export const getInpaintMaskState = (
merge(entityState, overrides);
return entityState;
};

const convertRasterLayerToControlLayer = (
newId: string,
rasterLayerState: CanvasRasterLayerState,
overrides?: Partial<CanvasControlLayerState>
): CanvasControlLayerState => {
const { name, objects, position } = rasterLayerState;
const controlLayerState = getControlLayerState(newId, {
name,
objects,
position,
});
merge(controlLayerState, overrides);
return controlLayerState;
};

const convertRasterLayerToInpaintMask = (
newId: string,
rasterLayerState: CanvasRasterLayerState,
overrides?: Partial<CanvasInpaintMaskState>
): CanvasInpaintMaskState => {
const { name, objects, position } = rasterLayerState;
const inpaintMaskState = getInpaintMaskState(newId, {
name,
objects,
position,
});
merge(inpaintMaskState, overrides);
return inpaintMaskState;
};

const convertRasterLayerToRegionalGuidance = (
newId: string,
rasterLayerState: CanvasRasterLayerState,
overrides?: Partial<CanvasRegionalGuidanceState>
): CanvasRegionalGuidanceState => {
const { name, objects, position } = rasterLayerState;
const regionalGuidanceState = getRegionalGuidanceState(newId, {
name,
objects,
position,
});
merge(regionalGuidanceState, overrides);
return regionalGuidanceState;
};

const convertControlLayerToRasterLayer = (
newId: string,
controlLayerState: CanvasControlLayerState,
overrides?: Partial<CanvasRasterLayerState>
): CanvasRasterLayerState => {
const { name, objects, position } = controlLayerState;
const rasterLayerState = getRasterLayerState(newId, {
name,
objects,
position,
});
merge(rasterLayerState, overrides);
return rasterLayerState;
};

const convertControlLayerToInpaintMask = (
newId: string,
rasterLayerState: CanvasControlLayerState,
overrides?: Partial<CanvasInpaintMaskState>
): CanvasInpaintMaskState => {
const { name, objects, position } = rasterLayerState;
const inpaintMaskState = getInpaintMaskState(newId, {
name,
objects,
position,
});
merge(inpaintMaskState, overrides);
return inpaintMaskState;
};

const convertControlLayerToRegionalGuidance = (
newId: string,
rasterLayerState: CanvasControlLayerState,
overrides?: Partial<CanvasRegionalGuidanceState>
): CanvasRegionalGuidanceState => {
const { name, objects, position } = rasterLayerState;
const regionalGuidanceState = getRegionalGuidanceState(newId, {
name,
objects,
position,
});
merge(regionalGuidanceState, overrides);
return regionalGuidanceState;
};

const convertInpaintMaskToRegionalGuidance = (
newId: string,
inpaintMaskState: CanvasInpaintMaskState,
overrides?: Partial<CanvasRegionalGuidanceState>
): CanvasRegionalGuidanceState => {
const { name, objects, position } = inpaintMaskState;
const regionalGuidanceState = getRegionalGuidanceState(newId, {
name,
objects,
position,
});
merge(regionalGuidanceState, overrides);
return regionalGuidanceState;
};

const convertRegionalGuidanceToInpaintMask = (
newId: string,
regionalGuidanceState: CanvasRegionalGuidanceState,
overrides?: Partial<CanvasInpaintMaskState>
): CanvasInpaintMaskState => {
const { name, objects, position } = regionalGuidanceState;
const inpaintMaskState = getInpaintMaskState(newId, {
name,
objects,
position,
});
merge(inpaintMaskState, overrides);
return inpaintMaskState;
};

/**
* Supported conversions:
* - Raster Layer -> Control Layer
* - Raster Layer -> Inpaint Mask
* - Raster Layer -> Regional Guidance
* - Control Layer -> Raster Layer
* - Control Layer -> Inpaint Mask
* - Control Layer -> Regional Guidance
* - Inpaint Mask -> Regional Guidance
* - Regional Guidance -> Inpaint Mask
*/
export const converters = {
rasterLayer: {
toControlLayer: convertRasterLayerToControlLayer,
toInpaintMask: convertRasterLayerToInpaintMask,
toRegionalGuidance: convertRasterLayerToRegionalGuidance,
},
controlLayer: {
toRasterLayer: convertControlLayerToRasterLayer,
toInpaintMask: convertControlLayerToInpaintMask,
toRegionalGuidance: convertControlLayerToRegionalGuidance,
},
inpaintMask: {
toRegionalGuidance: convertInpaintMaskToRegionalGuidance,
},
regionalGuidance: {
toInpaintMask: convertRegionalGuidanceToInpaintMask,
},
};
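The `converters` map centralizes the copy-name/objects/position-then-merge-overrides logic that the slice reducers previously inlined, so every "convert entity" reducer reduces to one call plus an optional removal of the source entity. A hedged usage sketch (the `layer` value and the override shown here are made up for illustration):

// Convert an existing raster layer into an inpaint mask, keeping the raster layer in place.
const inpaintMask = converters.rasterLayer.toInpaintMask(getPrefixedId('inpaint_mask'), layer, {
  name: 'Mask from raster layer', // hypothetical override, merged onto the generated state
});
state.inpaintMasks.entities.push(inpaintMask);
// The source raster layer is only removed when the reducer's `replace` flag is set.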
@@ -1,4 +1,4 @@
import { Flex, Link, Spacer, Text } from '@invoke-ai/ui-library';
import { Link } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $projectName, $projectUrl } from 'app/store/nanostores/projectId';
import { memo } from 'react';
@@ -9,15 +9,13 @@ export const GalleryHeader = memo(() => {

if (projectName && projectUrl) {
return (
<Flex gap={2} alignItems="center" justifyContent="space-evenly" pe={2} w="50%">
<Text fontSize="md" fontWeight="semibold" noOfLines={1} wordBreak="break-all" w="full" textAlign="center">
<Link href={projectUrl}>{projectName}</Link>
</Text>
</Flex>
<Link fontSize="md" fontWeight="semibold" noOfLines={1} wordBreak="break-all" href={projectUrl}>
{projectName}
</Link>
);
}

return <Spacer />;
return null;
});

GalleryHeader.displayName = 'GalleryHeader';
@@ -51,8 +51,8 @@ const GalleryPanelContent = () => {

return (
<Flex ref={galleryPanelFocusRef} position="relative" flexDirection="column" h="full" w="full" tabIndex={-1}>
<Flex alignItems="center" w="full">
<Flex w="25%">
<Flex alignItems="center" justifyContent="space-between" w="full">
<Flex flexGrow={1} flexBasis={0}>
<Button
size="sm"
variant="ghost"
@@ -62,8 +62,10 @@ const GalleryPanelContent = () => {
{boardsListPanel.isCollapsed ? t('boards.viewBoards') : t('boards.hideBoards')}
</Button>
</Flex>
<GalleryHeader />
<Flex h="full" w="25%" justifyContent="flex-end">
<Flex>
<GalleryHeader />
</Flex>
<Flex flexGrow={1} flexBasis={0} justifyContent="flex-end">
<BoardsSettingsPopover />
<IconButton
size="sm"
@@ -1,9 +1,11 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useImageActions } from 'features/gallery/hooks/useImageActions';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import {
PiArrowBendUpLeftBold,
PiArrowsCounterClockwiseBold,
PiAsteriskBold,
PiPaintBrushBold,
@@ -14,28 +16,36 @@ import {
export const ImageMenuItemMetadataRecallActions = memo(() => {
const { t } = useTranslation();
const imageDTO = useImageDTOContext();
const subMenu = useSubMenu();

const { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, createAsPreset } =
useImageActions(imageDTO);

return (
<>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClickCapture={remix} isDisabled={!hasMetadata}>
{t('parameters.remixImage')}
</MenuItem>
<MenuItem icon={<PiQuotesBold />} onClickCapture={recallPrompts} isDisabled={!hasPrompts}>
{t('parameters.usePrompt')}
</MenuItem>
<MenuItem icon={<PiPlantBold />} onClickCapture={recallSeed} isDisabled={!hasSeed}>
{t('parameters.useSeed')}
</MenuItem>
<MenuItem icon={<PiAsteriskBold />} onClickCapture={recallAll} isDisabled={!hasMetadata}>
{t('parameters.useAll')}
</MenuItem>
<MenuItem icon={<PiPaintBrushBold />} onClickCapture={createAsPreset} isDisabled={!hasPrompts}>
{t('stylePresets.useForTemplate')}
</MenuItem>
</>
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiArrowBendUpLeftBold />}>
<Menu {...subMenu.menuProps}>
<MenuButton {...subMenu.menuButtonProps}>
<SubMenuButtonContent label="Recall Metadata" />
</MenuButton>
<MenuList {...subMenu.menuListProps}>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={remix} isDisabled={!hasMetadata}>
{t('parameters.remixImage')}
</MenuItem>
<MenuItem icon={<PiQuotesBold />} onClick={recallPrompts} isDisabled={!hasPrompts}>
{t('parameters.usePrompt')}
</MenuItem>
<MenuItem icon={<PiPlantBold />} onClick={recallSeed} isDisabled={!hasSeed}>
{t('parameters.useSeed')}
</MenuItem>
<MenuItem icon={<PiAsteriskBold />} onClick={recallAll} isDisabled={!hasMetadata}>
{t('parameters.useAll')}
</MenuItem>
<MenuItem icon={<PiPaintBrushBold />} onClick={createAsPreset} isDisabled={!hasPrompts}>
{t('stylePresets.useForTemplate')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});
@@ -21,9 +21,12 @@ export const useBuildModelInstallArg = () => {
});

const getIsInstalled = useCallback(
({ source, name, base, type, is_installed }: StarterModel): boolean =>
({ source, name, base, type, is_installed, previous_names }: StarterModel): boolean =>
modelList.some(
(mc) => is_installed || source === mc.source || (base === mc.base && name === mc.name && type === mc.type)
(mc) =>
is_installed ||
source === mc.source ||
(base === mc.base && (name === mc.name || previous_names?.includes(mc.name)) && type === mc.type)
),
[modelList]
);
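With `previous_names` in play, a starter model is treated as installed when any installed record matches its current name or one of its former names (plus the same base and type), so renamed starter models are not offered for reinstall. A standalone sketch of that predicate with simplified types (the real hook works against the model list returned by the API; the type and function names below are illustrative):

type InstalledModel = { source: string; name: string; base: string; type: string };
type Starter = InstalledModel & { is_installed?: boolean; previous_names?: string[] };

const isStarterInstalled = (starter: Starter, installed: InstalledModel[]): boolean =>
  Boolean(starter.is_installed) ||
  installed.some(
    (mc) =>
      starter.source === mc.source ||
      (starter.base === mc.base &&
        (starter.name === mc.name || (starter.previous_names ?? []).includes(mc.name)) &&
        starter.type === mc.type)
  );

This mirrors the backend check, where an installed model's name is compared against the starter model's previous_names list.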
@@ -1,4 +1,4 @@
import { Button, Flex, Text, Tooltip } from '@invoke-ai/ui-library';
import { Button, Flex, ListItem, Text, Tooltip, UnorderedList } from '@invoke-ai/ui-library';
import { flattenStarterModel, useBuildModelInstallArg } from 'features/modelManagerV2/hooks/useBuildModelsToInstall';
import { isMainModelBase } from 'features/nodes/types/common';
import { MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
@@ -44,8 +44,15 @@ export const StarterBundle = ({ bundleName, bundle }: { bundleName: string; bund
return (
<Tooltip
label={
<Flex flexDir="column">
<Text>{t('modelManager.includesNModels', { n: bundle.length })}</Text>
<Flex flexDir="column" p={1}>
<Text>{t('modelManager.includesNModels', { n: bundle.length })}:</Text>
<UnorderedList>
{bundle.map((model, index) => (
<ListItem key={index} wordBreak="break-all">
{model.name}
</ListItem>
))}
</UnorderedList>
</Flex>
}
>
@@ -1,14 +1,4 @@
import {
Box,
Flex,
Icon,
IconButton,
Input,
InputGroup,
InputRightElement,
Text,
Tooltip,
} from '@invoke-ai/ui-library';
import { Flex, Icon, IconButton, Input, InputGroup, InputRightElement, Text, Tooltip } from '@invoke-ai/ui-library';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { map, size } from 'lodash-es';
import type { ChangeEventHandler } from 'react';
@@ -59,14 +49,14 @@ export const StarterModelsResults = memo(({ results }: StarterModelsResultsProps
<Flex justifyContent="space-between" alignItems="center">
{size(results.starter_bundles) > 0 && (
<Flex gap={4} alignItems="center">
<Flex gap={1} alignItems="center">
<Flex gap={2} alignItems="center">
<Text color="base.200" fontWeight="semibold">
{t('modelManager.starterBundles')}
</Text>
<Tooltip label={t('modelManager.starterBundleHelpText')}>
<Box>
<Flex alignItems="center">
<Icon as={PiInfoBold} color="base.200" />
</Box>
</Flex>
</Tooltip>
</Flex>
<Flex gap={2}>
@@ -106,10 +106,12 @@ export const getInfill = (
}

if (infillMethod === 'color') {
const { a, ...rgb } = infillColorValue;
const color = { ...rgb, a: Math.round(a * 255) };
return g.addNode({
id: 'infill_rgba',
type: 'infill_rgba',
color: infillColorValue,
color,
});
}
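The infill fix above rescales only the alpha channel: the UI stores RGBA with `a` in the 0-1 range, while the `infill_rgba` node's color field uses 0-255 integers (which is why the diff multiplies by 255 and rounds). For example:

const { a, ...rgb } = { r: 20, g: 40, b: 60, a: 0.5 };
const color = { ...rgb, a: Math.round(a * 255) }; // { r: 20, g: 40, b: 60, a: 128 }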
@@ -14731,7 +14731,7 @@ export type components = {
bounding_boxes?: components["schemas"]["BoundingBoxField"][] | null;
/**
* Point Lists
* @description The points to prompt the SAM model with.
* @description The list of point lists to prompt the SAM model with. Each list of points represents a single object.
* @default null
*/
point_lists?: components["schemas"]["SAMPointsField"][] | null;
@@ -15347,6 +15347,11 @@ export type components = {
* @default false
*/
is_installed?: boolean;
/**
* Previous Names
* @default []
*/
previous_names?: string[];
/** Dependencies */
dependencies?: components["schemas"]["StarterModelWithoutDependencies"][] | null;
};
@@ -15375,6 +15380,11 @@ export type components = {
* @default false
*/
is_installed?: boolean;
/**
* Previous Names
* @default []
*/
previous_names?: string[];
};
/**
* Step Param Easing
@@ -89,7 +89,7 @@ dependencies = [
"pypatchmatch",
'pyperclip',
"pyreadline3",
"python-multipart",
"python-multipart==0.0.12",
"requests~=2.28.2",
"rich~=13.3",
"scikit-image~=0.21.0",
File diff suppressed because it is too large
@@ -1,43 +0,0 @@
import pytest
import torch

from invokeai.backend.sd3.sd3_mmditx import Sd3MMDiTX
from invokeai.backend.sd3.sd3_state_dict_utils import infer_sd3_mmditx_params, is_sd3_checkpoint
from tests.backend.sd3.sd3_5_mmditx_state_dict import sd3_sd_shapes


@pytest.mark.parametrize(
["sd_shapes", "expected"],
[
(sd3_sd_shapes, True),
({}, False),
({"foo": [1]}, False),
],
)
def test_is_sd3_checkpoint(sd_shapes: dict[str, list[int]], expected: bool):
# Build mock state dict from the provided shape dict.
sd = {k: None for k in sd_shapes}
assert is_sd3_checkpoint(sd) == expected


def test_infer_sd3_mmditx_params():
# Build mock state dict on the meta device.
with torch.device("meta"):
sd = {k: torch.zeros(shape) for k, shape in sd3_sd_shapes.items()}

# Filter the MMDiTX parameters from the state dict.
sd = {k: v for k, v in sd.items() if k.startswith("model.diffusion_model.")}

params = infer_sd3_mmditx_params(sd)

# Construct model from params.
with torch.device("meta"):
model = Sd3MMDiTX(params=params)

model_sd = model.state_dict()

# Assert that the model state dict is compatible with the original state dict.
sd_without_prefix = {k.split("model.diffusion_model.")[-1]: v for k, v in model_sd.items()}
assert set(model_sd.keys()) == set(sd_without_prefix.keys())
for k in model_sd:
assert model_sd[k].shape == sd_without_prefix[k].shape