Compare commits

..

16 Commits

Author  SHA1  Message  Date
Ryan Dick  0ff5355ce3  Rename flux_kohya_lora_conversion_utils.py  2024-09-05 14:18:17 +00:00
Ryan Dick  3ad5fc060d  Fixup FLUX LoRA unit tests.  2024-09-05 14:12:56 +00:00
Ryan Dick  17d5c85454  WIP  2024-09-04 22:52:18 +00:00
Ryan Dick  4698649cc9  WIP - add invocations to support FLUX LORAs.  2024-09-04 19:55:06 +00:00
Ryan Dick  d41c075768  Get probing of FLUX LoRA kohya models working.  2024-09-04 16:03:55 +00:00
Ryan Dick  6b129aaba6  Add utility function for detecting whether a state_dict is in the FLUX kohya LoRA format.  2024-09-04 15:51:15 +00:00
Ryan Dick  f58546fd53  Update convert_flux_kohya_state_dict_to_invoke_format() to raise an exception if an unexpected key is encountered, and add a corresponding unit test.  2024-09-04 15:34:31 +00:00
Ryan Dick  de3edf47fb  Move the responsibilities of 1) state_dict loading from file, and 2) SDXL lora key conversions, out of LoRAModelRaw and into LoRALoader.  2024-09-04 15:18:43 +00:00
Ryan Dick  6dc4baa925  Remove unused LoRAModelRaw.name attribute.  2024-09-04 14:52:30 +00:00
Ryan Dick  943fa6da4b  Fix type errors in sdxl_conversion_utils.py  2024-09-04 14:35:38 +00:00
Ryan Dick  bfe31838cc  Start moving SDXL-specific LoRA conversions out of the general-purpose LoRAModelRaw class.  2024-09-04 14:30:18 +00:00
Ryan Dick  16b76f7e7f  Get convert_flux_kohya_state_dict_to_invoke_format(...) working, with unit tests.  2024-09-04 13:42:12 +00:00
Ryan Dick  cb115743e7  WIP - FLUX LoRA conversion logic.  2024-09-03 22:36:16 +00:00
Ryan Dick  8f4279ba51  Add state_dict keys for two FLUX LoRA formats to be used in unit tests.  2024-09-03 22:01:00 +00:00
Ryan Dick  22a207b50d  Move lora.py to peft/ subdir.  2024-09-03 18:13:21 +00:00
Ryan Dick  638c6003e3  Split PEFT layer implementations into separate files.  2024-09-03 18:06:21 +00:00
949 changed files with 31640 additions and 28528 deletions

View File

@@ -13,12 +13,6 @@ on:
tags:
- 'v*.*.*'
workflow_dispatch:
inputs:
push-to-registry:
description: Push the built image to the container registry
required: false
type: boolean
default: false
permissions:
contents: write
@@ -56,15 +50,16 @@ jobs:
df -h
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
images: |
ghcr.io/${{ github.repository }}
${{ env.DOCKERHUB_REPOSITORY }}
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -77,33 +72,49 @@ jobs:
suffix=-${{ matrix.gpu-driver }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ env.PLATFORMS }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# - name: Login to Docker Hub
# if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
# uses: docker/login-action@v2
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build container
timeout-minutes: 40
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: .
file: docker/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
type=gha,scope=main-${{ matrix.gpu-driver }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
# - name: Docker Hub Description
# if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
# uses: peter-evans/dockerhub-description@v3
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
# repository: ${{ vars.DOCKERHUB_REPOSITORY }}
# short-description: ${{ github.event.repository.description }}

View File

@@ -11,7 +11,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
ClearResult,
EnqueueBatchResult,
PruneResult,
@@ -106,21 +105,6 @@ async def cancel_by_batch_ids(
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(queue_id=queue_id, batch_ids=batch_ids)
@session_queue_router.put(
"/{queue_id}/cancel_by_destination",
operation_id="cancel_by_destination",
responses={200: {"model": CancelByBatchIDsResult}},
)
async def cancel_by_destination(
queue_id: str = Path(description="The queue id to perform this operation on"),
destination: str = Query(description="The destination to cancel all queue items for"),
) -> CancelByDestinationResult:
"""Immediately cancels all queue items with the given origin"""
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
queue_id=queue_id, destination=destination
)
@session_queue_router.put(
"/{queue_id}/clear",
operation_id="clear",

View File

@@ -20,6 +20,7 @@ from typing import (
Type,
TypeVar,
Union,
cast,
)
import semver
@@ -79,7 +80,7 @@ class UIConfigBase(BaseModel):
version: str = Field(
description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".',
)
node_pack: str = Field(description="The node pack that this node belongs to, will be 'invokeai' for built-in nodes")
node_pack: Optional[str] = Field(default=None, description="Whether or not this is a custom node")
classification: Classification = Field(default=Classification.Stable, description="The node's classification")
model_config = ConfigDict(
@@ -229,16 +230,18 @@ class BaseInvocation(ABC, BaseModel):
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
"""Adds various UI-facing attributes to the invocation's OpenAPI schema."""
if title := model_class.UIConfig.title:
schema["title"] = title
if tags := model_class.UIConfig.tags:
schema["tags"] = tags
if category := model_class.UIConfig.category:
schema["category"] = category
if node_pack := model_class.UIConfig.node_pack:
schema["node_pack"] = node_pack
schema["classification"] = model_class.UIConfig.classification
schema["version"] = model_class.UIConfig.version
uiconfig = cast(UIConfigBase | None, getattr(model_class, "UIConfig", None))
if uiconfig is not None:
if uiconfig.title is not None:
schema["title"] = uiconfig.title
if uiconfig.tags is not None:
schema["tags"] = uiconfig.tags
if uiconfig.category is not None:
schema["category"] = uiconfig.category
if uiconfig.node_pack is not None:
schema["node_pack"] = uiconfig.node_pack
schema["classification"] = uiconfig.classification
schema["version"] = uiconfig.version
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["class"] = "invocation"
@@ -309,7 +312,7 @@ class BaseInvocation(ABC, BaseModel):
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
)
UIConfig: ClassVar[UIConfigBase]
UIConfig: ClassVar[Type[UIConfigBase]]
model_config = ConfigDict(
protected_namespaces=(),
@@ -438,25 +441,30 @@ def invocation(
validate_fields(cls.model_fields, invocation_type)
# Add OpenAPI schema extras
uiconfig: dict[str, Any] = {}
uiconfig["title"] = title
uiconfig["tags"] = tags
uiconfig["category"] = category
uiconfig["classification"] = classification
# The node pack is the module name - will be "invokeai" for built-in nodes
uiconfig["node_pack"] = cls.__module__.split(".")[0]
uiconfig_name = cls.__qualname__ + ".UIConfig"
if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconfig_name:
cls.UIConfig = type(uiconfig_name, (UIConfigBase,), {})
cls.UIConfig.title = title
cls.UIConfig.tags = tags
cls.UIConfig.category = category
cls.UIConfig.classification = classification
# Grab the node pack's name from the module name, if it's a custom node
is_custom_node = cls.__module__.rsplit(".", 1)[0] == "invokeai.app.invocations"
if is_custom_node:
cls.UIConfig.node_pack = cls.__module__.split(".")[0]
else:
cls.UIConfig.node_pack = None
if version is not None:
try:
semver.Version.parse(version)
except ValueError as e:
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
uiconfig["version"] = version
cls.UIConfig.version = version
else:
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
uiconfig["version"] = "1.0.0"
cls.UIConfig = UIConfigBase(**uiconfig)
cls.UIConfig.version = "1.0.0"
if use_cache is not None:
cls.model_fields["use_cache"].default = use_cache

View File

@@ -19,9 +19,8 @@ from invokeai.app.invocations.model import CLIPField
from invokeai.app.invocations.primitives import ConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.ti_utils import generate_ti_list
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
ConditioningFieldData,
@@ -83,10 +82,9 @@ class CompelInvocation(BaseInvocation):
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoraPatcher.apply_lora_patches(
model=text_encoder,
patches=_lora_loader(),
prefix="lora_te_",
ModelPatcher.apply_lora_text_encoder(
text_encoder,
loras=_lora_loader(),
cached_weights=cached_weights,
),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -179,9 +177,9 @@ class SDXLPromptInvocationBase:
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoraPatcher.apply_lora_patches(
ModelPatcher.apply_lora(
text_encoder,
patches=_lora_loader(),
loras=_lora_loader(),
prefix=lora_prefix,
cached_weights=cached_weights,
),

View File

@@ -36,10 +36,9 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.model_manager import BaseModelType, ModelVariantType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.stable_diffusion import PipelineIntermediateState
from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs
from invokeai.backend.stable_diffusion.diffusers_pipeline import (
@@ -980,10 +979,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
SeamlessExt.static_patch_model(unet, self.unet.seamless_axes), # FIXME
# Apply the LoRA after unet has been moved to its target device for faster patching.
LoraPatcher.apply_lora_patches(
model=unet,
patches=_lora_loader(),
prefix="lora_unet_",
ModelPatcher.apply_lora_unet(
unet,
loras=_lora_loader(),
cached_weights=cached_weights,
),
):

View File

@@ -17,6 +17,7 @@ from invokeai.app.invocations.fields import (
)
from invokeai.app.invocations.model import TransformerField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.denoise import denoise
from invokeai.backend.flux.inpaint_extension import InpaintExtension
@@ -29,9 +30,8 @@ from invokeai.backend.flux.sampling_utils import (
pack,
unpack,
)
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.peft.peft_patcher import PeftPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice
@@ -192,7 +192,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
with (
transformer_info.model_on_device() as (cached_weights, transformer),
# Apply the LoRA after transformer has been moved to its target device for faster patching.
LoraPatcher.apply_lora_patches(
PeftPatcher.apply_peft_patches(
model=transformer,
patches=self._lora_iterator(context),
prefix="",
@@ -259,9 +259,34 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
yield (lora_info.model, lora.weight)
del lora_info
def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
def step_callback(state: PipelineIntermediateState) -> None:
state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
context.util.flux_step_callback(state)
def _build_step_callback(self, context: InvocationContext) -> Callable[[], None]:
def step_callback() -> None:
if context.util.is_canceled():
raise CanceledException
# TODO: Make this look like the image before re-enabling
# latent_image = unpack(img.float(), self.height, self.width)
# latent_image = latent_image.squeeze() # Remove unnecessary dimensions
# flattened_tensor = latent_image.reshape(-1) # Flatten to shape [48*128*128]
# # Create a new tensor of the required shape [255, 255, 3]
# latent_image = flattened_tensor[: 255 * 255 * 3].reshape(255, 255, 3) # Reshape to RGB format
# # Convert to a NumPy array and then to a PIL Image
# image = Image.fromarray(latent_image.cpu().numpy().astype(np.uint8))
# (width, height) = image.size
# width *= 8
# height *= 8
# dataURL = image_to_dataURL(image, image_format="JPEG")
# # TODO: move this whole function to invocation context to properly reference these variables
# context._services.events.emit_invocation_denoise_progress(
# context._data.queue_item,
# context._data.invocation,
# state,
# ProgressImage(dataURL=dataURL, width=width, height=height),
# )
return step_callback

View File

@@ -6,19 +6,13 @@ import cv2
import numpy
from PIL import Image, ImageChops, ImageFilter, ImageOps
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import (
ColorField,
FieldDescriptions,
ImageField,
InputField,
OutputField,
WithBoard,
WithMetadata,
)
@@ -1013,62 +1007,3 @@ class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=mask, image_category=ImageCategory.MASK)
return ImageOutput.build(image_dto)
@invocation_output("canvas_v2_mask_and_crop_output")
class CanvasV2MaskAndCropOutput(ImageOutput):
offset_x: int = OutputField(description="The x offset of the image, after cropping")
offset_y: int = OutputField(description="The y offset of the image, after cropping")
@invocation(
"canvas_v2_mask_and_crop",
title="Canvas V2 Mask and Crop",
tags=["image", "mask", "id"],
category="image",
version="1.0.0",
classification=Classification.Prototype,
)
class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Handles Canvas V2 image output masking and cropping"""
source_image: ImageField | None = InputField(
default=None,
description="The source image onto which the masked generated image is pasted. If omitted, the masked generated image is returned with transparency.",
)
generated_image: ImageField = InputField(description="The image to apply the mask to")
mask: ImageField = InputField(description="The mask to apply")
mask_blur: int = InputField(default=0, ge=0, description="The amount to blur the mask by")
def _prepare_mask(self, mask: Image.Image) -> Image.Image:
mask_array = numpy.array(mask)
kernel = numpy.ones((self.mask_blur, self.mask_blur), numpy.uint8)
dilated_mask_array = cv2.erode(mask_array, kernel, iterations=3)
dilated_mask = Image.fromarray(dilated_mask_array)
if self.mask_blur > 0:
mask = dilated_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
return ImageOps.invert(mask.convert("L"))
def invoke(self, context: InvocationContext) -> CanvasV2MaskAndCropOutput:
mask = self._prepare_mask(context.images.get_pil(self.mask.image_name))
if self.source_image:
generated_image = context.images.get_pil(self.generated_image.image_name)
source_image = context.images.get_pil(self.source_image.image_name)
source_image.paste(generated_image, (0, 0), mask)
image_dto = context.images.save(image=source_image)
else:
generated_image = context.images.get_pil(self.generated_image.image_name)
generated_image.putalpha(mask)
image_dto = context.images.save(image=generated_image)
# bbox = image.getbbox()
# image = image.crop(bbox)
return CanvasV2MaskAndCropOutput(
image=ImageField(image_name=image_dto.image_name),
offset_x=0,
offset_y=0,
width=image_dto.width,
height=image_dto.height,
)

View File

@@ -22,8 +22,8 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.model import UNetField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.stable_diffusion.diffusers_pipeline import ControlNetData, PipelineIntermediateState
from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import (
MultiDiffusionPipeline,
@@ -204,11 +204,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
# Load the UNet model.
unet_info = context.models.load(self.unet.unet)
with (
ExitStack() as exit_stack,
unet_info as unet,
LoraPatcher.apply_lora_patches(model=unet, patches=_lora_loader(), prefix="lora_unet_"),
):
with ExitStack() as exit_stack, unet_info as unet, ModelPatcher.apply_lora_unet(unet, _lora_loader()):
assert isinstance(unet, UNet2DConditionModel)
latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None:

View File

@@ -88,8 +88,6 @@ class QueueItemEventBase(QueueEventBase):
item_id: int = Field(description="The ID of the queue item")
batch_id: str = Field(description="The ID of the queue batch")
origin: str | None = Field(default=None, description="The origin of the queue item")
destination: str | None = Field(default=None, description="The destination of the queue item")
class InvocationEventBase(QueueItemEventBase):
@@ -97,6 +95,8 @@ class InvocationEventBase(QueueItemEventBase):
session_id: str = Field(description="The ID of the session (aka graph execution state)")
queue_id: str = Field(description="The ID of the queue")
item_id: int = Field(description="The ID of the queue item")
batch_id: str = Field(description="The ID of the queue batch")
session_id: str = Field(description="The ID of the session (aka graph execution state)")
invocation: AnyInvocation = Field(description="The ID of the invocation")
invocation_source_id: str = Field(description="The ID of the prepared invocation's source node")
@@ -114,8 +114,6 @@ class InvocationStartedEvent(InvocationEventBase):
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -149,8 +147,6 @@ class InvocationDenoiseProgressEvent(InvocationEventBase):
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -188,8 +184,6 @@ class InvocationCompleteEvent(InvocationEventBase):
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -222,8 +216,6 @@ class InvocationErrorEvent(InvocationEventBase):
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
invocation=invocation,
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -261,8 +253,6 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
batch_id=queue_item.batch_id,
origin=queue_item.origin,
destination=queue_item.destination,
session_id=queue_item.session_id,
status=queue_item.status,
error_type=queue_item.error_type,
@@ -289,14 +279,12 @@ class BatchEnqueuedEvent(QueueEventBase):
description="The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full)"
)
priority: int = Field(description="The priority of the batch")
origin: str | None = Field(default=None, description="The origin of the batch")
@classmethod
def build(cls, enqueue_result: EnqueueBatchResult) -> "BatchEnqueuedEvent":
return cls(
queue_id=enqueue_result.queue_id,
batch_id=enqueue_result.batch.batch_id,
origin=enqueue_result.batch.origin,
enqueued=enqueue_result.enqueued,
requested=enqueue_result.requested,
priority=enqueue_result.priority,

View File

@@ -6,7 +6,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByQueueIDResult,
ClearResult,
EnqueueBatchResult,
@@ -96,11 +95,6 @@ class SessionQueueBase(ABC):
"""Cancels all queue items with matching batch IDs"""
pass
@abstractmethod
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
"""Cancels all queue items with the given batch destination"""
pass
@abstractmethod
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
"""Cancels all queue items with matching queue ID"""

View File

@@ -77,14 +77,6 @@ BatchDataCollection: TypeAlias = list[list[BatchDatum]]
class Batch(BaseModel):
batch_id: str = Field(default_factory=uuid_string, description="The ID of the batch")
origin: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
)
destination: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results",
)
data: Optional[BatchDataCollection] = Field(default=None, description="The batch data collection.")
graph: Graph = Field(description="The graph to initialize the session with")
workflow: Optional[WorkflowWithoutID] = Field(
@@ -203,14 +195,6 @@ class SessionQueueItemWithoutGraph(BaseModel):
status: QUEUE_ITEM_STATUS = Field(default="pending", description="The status of this queue item")
priority: int = Field(default=0, description="The priority of this queue item")
batch_id: str = Field(description="The ID of the batch associated with this queue item")
origin: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
)
destination: str | None = Field(
default=None,
description="The origin of this queue item. This data is used by the frontend to determine how to handle results",
)
session_id: str = Field(
description="The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed."
)
@@ -310,8 +294,6 @@ class SessionQueueStatus(BaseModel):
class BatchStatus(BaseModel):
queue_id: str = Field(..., description="The ID of the queue")
batch_id: str = Field(..., description="The ID of the batch")
origin: str | None = Field(..., description="The origin of the batch")
destination: str | None = Field(..., description="The destination of the batch")
pending: int = Field(..., description="Number of queue items with status 'pending'")
in_progress: int = Field(..., description="Number of queue items with status 'in_progress'")
completed: int = Field(..., description="Number of queue items with status 'complete'")
@@ -346,12 +328,6 @@ class CancelByBatchIDsResult(BaseModel):
canceled: int = Field(..., description="Number of queue items canceled")
class CancelByDestinationResult(CancelByBatchIDsResult):
"""Result of canceling by a destination"""
pass
class CancelByQueueIDResult(CancelByBatchIDsResult):
"""Result of canceling by queue id"""
@@ -457,8 +433,6 @@ class SessionQueueValueToInsert(NamedTuple):
field_values: Optional[str] # field_values json
priority: int # priority
workflow: Optional[str] # workflow json
origin: str | None
destination: str | None
ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
@@ -479,8 +453,6 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json)
priority, # priority
json.dumps(workflow, default=to_jsonable_python) if workflow else None, # workflow (json)
batch.origin, # origin
batch.destination, # destination
)
)
return values_to_insert

View File

@@ -10,7 +10,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
Batch,
BatchStatus,
CancelByBatchIDsResult,
CancelByDestinationResult,
CancelByQueueIDResult,
ClearResult,
EnqueueBatchResult,
@@ -128,8 +127,8 @@ class SqliteSessionQueue(SessionQueueBase):
self.__cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
@@ -418,7 +417,11 @@ class SqliteSessionQueue(SessionQueueBase):
)
self.__conn.commit()
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
queue_status = self.get_queue_status(queue_id=queue_id)
self.__invoker.services.events.emit_queue_item_status_changed(
current_queue_item, batch_status, queue_status
)
except Exception:
self.__conn.rollback()
raise
@@ -426,46 +429,6 @@ class SqliteSessionQueue(SessionQueueBase):
self.__lock.release()
return CancelByBatchIDsResult(canceled=count)
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
try:
current_queue_item = self.get_current(queue_id)
self.__lock.acquire()
where = """--sql
WHERE
queue_id == ?
AND destination == ?
AND status != 'canceled'
AND status != 'completed'
AND status != 'failed'
"""
params = (queue_id, destination)
self.__cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
{where};
""",
params,
)
count = self.__cursor.fetchone()[0]
self.__cursor.execute(
f"""--sql
UPDATE session_queue
SET status = 'canceled'
{where};
""",
params,
)
self.__conn.commit()
if current_queue_item is not None and current_queue_item.destination == destination:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self.__conn.rollback()
raise
finally:
self.__lock.release()
return CancelByDestinationResult(canceled=count)
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
try:
current_queue_item = self.get_current(queue_id)
@@ -578,9 +541,7 @@ class SqliteSessionQueue(SessionQueueBase):
started_at,
session_id,
batch_id,
queue_id,
origin,
destination
queue_id
FROM session_queue
WHERE queue_id = ?
"""
@@ -660,7 +621,7 @@ class SqliteSessionQueue(SessionQueueBase):
self.__lock.acquire()
self.__cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
SELECT status, count(*)
FROM session_queue
WHERE
queue_id = ?
@@ -672,8 +633,6 @@ class SqliteSessionQueue(SessionQueueBase):
result = cast(list[sqlite3.Row], self.__cursor.fetchall())
total = sum(row[1] for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
origin = result[0]["origin"] if result else None
destination = result[0]["destination"] if result else None
except Exception:
self.__conn.rollback()
raise
@@ -682,8 +641,6 @@ class SqliteSessionQueue(SessionQueueBase):
return BatchStatus(
batch_id=batch_id,
origin=origin,
destination=destination,
queue_id=queue_id,
pending=counts.get("pending", 0),
in_progress=counts.get("in_progress", 0),

View File

@@ -14,7 +14,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
AnyModel,
AnyModelConfig,
@@ -557,24 +557,6 @@ class UtilInterface(InvocationContextInterface):
is_canceled=self.is_canceled,
)
def flux_step_callback(self, intermediate_state: PipelineIntermediateState) -> None:
"""
The step callback emits a progress event with the current step, the total number of
steps, a preview image, and some other internal metadata.
This should be called after each denoising step.
Args:
intermediate_state: The intermediate state of the diffusion pipeline.
"""
flux_step_callback(
context_data=self._data,
intermediate_state=intermediate_state,
events=self._services.events,
is_canceled=self.is_canceled,
)
class InvocationContext:
"""Provides access to various services and data for the current invocation.

View File

@@ -17,7 +17,6 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_11 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_12 import build_migration_12
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import build_migration_13
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -52,7 +51,6 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_12(app_config=config))
migrator.register_migration(build_migration_13())
migrator.register_migration(build_migration_14())
migrator.register_migration(build_migration_15())
migrator.run_migrations()
return db

View File

@@ -1,34 +0,0 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration15Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._add_origin_col(cursor)
def _add_origin_col(self, cursor: sqlite3.Cursor) -> None:
"""
- Adds `origin` column to the session queue table.
- Adds `destination` column to the session queue table.
"""
cursor.execute("ALTER TABLE session_queue ADD COLUMN origin TEXT;")
cursor.execute("ALTER TABLE session_queue ADD COLUMN destination TEXT;")
def build_migration_15() -> Migration:
"""
Build the migration from database version 14 to 15.
This migration does the following:
- Adds `origin` column to the session queue table.
- Adds `destination` column to the session queue table.
"""
migration_15 = Migration(
from_version=14,
to_version=15,
callback=Migration15Callback(),
)
return migration_15
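
For reference, a minimal standalone sketch of the schema change performed by the Migration15Callback shown above. Only the two ALTER TABLE statements come from the migration itself; the CREATE TABLE here is a hypothetical stand-in for the real session_queue schema.

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
# Hypothetical minimal stand-in for the real session_queue table.
cursor.execute("CREATE TABLE session_queue (item_id INTEGER PRIMARY KEY, queue_id TEXT, batch_id TEXT);")

# Same statements as Migration15Callback._add_origin_col above.
cursor.execute("ALTER TABLE session_queue ADD COLUMN origin TEXT;")
cursor.execute("ALTER TABLE session_queue ADD COLUMN destination TEXT;")

columns = [row[1] for row in cursor.execute("PRAGMA table_info(session_queue);")]
print(columns)  # [..., 'origin', 'destination']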

View File

@@ -38,25 +38,6 @@ SD1_5_LATENT_RGB_FACTORS = [
[-0.1307, -0.1874, -0.7445], # L4
]
FLUX_LATENT_RGB_FACTORS = [
[-0.0412, 0.0149, 0.0521],
[0.0056, 0.0291, 0.0768],
[0.0342, -0.0681, -0.0427],
[-0.0258, 0.0092, 0.0463],
[0.0863, 0.0784, 0.0547],
[-0.0017, 0.0402, 0.0158],
[0.0501, 0.1058, 0.1152],
[-0.0209, -0.0218, -0.0329],
[-0.0314, 0.0083, 0.0896],
[0.0851, 0.0665, -0.0472],
[-0.0534, 0.0238, -0.0024],
[0.0452, -0.0026, 0.0048],
[0.0892, 0.0831, 0.0881],
[-0.1117, -0.0304, -0.0789],
[0.0027, -0.0479, -0.0043],
[-0.1146, -0.0827, -0.0598],
]
def sample_to_lowres_estimated_image(
samples: torch.Tensor, latent_rgb_factors: torch.Tensor, smooth_matrix: Optional[torch.Tensor] = None
@@ -113,32 +94,3 @@ def stable_diffusion_step_callback(
intermediate_state,
ProgressImage(dataURL=dataURL, width=width, height=height),
)
def flux_step_callback(
context_data: "InvocationContextData",
intermediate_state: PipelineIntermediateState,
events: "EventServiceBase",
is_canceled: Callable[[], bool],
) -> None:
if is_canceled():
raise CanceledException
sample = intermediate_state.latents
latent_rgb_factors = torch.tensor(FLUX_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
latent_image_perm = sample.permute(1, 2, 0).to(dtype=sample.dtype, device=sample.device)
latent_image = latent_image_perm @ latent_rgb_factors
latents_ubyte = (
((latent_image + 1) / 2).clamp(0, 1).mul(0xFF) # change scale from -1..1 to 0..1 # to 0..255
).to(device="cpu", dtype=torch.uint8)
image = Image.fromarray(latents_ubyte.cpu().numpy())
(width, height) = image.size
width *= 8
height *= 8
dataURL = image_to_dataURL(image, image_format="JPEG")
events.emit_invocation_denoise_progress(
context_data.queue_item,
context_data.invocation,
intermediate_state,
ProgressImage(dataURL=dataURL, width=width, height=height),
)
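
A toy sketch of the latent-to-RGB preview math used by flux_step_callback above. The latent channel count (16) and the scaling steps follow the code; the random tensor here merely stands in for FLUX_LATENT_RGB_FACTORS and a real latent sample.

import torch

latents = torch.randn(16, 32, 32)                 # (channels, h, w) FLUX latent sample
rgb_factors = torch.randn(16, 3)                  # stand-in for FLUX_LATENT_RGB_FACTORS
latent_image = latents.permute(1, 2, 0) @ rgb_factors   # (h, w, 3)
latents_ubyte = ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF).to(torch.uint8)
print(latents_ubyte.shape)  # torch.Size([32, 32, 3]); the preview is then reported at 8x this size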

View File

@@ -5,7 +5,6 @@ from tqdm import tqdm
from invokeai.backend.flux.inpaint_extension import InpaintExtension
from invokeai.backend.flux.model import Flux
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
def denoise(
@@ -18,11 +17,10 @@ def denoise(
vec: torch.Tensor,
# sampling parameters
timesteps: list[float],
step_callback: Callable[[PipelineIntermediateState], None],
step_callback: Callable[[], None],
guidance: float,
inpaint_extension: InpaintExtension | None,
):
step = 0
# guidance_vec is ignored for schnell.
guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
@@ -36,21 +34,12 @@ def denoise(
timesteps=t_vec,
guidance=guidance_vec,
)
preview_img = img - t_curr * pred
img = img + (t_prev - t_curr) * pred
if inpaint_extension is not None:
img = inpaint_extension.merge_intermediate_latents_with_init_latents(img, t_prev)
step_callback(
PipelineIntermediateState(
step=step,
order=1,
total_steps=len(timesteps),
timestep=int(t_curr),
latents=preview_img,
),
)
step += 1
step_callback()
return img

View File

@@ -1,210 +0,0 @@
from typing import Dict
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool:
"""Checks if the provided state dict is likely in the Diffusers FLUX LoRA format.
This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision. (A
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
"""
# First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())
# Next, check that this is likely a FLUX model by spot-checking a few keys.
expected_keys = [
"transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
"transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
]
all_expected_keys_present = all(k in state_dict for k in expected_keys)
return all_keys_in_peft_format and all_expected_keys_present
# TODO(ryand): What alpha should we use? 1.0? Rank of the LoRA?
def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor], alpha: float = 1.0) -> LoRAModelRaw: # pyright: ignore[reportRedeclaration] (state_dict is intentionally re-declared)
"""Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.
This function is based on:
https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
"""
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
# Constants for FLUX.1
num_double_layers = 19
num_single_layers = 38
# inner_dim = 3072
# mlp_ratio = 4.0
layers: dict[str, AnyLoRALayer] = {}
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
if src_key in grouped_state_dict:
src_layer_dict = grouped_state_dict.pop(src_key)
layers[dst_key] = LoRALayer(
values={
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
"alpha": torch.tensor(alpha),
},
)
assert len(src_layer_dict) == 0
def add_qkv_lora_layer_if_present(src_keys: list[str], dst_qkv_key: str) -> None:
"""Handle the Q, K, V matrices for a transformer block. We need special handling because the diffusers format
stores them in separate matrices, whereas the BFL format used internally by InvokeAI concatenates them.
"""
# We expect that either all src keys are present or none of them are. Verify this.
keys_present = [key in grouped_state_dict for key in src_keys]
assert all(keys_present) or not any(keys_present)
# If none of the keys are present, return early.
if not any(keys_present):
return
src_layer_dicts = [grouped_state_dict.pop(key) for key in src_keys]
sub_layers: list[LoRALayerBase] = []
for src_layer_dict in src_layer_dicts:
sub_layers.append(
LoRALayer(
values={
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
"alpha": torch.tensor(alpha),
},
)
)
assert len(src_layer_dict) == 0
layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers, concat_axis=0)
# time_text_embed.timestep_embedder -> time_in.
add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_2", "time_in.out_layer")
# time_text_embed.text_embedder -> vector_in.
add_lora_layer_if_present("time_text_embed.text_embedder.linear_1", "vector_in.in_layer")
add_lora_layer_if_present("time_text_embed.text_embedder.linear_2", "vector_in.out_layer")
# time_text_embed.guidance_embedder -> guidance_in.
add_lora_layer_if_present("time_text_embed.guidance_embedder.linear_1", "guidance_in")
add_lora_layer_if_present("time_text_embed.guidance_embedder.linear_2", "guidance_in")
# context_embedder -> txt_in.
add_lora_layer_if_present("context_embedder", "txt_in")
# x_embedder -> img_in.
add_lora_layer_if_present("x_embedder", "img_in")
# Double transformer blocks.
for i in range(num_double_layers):
# norms.
add_lora_layer_if_present(f"transformer_blocks.{i}.norm1.linear", f"double_blocks.{i}.img_mod.lin")
add_lora_layer_if_present(f"transformer_blocks.{i}.norm1_context.linear", f"double_blocks.{i}.txt_mod.lin")
# Q, K, V
add_qkv_lora_layer_if_present(
[
f"transformer_blocks.{i}.attn.to_q",
f"transformer_blocks.{i}.attn.to_k",
f"transformer_blocks.{i}.attn.to_v",
],
f"double_blocks.{i}.img_attn.qkv",
)
add_qkv_lora_layer_if_present(
[
f"transformer_blocks.{i}.attn.add_q_proj",
f"transformer_blocks.{i}.attn.add_k_proj",
f"transformer_blocks.{i}.attn.add_v_proj",
],
f"double_blocks.{i}.txt_attn.qkv",
)
# ff img_mlp
add_lora_layer_if_present(
f"transformer_blocks.{i}.ff.net.0.proj",
f"double_blocks.{i}.img_mlp.0",
)
add_lora_layer_if_present(
f"transformer_blocks.{i}.ff.net.2",
f"double_blocks.{i}.img_mlp.2",
)
# ff txt_mlp
add_lora_layer_if_present(
f"transformer_blocks.{i}.ff_context.net.0.proj",
f"double_blocks.{i}.txt_mlp.0",
)
add_lora_layer_if_present(
f"transformer_blocks.{i}.ff_context.net.2",
f"double_blocks.{i}.txt_mlp.2",
)
# output projections.
add_lora_layer_if_present(
f"transformer_blocks.{i}.attn.to_out.0",
f"double_blocks.{i}.img_attn.proj",
)
add_lora_layer_if_present(
f"transformer_blocks.{i}.attn.to_add_out",
f"double_blocks.{i}.txt_attn.proj",
)
# Single transformer blocks.
for i in range(num_single_layers):
# norms
add_lora_layer_if_present(
f"single_transformer_blocks.{i}.norm.linear",
f"single_blocks.{i}.modulation.lin",
)
# Q, K, V, mlp
add_qkv_lora_layer_if_present(
[
f"single_transformer_blocks.{i}.attn.to_q",
f"single_transformer_blocks.{i}.attn.to_k",
f"single_transformer_blocks.{i}.attn.to_v",
f"single_transformer_blocks.{i}.proj_mlp",
],
f"single_blocks.{i}.linear1",
)
# Output projections.
add_lora_layer_if_present(
f"single_transformer_blocks.{i}.proj_out",
f"single_blocks.{i}.linear2",
)
# Final layer.
add_lora_layer_if_present("proj_out", "final_layer.linear")
# Assert that all keys were processed.
assert len(grouped_state_dict) == 0
return LoRAModelRaw(layers=layers)
def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
"""Groups the keys in the state dict by layer."""
layer_dict: dict[str, dict[str, torch.Tensor]] = {}
for key in state_dict:
# Split the 'lora_A.weight' or 'lora_B.weight' suffix from the layer name.
parts = key.rsplit(".", maxsplit=2)
layer_name = parts[0]
key_name = ".".join(parts[1:])
if layer_name not in layer_dict:
layer_dict[layer_name] = {}
layer_dict[layer_name][key_name] = state_dict[key]
return layer_dict
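
To illustrate the grouping that _group_by_layer performs, a small self-contained sketch re-implementing the same rsplit logic on toy tensors. The key names come from the expected_keys listed above; the shapes are illustrative only.

import torch

def group_by_layer(state_dict: dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
    # Split the trailing 'lora_A.weight' / 'lora_B.weight' suffix from the layer name.
    grouped: dict[str, dict[str, torch.Tensor]] = {}
    for key, value in state_dict.items():
        layer_name, *suffix = key.rsplit(".", maxsplit=2)
        grouped.setdefault(layer_name, {})[".".join(suffix)] = value
    return grouped

sd = {
    "transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight": torch.zeros(8, 3072),
    "transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight": torch.zeros(3072, 8),
}
print(group_by_layer(sd).keys())
# dict_keys(['transformer.single_transformer_blocks.0.attn.to_q'])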

View File

@@ -1,11 +0,0 @@
from typing import Union
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer
AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer, ConcatenatedLoRALayer]

View File

@@ -1,46 +0,0 @@
from typing import List, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
class ConcatenatedLoRALayer(LoRALayerBase):
"""A LoRA layer that is composed of multiple LoRA layers concatenated along a specified axis.
This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
"""
def __init__(self, lora_layers: List[LoRALayerBase], concat_axis: int = 0):
# Note: We pass values={} to the base class, because the values are handled by the individual LoRA layers.
super().__init__(values={})
self._lora_layers = lora_layers
self._concat_axis = concat_axis
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
# TODO(ryand): Currently, we pass orig_weight=None to the sub-layers. If we want to support sub-layers that
# require this value, we will need to implement chunking of the original weight tensor here.
layer_weights = [lora_layer.get_weight(None) for lora_layer in self._lora_layers] # pyright: ignore[reportArgumentType]
return torch.cat(layer_weights, dim=self._concat_axis)
def get_bias(self, orig_bias: torch.Tensor) -> Optional[torch.Tensor]:
# TODO(ryand): Currently, we pass orig_bias=None to the sub-layers. If we want to support sub-layers that
# require this value, we will need to implement chunking of the original bias tensor here.
layer_biases = [lora_layer.get_bias(None) for lora_layer in self._lora_layers] # pyright: ignore[reportArgumentType]
layer_bias_is_none = [layer_bias is None for layer_bias in layer_biases]
if any(layer_bias_is_none):
assert all(layer_bias_is_none)
return None
# Ignore the type error, because we have just verified that all layer biases are non-None.
return torch.cat(layer_biases, dim=self._concat_axis)
def calc_size(self) -> int:
return sum(lora_layer.calc_size() for lora_layer in self._lora_layers)
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
for lora_layer in self._lora_layers:
lora_layer.to(device=device, dtype=dtype)
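
A torch-only sketch of the idea behind ConcatenatedLoRALayer: concatenating the per-matrix LoRA weight deltas for Q, K and V along dim 0 yields the delta for the fused QKV weight used by the BFL format. Toy sizes only; this is not the real layer API.

import torch

hidden, rank = 16, 4
# Separate LoRA factors for Q, K, V, as stored in the diffusers format.
deltas = []
for _ in range(3):
    down = torch.randn(rank, hidden)   # lora_down / lora_A
    up = torch.randn(hidden, rank)     # lora_up / lora_B
    deltas.append(up @ down)           # per-matrix weight delta, shape (hidden, hidden)

# The BFL format fuses Q, K, V into one matrix along dim 0, so the fused delta
# is the concatenation of the individual deltas along the same axis.
fused_delta = torch.cat(deltas, dim=0)  # shape (3 * hidden, hidden)
print(fused_delta.shape)  # torch.Size([48, 16])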

View File

@@ -1,33 +0,0 @@
from typing import Dict
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer
def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> AnyLoRALayer:
# Detect layers according to LyCORIS detection logic(`weight_list_det`)
# https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules
if "lora_up.weight" in state_dict:
# LoRA a.k.a LoCon
return LoRALayer(state_dict)
elif "hada_w1_a" in state_dict:
return LoHALayer(state_dict)
elif "lokr_w1" in state_dict or "lokr_w1_a" in state_dict:
return LoKRLayer(state_dict)
elif "diff" in state_dict:
# Full a.k.a Diff
return FullLayer(state_dict)
elif "on_input" in state_dict:
return IA3Layer(state_dict)
elif "w_norm" in state_dict:
return NormLayer(state_dict)
else:
raise ValueError(f"Unsupported lora format: {state_dict.keys()}")

View File

@@ -32,9 +32,6 @@ from invokeai.backend.model_manager.config import (
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.util.model_util import (
convert_bundle_to_flux_transformer_checkpoint,
)
from invokeai.backend.util.silence_warnings import SilenceWarnings
try:
@@ -193,13 +190,6 @@ class FluxCheckpointModel(ModelLoader):
with SilenceWarnings():
model = Flux(params[config.config_path])
sd = load_file(model_path)
if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
sd = convert_bundle_to_flux_transformer_checkpoint(sd)
new_sd_size = sum([ten.nelement() * torch.bfloat16.itemsize for ten in sd.values()])
self._ram_cache.make_room(new_sd_size)
for k in sd.keys():
# We need to cast to bfloat16 due to it being the only currently supported dtype for inference
sd[k] = sd[k].to(torch.bfloat16)
model.load_state_dict(sd, assign=True)
return model
@@ -240,7 +230,5 @@ class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
model = Flux(params[config.config_path])
model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
sd = load_file(model_path)
if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
sd = convert_bundle_to_flux_transformer_checkpoint(sd)
model.load_state_dict(sd, assign=True)
return model

View File

@@ -9,14 +9,6 @@ import torch
from safetensors.torch import load_file
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
lora_model_from_flux_diffusers_state_dict,
)
from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import (
lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.lora.conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.lora.conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
from invokeai.backend.model_manager import (
AnyModel,
AnyModelConfig,
@@ -28,6 +20,11 @@ from invokeai.backend.model_manager import (
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.peft.conversions.flux_kohya_lora_conversion_utils import (
lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.peft.conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.peft.conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)
@@ -67,12 +64,7 @@ class LoRALoader(ModelLoader):
state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
model = lora_model_from_sd_state_dict(state_dict=state_dict)
elif self._model_base == BaseModelType.Flux:
if config.format == ModelFormat.Diffusers:
model = lora_model_from_flux_diffusers_state_dict(state_dict=state_dict)
elif config.format == ModelFormat.LyCORIS:
model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:
# Currently, we don't apply any conversions for SD1 and SD2 LoRA models.
model = lora_model_from_sd_state_dict(state_dict=state_dict)

View File

@@ -15,9 +15,9 @@ from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import D
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.textual_inversion import TextualInversionModelRaw

View File

@@ -10,10 +10,6 @@ from picklescan.scanner import scan_file_path
import invokeai.backend.util.logging as logger
from invokeai.app.util.misc import uuid_string
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
is_state_dict_likely_in_flux_diffusers_format,
)
from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import is_state_dict_likely_in_flux_kohya_format
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.config import (
AnyModelConfig,
@@ -30,6 +26,7 @@ from invokeai.backend.model_manager.config import (
SchedulerPredictionType,
)
from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
from invokeai.backend.peft.conversions.flux_kohya_lora_conversion_utils import is_state_dict_likely_in_flux_kohya_format
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.util.silence_warnings import SilenceWarnings
@@ -112,8 +109,6 @@ class ModelProbe(object):
"CLIPVisionModelWithProjection": ModelType.CLIPVision,
"T2IAdapter": ModelType.T2IAdapter,
"CLIPModel": ModelType.CLIPEmbed,
"CLIPTextModel": ModelType.CLIPEmbed,
"T5EncoderModel": ModelType.T5Encoder,
}
@classmethod
@@ -230,27 +225,14 @@ class ModelProbe(object):
ckpt = ckpt.get("state_dict", ckpt)
for key in [str(k) for k in ckpt.keys()]:
if key.startswith(
(
"cond_stage_model.",
"first_stage_model.",
"model.diffusion_model.",
# FLUX models in the official BFL format contain keys with the "double_blocks." prefix.
"double_blocks.",
# Some FLUX checkpoint files contain transformer keys prefixed with "model.diffusion_model".
# This prefix is typically used to distinguish between multiple models bundled in a single file.
"model.diffusion_model.double_blocks.",
)
):
if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.", "double_blocks.")):
# Keys starting with double_blocks are associated with Flux models
return ModelType.Main
elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
return ModelType.VAE
elif key.startswith(("lora_te_", "lora_unet_")):
return ModelType.LoRA
# "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT
# LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight", "lora_A.weight", "lora_B.weight")):
elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight")):
return ModelType.LoRA
elif key.startswith(("controlnet", "control_model", "input_blocks")):
return ModelType.ControlNet
@@ -302,16 +284,9 @@ class ModelProbe(object):
if (folder_path / "image_encoder.txt").exists():
return ModelType.IPAdapter
config_path = None
for p in [
folder_path / "model_index.json", # pipeline
folder_path / "config.json", # most diffusers
folder_path / "text_encoder_2" / "config.json", # T5 text encoder
folder_path / "text_encoder" / "config.json", # T5 CLIP
]:
if p.exists():
config_path = p
break
i = folder_path / "model_index.json"
c = folder_path / "config.json"
config_path = i if i.exists() else c if c.exists() else None
if config_path:
with open(config_path, "r") as file:
@@ -354,10 +329,7 @@ class ModelProbe(object):
# TODO: Decide between dev/schnell
checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
state_dict = checkpoint.get("state_dict") or checkpoint
if (
"guidance_in.out_layer.weight" in state_dict
or "model.diffusion_model.guidance_in.out_layer.weight" in state_dict
):
if "guidance_in.out_layer.weight" in state_dict:
# For flux, this is a key in invokeai.backend.flux.util.params
# Due to model type and format being the descriminator for model configs this
# is used rather than attempting to support flux with separate model types and format
@@ -365,7 +337,7 @@ class ModelProbe(object):
config_file = "flux-dev"
else:
# For flux, this is a key in invokeai.backend.flux.util.params
# Due to model type and format being the discriminator for model configs this
# Due to model type and format being the descriminator for model configs this
# is used rather than attempting to support flux with separate model types and format
# If changed in the future, please fix me
config_file = "flux-schnell"
@@ -472,10 +444,7 @@ class CheckpointProbeBase(ProbeBase):
def get_format(self) -> ModelFormat:
state_dict = self.checkpoint.get("state_dict") or self.checkpoint
if (
"double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict
or "model.diffusion_model.double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict
):
if "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict:
return ModelFormat.BnbQuantizednf4b
return ModelFormat("checkpoint")
@@ -502,10 +471,7 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
def get_base_type(self) -> BaseModelType:
checkpoint = self.checkpoint
state_dict = self.checkpoint.get("state_dict") or checkpoint
if (
"double_blocks.0.img_attn.norm.key_norm.scale" in state_dict
or "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in state_dict
):
if "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict:
return BaseModelType.Flux
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in state_dict and state_dict[key_name].shape[-1] == 768:
@@ -560,17 +526,10 @@ class LoRACheckpointProbe(CheckpointProbeBase):
"""Class for LoRA checkpoints."""
def get_format(self) -> ModelFormat:
if is_state_dict_likely_in_flux_diffusers_format(self.checkpoint):
# TODO(ryand): This is an unusual case. In other places throughout the codebase, we treat
# ModelFormat.Diffusers as meaning that the model is in a directory. In this case, the model is a single
# file, but the weight keys are in the diffusers format.
return ModelFormat.Diffusers
return ModelFormat.LyCORIS
return ModelFormat("lycoris")
def get_base_type(self) -> BaseModelType:
if is_state_dict_likely_in_flux_kohya_format(self.checkpoint) or is_state_dict_likely_in_flux_diffusers_format(
self.checkpoint
):
if is_state_dict_likely_in_flux_kohya_format(self.checkpoint):
return BaseModelType.Flux
# If we've gotten here, we assume that the model is a Stable Diffusion model.
@@ -791,27 +750,8 @@ class TextualInversionFolderProbe(FolderProbeBase):
class T5EncoderFolderProbe(FolderProbeBase):
def get_base_type(self) -> BaseModelType:
return BaseModelType.Any
def get_format(self) -> ModelFormat:
path = self.model_path / "text_encoder_2"
if (path / "model.safetensors.index.json").exists():
return ModelFormat.T5Encoder
files = list(path.glob("*.safetensors"))
if len(files) == 0:
raise InvalidModelConfigException(f"{self.model_path.as_posix()}: no .safetensors files found")
# shortcut: look for the quantization in the name
if any(x for x in files if "llm_int8" in x.as_posix()):
return ModelFormat.BnbQuantizedLlmInt8b
# more reliable path: probe contents for a 'SCB' key
ckpt = read_checkpoint_meta(files[0], scan=True)
if any("SCB" in x for x in ckpt.keys()):
return ModelFormat.BnbQuantizedLlmInt8b
raise InvalidModelConfigException(f"{self.model_path.as_posix()}: unknown model format")
return ModelFormat.T5Encoder
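For reference, read_checkpoint_meta above is InvokeAI's own helper; the same 'SCB' probe can be sketched directly with the safetensors library, which exposes key names without loading tensor data (a hedged illustration, not the code under review):

from pathlib import Path
from safetensors import safe_open

def looks_like_bnb_llm_int8(safetensors_file: Path) -> bool:
    """bitsandbytes LLM.int8 checkpoints carry per-layer 'SCB' (scale) tensors."""
    with safe_open(safetensors_file, framework="pt", device="cpu") as f:
        return any("SCB" in key for key in f.keys())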
class ONNXFolderProbe(PipelineFolderProbe):

View File

@@ -133,29 +133,3 @@ def lora_token_vector_length(checkpoint: Dict[str, torch.Tensor]) -> Optional[in
break
return lora_token_vector_length
def convert_bundle_to_flux_transformer_checkpoint(
transformer_state_dict: dict[str, torch.Tensor],
) -> dict[str, torch.Tensor]:
original_state_dict: dict[str, torch.Tensor] = {}
keys_to_remove: list[str] = []
for k, v in transformer_state_dict.items():
if not k.startswith("model.diffusion_model"):
keys_to_remove.append(k) # This can be removed in the future if we only want to delete transformer keys
continue
if k.endswith("scale"):
# Scale math must be done at bfloat16 due to our current flux model
# support limitations at inference time
v = v.to(dtype=torch.bfloat16)
new_key = k.replace("model.diffusion_model.", "")
original_state_dict[new_key] = v
keys_to_remove.append(k)
# Remove processed keys from the original dictionary, leaving others in case
# other model state dicts need to be pulled
for k in keys_to_remove:
del transformer_state_dict[k]
return original_state_dict
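A toy illustration of the conversion above (the function's import path is not shown in this diff and is assumed; note that processed keys are deleted from the input dict in place):

import torch

# Assumes convert_bundle_to_flux_transformer_checkpoint (defined above) is importable.
bundle = {
    "model.diffusion_model.double_blocks.0.img_attn.proj.weight": torch.zeros(4, 4),
    "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale": torch.ones(4, dtype=torch.float32),
}
transformer_sd = convert_bundle_to_flux_transformer_checkpoint(bundle)
# The "model.diffusion_model." prefix is stripped from transformer keys...
assert "double_blocks.0.img_attn.proj.weight" in transformer_sd
# ...and tensors whose key ends in "scale" are cast to bfloat16.
assert transformer_sd["double_blocks.0.img_attn.norm.key_norm.scale"].dtype == torch.bfloat16
# Processed keys are removed from the input bundle as a side effect.
assert "model.diffusion_model.double_blocks.0.img_attn.proj.weight" not in bundle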

View File

@@ -5,18 +5,32 @@ from __future__ import annotations
import pickle
from contextlib import contextmanager
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union
from typing import Any, Dict, Generator, Iterator, List, Optional, Tuple, Type, Union
import numpy as np
import torch
from diffusers import UNet2DConditionModel
from diffusers import OnnxRuntimeModel, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.model_manager import AnyModel
from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.stable_diffusion.extensions.lora import LoRAExt
from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
"""
loras = [
(lora_model1, 0.7),
(lora_model2, 0.4),
]
with LoRAHelper.apply_lora_unet(unet, loras):
# unet with applied loras
# unmodified unet
"""
class ModelPatcher:
@@ -40,6 +54,95 @@ class ModelPatcher:
finally:
unet.set_attn_processor(unet_orig_processors)
@staticmethod
def _resolve_lora_key(model: torch.nn.Module, lora_key: str, prefix: str) -> Tuple[str, torch.nn.Module]:
assert "." not in lora_key
if not lora_key.startswith(prefix):
raise Exception(f"lora_key with invalid prefix: {lora_key}, {prefix}")
module = model
module_key = ""
key_parts = lora_key[len(prefix) :].split("_")
submodule_name = key_parts.pop(0)
while len(key_parts) > 0:
try:
module = module.get_submodule(submodule_name)
module_key += "." + submodule_name
submodule_name = key_parts.pop(0)
except Exception:
submodule_name += "_" + key_parts.pop(0)
module = module.get_submodule(submodule_name)
module_key = (module_key + "." + submodule_name).lstrip(".")
return (module_key, module)
@classmethod
@contextmanager
def apply_lora_unet(
cls,
unet: UNet2DConditionModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
) -> Generator[None, None, None]:
with cls.apply_lora(
unet,
loras=loras,
prefix="lora_unet_",
cached_weights=cached_weights,
):
yield
@classmethod
@contextmanager
def apply_lora_text_encoder(
cls,
text_encoder: CLIPTextModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
) -> Generator[None, None, None]:
with cls.apply_lora(text_encoder, loras=loras, prefix="lora_te_", cached_weights=cached_weights):
yield
@classmethod
@contextmanager
def apply_lora(
cls,
model: AnyModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
prefix: str,
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
) -> Generator[None, None, None]:
"""
Apply one or more LoRAs to a model.
:param model: The model to patch.
:param loras: An iterator that returns the LoRA to patch in and its patch weight.
:param prefix: A string prefix that precedes keys used in the LoRA's weight layers.
:param cached_weights: Read-only copy of the model's state dict, kept on the CPU, used for unpatching.
"""
original_weights = OriginalWeightsStorage(cached_weights)
try:
for lora_model, lora_weight in loras:
LoRAExt.patch_model(
model=model,
prefix=prefix,
lora=lora_model,
lora_weight=lora_weight,
original_weights=original_weights,
)
del lora_model
yield
finally:
with torch.no_grad():
for param_key, weight in original_weights.get_changed_weights():
model.get_parameter(param_key).copy_(weight)
@classmethod
@contextmanager
def apply_ti(
@@ -179,6 +282,26 @@ class ModelPatcher:
class ONNXModelPatcher:
@classmethod
@contextmanager
def apply_lora_unet(
cls,
unet: OnnxRuntimeModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
) -> None:
with cls.apply_lora(unet, loras, "lora_unet_"):
yield
@classmethod
@contextmanager
def apply_lora_text_encoder(
cls,
text_encoder: OnnxRuntimeModel,
loras: List[Tuple[LoRAModelRaw, float]],
) -> None:
with cls.apply_lora(text_encoder, loras, "lora_te_"):
yield
# based on
# https://github.com/ssube/onnx-web/blob/ca2e436f0623e18b4cfe8a0363fcfcf10508acf7/api/onnx_web/convert/diffusion/lora.py#L323
@classmethod

View File

@@ -3,9 +3,9 @@ from typing import Any, Dict, TypeVar
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.peft.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.peft.layers.utils import peft_layer_from_state_dict
from invokeai.backend.peft.lora import LoRAModelRaw
# A regex pattern that matches all of the keys in the Kohya FLUX LoRA format.
# Example keys:
@@ -23,7 +23,10 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
"""
return all(re.match(FLUX_KOHYA_KEY_REGEX, k) for k in state_dict.keys())
for k in state_dict.keys():
if not re.match(FLUX_KOHYA_KEY_REGEX, k):
return False
return True
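FLUX_KOHYA_KEY_REGEX itself is defined earlier in this file and is not reproduced in this hunk; the snippet below uses a simplified stand-in pattern purely to illustrate the all()-over-re.match detection style (the explicit loop and the generator form behave identically):

import re

# Simplified stand-in; the real FLUX_KOHYA_KEY_REGEX is stricter.
KOHYA_KEY_RE = r"lora_(unet|te\d*)_\w+\.(lora_up\.weight|lora_down\.weight|alpha)$"

kohya_keys = [
    "lora_unet_double_blocks_0_img_attn_proj.lora_down.weight",
    "lora_unet_double_blocks_0_img_attn_proj.lora_up.weight",
    "lora_unet_double_blocks_0_img_attn_proj.alpha",
]
print(all(re.match(KOHYA_KEY_RE, k) for k in kohya_keys))       # True  -> likely kohya format
print(all(re.match(KOHYA_KEY_RE, k) for k in ["unet.weight"]))  # False -> some other format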
def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
@@ -41,7 +44,8 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
# Create LoRA layers.
layers: dict[str, AnyLoRALayer] = {}
for layer_key, layer_state_dict in grouped_state_dict.items():
layers[layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
layer = peft_layer_from_state_dict(layer_key, layer_state_dict)
layers[layer_key] = layer
# Create and return the LoRAModelRaw.
return LoRAModelRaw(layers=layers)

View File

@@ -2,9 +2,9 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.peft.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.peft.layers.utils import peft_layer_from_state_dict
from invokeai.backend.peft.lora import LoRAModelRaw
def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
@@ -12,7 +12,8 @@ def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAMo
layers: dict[str, AnyLoRALayer] = {}
for layer_key, values in grouped_state_dict.items():
layers[layer_key] = any_lora_layer_from_state_dict(values)
layer = peft_layer_from_state_dict(layer_key, values)
layers[layer_key] = layer
return LoRAModelRaw(layers=layers)

View File

@@ -0,0 +1,10 @@
from typing import Union
from invokeai.backend.peft.layers.full_layer import FullLayer
from invokeai.backend.peft.layers.ia3_layer import IA3Layer
from invokeai.backend.peft.layers.loha_layer import LoHALayer
from invokeai.backend.peft.layers.lokr_layer import LoKRLayer
from invokeai.backend.peft.layers.lora_layer import LoRALayer
from invokeai.backend.peft.layers.norm_layer import NormLayer
AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer]

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
class FullLayer(LoRALayerBase):
@@ -12,9 +12,10 @@ class FullLayer(LoRALayerBase):
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
super().__init__(values)
super().__init__(layer_key, values)
self.weight = values["diff"]
self.bias = values.get("diff_b", None)

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
class IA3Layer(LoRALayerBase):
@@ -11,9 +11,10 @@ class IA3Layer(LoRALayerBase):
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
super().__init__(values)
super().__init__(layer_key, values)
self.weight = values["weight"]
self.on_input = values["on_input"]

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
class LoHALayer(LoRALayerBase):
@@ -13,8 +13,8 @@ class LoHALayer(LoRALayerBase):
# t1: Optional[torch.Tensor] = None
# t2: Optional[torch.Tensor] = None
def __init__(self, values: Dict[str, torch.Tensor]):
super().__init__(values)
def __init__(self, layer_key: str, values: Dict[str, torch.Tensor]):
super().__init__(layer_key, values)
self.w1_a = values["hada_w1_a"]
self.w1_b = values["hada_w1_b"]

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
class LoKRLayer(LoRALayerBase):
@@ -16,9 +16,10 @@ class LoKRLayer(LoRALayerBase):
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
super().__init__(values)
super().__init__(layer_key, values)
self.w1 = values.get("lokr_w1", None)
if self.w1 is None:

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
# TODO: find and debug lora/locon with bias
@@ -13,9 +13,10 @@ class LoRALayer(LoRALayerBase):
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
super().__init__(values)
super().__init__(layer_key, values)
self.up = values["lora_up.weight"]
self.down = values["lora_down.weight"]

View File

@@ -9,6 +9,7 @@ class LoRALayerBase:
# rank: Optional[int]
# alpha: Optional[float]
# bias: Optional[torch.Tensor]
# layer_key: str
# @property
# def scale(self):
@@ -16,6 +17,7 @@ class LoRALayerBase:
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
if "alpha" in values:
@@ -34,6 +36,7 @@ class LoRALayerBase:
self.bias = None
self.rank = None # set in layer implementation
self.layer_key = layer_key
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
raise NotImplementedError()

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.peft.layers.lora_layer_base import LoRALayerBase
class NormLayer(LoRALayerBase):
@@ -12,9 +12,10 @@ class NormLayer(LoRALayerBase):
def __init__(
self,
layer_key: str,
values: Dict[str, torch.Tensor],
):
super().__init__(values)
super().__init__(layer_key, values)
self.weight = values["w_norm"]
self.bias = values.get("b_norm", None)

View File

@@ -0,0 +1,33 @@
from typing import Dict
import torch
from invokeai.backend.peft.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.peft.layers.full_layer import FullLayer
from invokeai.backend.peft.layers.ia3_layer import IA3Layer
from invokeai.backend.peft.layers.loha_layer import LoHALayer
from invokeai.backend.peft.layers.lokr_layer import LoKRLayer
from invokeai.backend.peft.layers.lora_layer import LoRALayer
from invokeai.backend.peft.layers.norm_layer import NormLayer
def peft_layer_from_state_dict(layer_key: str, state_dict: Dict[str, torch.Tensor]) -> AnyLoRALayer:
# Detect layers according to the LyCORIS detection logic (`weight_list_det`)
# https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules
if "lora_up.weight" in state_dict:
# LoRA a.k.a LoCon
return LoRALayer(layer_key, state_dict)
elif "hada_w1_a" in state_dict:
return LoHALayer(layer_key, state_dict)
elif "lokr_w1" in state_dict or "lokr_w1_a" in state_dict:
return LoKRLayer(layer_key, state_dict)
elif "diff" in state_dict:
# Full a.k.a Diff
return FullLayer(layer_key, state_dict)
elif "on_input" in state_dict:
return IA3Layer(layer_key, state_dict)
elif "w_norm" in state_dict:
return NormLayer(layer_key, state_dict)
else:
raise ValueError(f"Unsupported lora format: {state_dict.keys()}")

View File

@@ -3,7 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.peft.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.raw_model import RawModel

View File

@@ -1,51 +1,52 @@
from contextlib import contextmanager
from typing import Dict, Iterable, Optional, Tuple
from typing import Dict, Iterator, Optional, Tuple
import torch
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
class LoraPatcher:
@staticmethod
class PeftPatcher:
@classmethod
@torch.no_grad()
@contextmanager
def apply_lora_patches(
def apply_peft_patches(
cls,
model: torch.nn.Module,
patches: Iterable[Tuple[LoRAModelRaw, float]],
patches: Iterator[Tuple[LoRAModelRaw, float]],
prefix: str,
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
):
"""Apply one or more LoRA patches to a model within a context manager.
"""Apply one or more PEFT patches to a model.
:param model: The model to patch.
:param loras: An iterator that returns tuples of LoRA patches and associated weights. An iterator is used so
that the LoRA patches do not need to be loaded into memory all at once.
:param patches: An iterator that returns tuples of PEFT patches and associated weights. An iterator is used so
that the PEFT patches do not need to be loaded into memory all at once.
:param prefix: The keys in the patches will be filtered to only include weights with this prefix.
:param cached_weights: Read-only copy of the model's state dict, kept on the CPU, used for efficient unpatching.
"""
original_weights = OriginalWeightsStorage(cached_weights)
try:
for patch, patch_weight in patches:
LoraPatcher.apply_lora_patch(
cls._apply_peft_patch(
model=model,
prefix=prefix,
patch=patch,
patch_weight=patch_weight,
original_weights=original_weights,
)
del patch
yield
finally:
for param_key, weight in original_weights.get_changed_weights():
model.get_parameter(param_key).copy_(weight)
@staticmethod
@classmethod
@torch.no_grad()
def apply_lora_patch(
def _apply_peft_patch(
cls,
model: torch.nn.Module,
prefix: str,
patch: LoRAModelRaw,
@@ -53,7 +54,7 @@ class LoraPatcher:
original_weights: OriginalWeightsStorage,
):
"""
Apply a single LoRA patch to a model.
Apply a single LoRA to a model.
:param model: The model to patch.
:param patch: LoRA model to patch in.
:param patch_weight: LoRA patch weight.
@@ -64,21 +65,11 @@ class LoraPatcher:
if patch_weight == 0:
return
# If the layer keys contain a dot, then they are not flattened, and can be directly used to access model
# submodules. If the layer keys do not contain a dot, then they are flattened, meaning that all '.' have been
# replaced with '_'. Non-flattened keys are preferred, because they allow submodules to be accessed directly
# without searching, but some legacy code still uses flattened keys.
layer_keys_are_flattened = "." not in next(iter(patch.layers.keys()))
prefix_len = len(prefix)
for layer_key, layer in patch.layers.items():
if not layer_key.startswith(prefix):
continue
module_key, module = LoraPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
module = model.get_submodule(layer_key)
# All of the LoRA weight calculations will be done on the same device as the module weight.
# (Performance will be best if this is a CUDA device.)
@@ -96,7 +87,7 @@ class LoraPatcher:
# TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
# devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
for param_name, lora_param_weight in layer.get_parameters(module).items():
param_key = module_key + "." + param_name
param_key = layer_key + "." + param_name
module_param = module.get_parameter(param_name)
# Save original weight
@@ -109,40 +100,3 @@ class LoraPatcher:
module_param += lora_param_weight.to(dtype=dtype)
layer.to(device=TorchDevice.CPU_DEVICE)
@staticmethod
def _get_submodule(
model: torch.nn.Module, layer_key: str, layer_key_is_flattened: bool
) -> tuple[str, torch.nn.Module]:
"""Get the submodule corresponding to the given layer key.
:param model: The model to search.
:param layer_key: The layer key to search for.
:param layer_key_is_flattened: Whether the layer key is flattened. If flattened, then all '.' have been replaced
with '_'. Non-flattened keys are preferred, because they allow submodules to be accessed directly without
searching, but some legacy code still uses flattened keys.
:return: A tuple containing the module key and the submodule.
"""
if not layer_key_is_flattened:
return layer_key, model.get_submodule(layer_key)
# Handle flattened keys.
assert "." not in layer_key
module = model
module_key = ""
key_parts = layer_key.split("_")
submodule_name = key_parts.pop(0)
while len(key_parts) > 0:
try:
module = module.get_submodule(submodule_name)
module_key += "." + submodule_name
submodule_name = key_parts.pop(0)
except Exception:
submodule_name += "_" + key_parts.pop(0)
module = module.get_submodule(submodule_name)
module_key = (module_key + "." + submodule_name).lstrip(".")
return module_key, module
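For reference, the greedy underscore walk that _get_submodule (and the new _resolve_lora_key) performs can be reproduced in isolation: "down_blocks_0_proj_in" resolves to the submodule at "down_blocks.0.proj_in" because the walker extends the candidate name with "_" whenever a lookup fails. A standalone re-implementation, for illustration only:

import torch

def resolve_flattened_key(model: torch.nn.Module, flat_key: str) -> tuple[str, torch.nn.Module]:
    """Greedy walk: extend the candidate name with '_' until a submodule lookup succeeds."""
    module, module_key = model, ""
    parts = flat_key.split("_")
    name = parts.pop(0)
    while parts:
        try:
            module = module.get_submodule(name)
            module_key += "." + name
            name = parts.pop(0)
        except AttributeError:
            name += "_" + parts.pop(0)
    module = module.get_submodule(name)
    return (module_key + "." + name).lstrip("."), module

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.down_blocks = torch.nn.ModuleList([torch.nn.ModuleDict({"proj_in": torch.nn.Linear(4, 4)})])

key, module = resolve_flattened_key(Toy(), "down_blocks_0_proj_in")
print(key)  # down_blocks.0.proj_in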

View File

@@ -1,17 +1,18 @@
from __future__ import annotations
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Tuple
import torch
from diffusers import UNet2DConditionModel
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
from invokeai.backend.util.devices import TorchDevice
if TYPE_CHECKING:
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.peft.lora import LoRAModelRaw
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
@@ -30,14 +31,107 @@ class LoRAExt(ExtensionBase):
@contextmanager
def patch_unet(self, unet: UNet2DConditionModel, original_weights: OriginalWeightsStorage):
lora_model = self._node_context.models.load(self._model_id).model
assert isinstance(lora_model, LoRAModelRaw)
LoraPatcher.apply_lora_patch(
self.patch_model(
model=unet,
prefix="lora_unet_",
patch=lora_model,
patch_weight=self._weight,
lora=lora_model,
lora_weight=self._weight,
original_weights=original_weights,
)
del lora_model
yield
@classmethod
@torch.no_grad()
def patch_model(
cls,
model: torch.nn.Module,
prefix: str,
lora: LoRAModelRaw,
lora_weight: float,
original_weights: OriginalWeightsStorage,
):
"""
Apply a single LoRA to a model.
:param model: The model to patch.
:param lora: LoRA model to patch in.
:param lora_weight: LoRA patch weight.
:param prefix: A string prefix that precedes keys used in the LoRA's weight layers.
:param original_weights: Storage for original weights; it is filled with the values of the weights that the LoRA patches and is used for unpatching.
"""
if lora_weight == 0:
return
# assert lora.device.type == "cpu"
for layer_key, layer in lora.layers.items():
if not layer_key.startswith(prefix):
continue
# TODO(ryand): A non-negligible amount of time is currently spent resolving LoRA keys. This
# should be improved in the following ways:
# 1. The key mapping could be more-efficiently pre-computed. This would save time every time a
# LoRA model is applied.
# 2. From an API perspective, there's no reason that the `ModelPatcher` should be aware of the
# intricacies of Stable Diffusion key resolution. It should just expect the input LoRA
# weights to have valid keys.
assert isinstance(model, torch.nn.Module)
module_key, module = cls._resolve_lora_key(model, layer_key, prefix)
# All of the LoRA weight calculations will be done on the same device as the module weight.
# (Performance will be best if this is a CUDA device.)
device = module.weight.device
dtype = module.weight.dtype
layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0
# We intentionally move to the target device first, then cast. Experimentally, this was found to
# be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
# same thing in a single call to '.to(...)'.
layer.to(device=device)
layer.to(dtype=torch.float32)
# TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
# devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
for param_name, lora_param_weight in layer.get_parameters(module).items():
param_key = module_key + "." + param_name
module_param = module.get_parameter(param_name)
# save original weight
original_weights.save(param_key, module_param)
if module_param.shape != lora_param_weight.shape:
# TODO: debug on lycoris
lora_param_weight = lora_param_weight.reshape(module_param.shape)
lora_param_weight *= lora_weight * layer_scale
module_param += lora_param_weight.to(dtype=dtype)
layer.to(device=TorchDevice.CPU_DEVICE)
@staticmethod
def _resolve_lora_key(model: torch.nn.Module, lora_key: str, prefix: str) -> Tuple[str, torch.nn.Module]:
assert "." not in lora_key
if not lora_key.startswith(prefix):
raise Exception(f"lora_key with invalid prefix: {lora_key}, {prefix}")
module = model
module_key = ""
key_parts = lora_key[len(prefix) :].split("_")
submodule_name = key_parts.pop(0)
while len(key_parts) > 0:
try:
module = module.get_submodule(submodule_name)
module_key += "." + submodule_name
submodule_name = key_parts.pop(0)
except Exception:
submodule_name += "_" + key_parts.pop(0)
module = module.get_submodule(submodule_name)
module_key = (module_key + "." + submodule_name).lstrip(".")
return (module_key, module)
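The weight math in patch_model above boils down to an in-place delta plus a saved copy for restoration. A self-contained sketch of that cycle on a single Linear layer (here up @ down stands in for layer.get_weight(), which is not shown in this diff, and a plain dict stands in for OriginalWeightsStorage):

import torch

linear = torch.nn.Linear(4, 4, bias=False)
rank, alpha, lora_weight = 2, 2.0, 0.7
up, down = torch.randn(4, rank), torch.randn(rank, 4)

original: dict[str, torch.Tensor] = {}  # stand-in for OriginalWeightsStorage
with torch.no_grad():
    param = linear.weight
    original["weight"] = param.detach().clone()          # save original for unpatching
    layer_scale = alpha / rank                           # same scale rule as above
    delta = (up @ down) * layer_scale * lora_weight
    param += delta.to(dtype=param.dtype)                 # patch in place

# ... run inference with the patched module here ...

with torch.no_grad():
    linear.weight.copy_(original["weight"])              # restore, as the patcher's finally-block does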

View File

@@ -12,18 +12,6 @@ module.exports = {
'i18next/no-literal-string': 'error',
// https://eslint.org/docs/latest/rules/no-console
'no-console': 'error',
// https://eslint.org/docs/latest/rules/no-promise-executor-return
'no-promise-executor-return': 'error',
// https://eslint.org/docs/latest/rules/require-await
'require-await': 'error',
'no-restricted-properties': [
'error',
{
object: 'crypto',
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
],
},
overrides: [
/**

View File

@@ -1,5 +1,5 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
import { modelChanged } from '../src/features/parameters/store/generationSlice';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**
@@ -10,9 +10,7 @@ export const ReduxInit = memo((props: PropsWithChildren) => {
const dispatch = useAppDispatch();
useGlobalModifiersInit();
useEffect(() => {
dispatch(
modelChanged({ model: { key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' } })
);
dispatch(modelChanged({ key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' }));
}, []);
return props.children;

View File

@@ -9,10 +9,6 @@ const config: KnipConfig = {
'src/services/api/schema.ts',
'src/features/nodes/types/v1/**',
'src/features/nodes/types/v2/**',
// TODO(psyche): maybe we can clean up these utils after canvas v2 release
'src/features/controlLayers/konva/util.ts',
// TODO(psyche): restore HRF functionality?
'src/features/hrf/**',
],
ignoreBinaries: ['only-allow'],
paths: {

View File

@@ -24,7 +24,7 @@
"build": "pnpm run lint && vite build",
"typegen": "node scripts/typegen.js",
"preview": "vite preview",
"lint:knip": "knip --tags=-knipignore",
"lint:knip": "knip",
"lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
"lint:eslint": "eslint --max-warnings=0 .",
"lint:prettier": "prettier --check .",
@@ -52,19 +52,18 @@
}
},
"dependencies": {
"@chakra-ui/react-use-size": "^2.1.0",
"@dagrejs/dagre": "^1.1.3",
"@dagrejs/graphlib": "^2.2.3",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/sortable": "^8.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@fontsource-variable/inter": "^5.0.20",
"@invoke-ai/ui-library": "^0.0.33",
"@invoke-ai/ui-library": "^0.0.29",
"@nanostores/react": "^0.7.3",
"@reduxjs/toolkit": "2.2.3",
"@roarr/browser-log-writer": "^1.3.0",
"async-mutex": "^0.5.0",
"chakra-react-select": "^4.9.1",
"cmdk": "^1.0.0",
"compare-versions": "^6.1.1",
"dateformat": "^5.0.3",
"fracturedjsonjs": "^4.0.2",
@@ -75,8 +74,6 @@
"jsondiffpatch": "^0.6.0",
"konva": "^9.3.14",
"lodash-es": "^4.17.21",
"lru-cache": "^11.0.0",
"nanoid": "^5.0.7",
"nanostores": "^0.11.2",
"new-github-issue-url": "^1.0.0",
"overlayscrollbars": "^2.10.0",
@@ -91,8 +88,10 @@
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^14.1.3",
"react-icons": "^5.2.1",
"react-konva": "^18.2.10",
"react-redux": "9.1.2",
"react-resizable-panels": "^2.1.2",
"react-resizable-panels": "^2.0.23",
"react-select": "5.8.0",
"react-use": "^17.5.1",
"react-virtuoso": "^4.9.0",
"reactflow": "^11.11.4",
@@ -103,9 +102,9 @@
"roarr": "^7.21.1",
"serialize-error": "^11.0.3",
"socket.io-client": "^4.7.5",
"stable-hash": "^0.0.4",
"use-debounce": "^10.0.2",
"use-device-pixel-ratio": "^1.1.2",
"use-image": "^1.1.1",
"uuid": "^10.0.0",
"zod": "^3.23.8",
"zod-validation-error": "^3.3.1"
@@ -136,7 +135,6 @@
"@vitest/coverage-v8": "^1.5.0",
"@vitest/ui": "^1.5.0",
"concurrently": "^8.2.2",
"csstype": "^3.1.3",
"dpdm": "^3.14.0",
"eslint": "^8.57.0",
"eslint-plugin-i18next": "^6.0.9",

File diff suppressed because it is too large

View File

@@ -127,14 +127,7 @@
"bulkDownloadRequestedDesc": "Dein Download wird vorbereitet. Dies kann ein paar Momente dauern.",
"bulkDownloadRequestFailed": "Problem beim Download vorbereiten",
"bulkDownloadFailed": "Download fehlgeschlagen",
"alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen",
"selectForCompare": "Zum Vergleichen auswählen",
"compareImage": "Bilder vergleichen",
"exitSearch": "Suche beenden",
"newestFirst": "Neueste zuerst",
"oldestFirst": "Älteste zuerst",
"openInViewer": "Im Viewer öffnen",
"swapImages": "Bilder tauschen"
"alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen"
},
"hotkeys": {
"keyboardShortcuts": "Tastenkürzel",
@@ -638,8 +631,7 @@
"archived": "Archiviert",
"noBoards": "Kein {boardType}} Ordner",
"hideBoards": "Ordner verstecken",
"viewBoards": "Ordner ansehen",
"deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt."
"viewBoards": "Ordner ansehen"
},
"controlnet": {
"showAdvanced": "Zeige Erweitert",
@@ -789,9 +781,7 @@
"batchFieldValues": "Stapelverarbeitungswerte",
"batchQueued": "Stapelverarbeitung eingereiht",
"graphQueued": "Graph eingereiht",
"graphFailedToQueue": "Fehler beim Einreihen des Graphen",
"generations_one": "Generation",
"generations_other": "Generationen"
"graphFailedToQueue": "Fehler beim Einreihen des Graphen"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@@ -1156,10 +1146,5 @@
"noMatchingTriggers": "Keine passenden Trigger",
"addPromptTrigger": "Prompt-Trigger hinzufügen",
"compatibleEmbeddings": "Kompatible Einbettungen"
},
"ui": {
"tabs": {
"queue": "Warteschlange"
}
}
}

View File

@@ -80,7 +80,6 @@
"aboutDesc": "Using Invoke for work? Check out:",
"aboutHeading": "Own Your Creative Power",
"accept": "Accept",
"apply": "Apply",
"add": "Add",
"advanced": "Advanced",
"ai": "ai",
@@ -93,7 +92,6 @@
"copy": "Copy",
"copyError": "$t(gallery.copy) Error",
"on": "On",
"off": "Off",
"or": "or",
"checkpoint": "Checkpoint",
"communityLabel": "Community",
@@ -117,7 +115,6 @@
"githubLabel": "Github",
"goTo": "Go to",
"hotkeysLabel": "Hotkeys",
"loadingImage": "Loading Image",
"imageFailedToLoad": "Unable to Load Image",
"img2img": "Image To Image",
"inpaint": "inpaint",
@@ -135,7 +132,6 @@
"nodes": "Workflows",
"notInstalled": "Not $t(common.installed)",
"openInNewTab": "Open in New Tab",
"openInViewer": "Open in Viewer",
"orderBy": "Order By",
"outpaint": "outpaint",
"outputs": "Outputs",
@@ -166,10 +162,10 @@
"alpha": "Alpha",
"selected": "Selected",
"tab": "Tab",
"view": "View",
"viewDesc": "Review images in a large gallery view",
"edit": "Edit",
"editDesc": "Edit on the Canvas",
"viewing": "Viewing",
"viewingDesc": "Review images in a large gallery view",
"editing": "Editing",
"editingDesc": "Edit on the Control Layers canvas",
"comparing": "Comparing",
"comparingDesc": "Comparing two images",
"enabled": "Enabled",
@@ -329,14 +325,6 @@
"canceled": "Canceled",
"completedIn": "Completed in",
"batch": "Batch",
"origin": "Origin",
"destination": "Destination",
"upscaling": "Upscaling",
"canvas": "Canvas",
"generation": "Generation",
"workflows": "Workflows",
"other": "Other",
"gallery": "Gallery",
"batchFieldValues": "Batch Field Values",
"item": "Item",
"session": "Session",
@@ -375,7 +363,6 @@
"useCache": "Use Cache"
},
"gallery": {
"gallery": "Gallery",
"alwaysShowImageSizeBadge": "Always Show Image Size Badge",
"assets": "Assets",
"autoAssignBoardOnClick": "Auto-Assign Board on Click",
@@ -388,11 +375,11 @@
"deleteImage_one": "Delete Image",
"deleteImage_other": "Delete {{count}} Images",
"deleteImagePermanent": "Deleted images cannot be restored.",
"displayBoardSearch": "Board Search",
"displaySearch": "Image Search",
"displayBoardSearch": "Display Board Search",
"displaySearch": "Display Search",
"download": "Download",
"exitBoardSearch": "Exit Board Search",
"exitSearch": "Exit Image Search",
"exitSearch": "Exit Search",
"featuresWillReset": "If you delete this image, those features will immediately be reset.",
"galleryImageSize": "Image Size",
"gallerySettings": "Gallery Settings",
@@ -438,8 +425,7 @@
"compareHelp1": "Hold <Kbd>Alt</Kbd> while clicking a gallery image or using the arrow keys to change the compare image.",
"compareHelp2": "Press <Kbd>M</Kbd> to cycle through comparison modes.",
"compareHelp3": "Press <Kbd>C</Kbd> to swap the compared images.",
"compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit.",
"toggleMiniViewer": "Toggle Mini Viewer"
"compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit."
},
"hotkeys": {
"searchHotkeys": "Search Hotkeys",
@@ -1018,8 +1004,6 @@
"noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
"incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
"noModelSelected": "No model selected",
"canvasManagerNotLoaded": "Canvas Manager not loaded",
"canvasBusy": "Canvas is busy",
"noPrompts": "No prompts generated",
"noNodesInGraph": "No nodes in graph",
"systemDisconnected": "System disconnected",
@@ -1051,11 +1035,12 @@
"scaledHeight": "Scaled H",
"scaledWidth": "Scaled W",
"scheduler": "Scheduler",
"seamlessXAxis": "Seamless X Axis",
"seamlessYAxis": "Seamless Y Axis",
"seamlessXAxis": "Seamless Tiling X Axis",
"seamlessYAxis": "Seamless Tiling Y Axis",
"seed": "Seed",
"imageActions": "Image Actions",
"sendToCanvas": "Send To Canvas",
"sendToImg2Img": "Send to Image to Image",
"sendToUnifiedCanvas": "Send To Unified Canvas",
"sendToUpscale": "Send To Upscale",
"showOptionsPanel": "Show Side Panel (O or T)",
"shuffle": "Shuffle Seed",
@@ -1115,6 +1100,7 @@
"confirmOnDelete": "Confirm On Delete",
"developer": "Developer",
"displayInProgress": "Display Progress Images",
"enableImageDebugging": "Enable Image Debugging",
"enableInformationalPopovers": "Enable Informational Popovers",
"informationalPopoversDisabled": "Informational Popovers Disabled",
"informationalPopoversDisabledDesc": "Informational popovers have been disabled. Enable them in Settings.",
@@ -1196,8 +1182,8 @@
"problemSavingMaskDesc": "Unable to export mask",
"prunedQueue": "Pruned Queue",
"resetInitialImage": "Reset Initial Image",
"sentToCanvas": "Sent to Canvas",
"sentToUpscale": "Sent to Upscale",
"sentToImageToImage": "Sent To Image To Image",
"sentToUnifiedCanvas": "Sent to Unified Canvas",
"serverError": "Server Error",
"sessionRef": "Session: {{sessionId}}",
"setAsCanvasInitialImage": "Set as canvas initial image",
@@ -1581,7 +1567,7 @@
"copyToClipboard": "Copy to Clipboard",
"cursorPosition": "Cursor Position",
"darkenOutsideSelection": "Darken Outside Selection",
"discardAll": "Discard All & Cancel Pending Generations",
"discardAll": "Discard All",
"discardCurrent": "Discard Current",
"downloadAsImage": "Download As Image",
"enableMask": "Enable Mask",
@@ -1659,187 +1645,39 @@
"storeNotInitialized": "Store is not initialized"
},
"controlLayers": {
"bookmark": "Bookmark for Quick Switch",
"fitBboxToLayers": "Fit Bbox To Layers",
"removeBookmark": "Remove Bookmark",
"saveCanvasToGallery": "Save Canvas To Gallery",
"saveBboxToGallery": "Save Bbox To Gallery",
"savedToGalleryOk": "Saved to Gallery",
"savedToGalleryError": "Error saving to gallery",
"mergeVisible": "Merge Visible",
"mergeVisibleOk": "Merged visible layers",
"mergeVisibleError": "Error merging visible layers",
"clearHistory": "Clear History",
"generateMode": "Generate",
"generateModeDesc": "Create individual images. Generated images are added directly to the gallery.",
"composeMode": "Compose",
"composeModeDesc": "Compose your work iterative. Generated images are added back to the canvas.",
"autoSave": "Auto-save to Gallery",
"resetCanvas": "Reset Canvas",
"resetAll": "Reset All",
"clearCaches": "Clear Caches",
"recalculateRects": "Recalculate Rects",
"clipToBbox": "Clip Strokes to Bbox",
"compositeMaskedRegions": "Composite Masked Regions",
"deleteAll": "Delete All",
"addLayer": "Add Layer",
"duplicate": "Duplicate",
"moveToFront": "Move to Front",
"moveToBack": "Move to Back",
"moveForward": "Move Forward",
"moveBackward": "Move Backward",
"brushSize": "Brush Size",
"width": "Width",
"zoom": "Zoom",
"resetView": "Reset View",
"controlLayers": "Control Layers",
"globalMaskOpacity": "Global Mask Opacity",
"autoNegative": "Auto Negative",
"enableAutoNegative": "Enable Auto Negative",
"disableAutoNegative": "Disable Auto Negative",
"deletePrompt": "Delete Prompt",
"resetRegion": "Reset Region",
"debugLayers": "Debug Layers",
"showHUD": "Show HUD",
"rectangle": "Rectangle",
"maskFill": "Mask Fill",
"maskPreviewColor": "Mask Preview Color",
"addPositivePrompt": "Add $t(common.positivePrompt)",
"addNegativePrompt": "Add $t(common.negativePrompt)",
"addIPAdapter": "Add $t(common.ipAdapter)",
"addRasterLayer": "Add $t(controlLayers.rasterLayer)",
"addControlLayer": "Add $t(controlLayers.controlLayer)",
"addInpaintMask": "Add $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
"raster": "Raster",
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
"regionalGuidance": "Regional Guidance",
"ipAdapter": "IP Adapter",
"sendingToCanvas": "Sending to Canvas",
"sendingToGallery": "Sending to Gallery",
"sendToGallery": "Send To Gallery",
"sendToGalleryDesc": "Generations will be sent to the gallery.",
"sendToCanvas": "Send To Canvas",
"sendToCanvasDesc": "Generations will be staged onto the canvas.",
"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
"controlLayer_withCount_one": "$t(controlLayers.controlLayer)",
"inpaintMask_withCount_one": "$t(controlLayers.inpaintMask)",
"regionalGuidance_withCount_one": "$t(controlLayers.regionalGuidance)",
"ipAdapter_withCount_one": "$t(controlLayers.ipAdapter)",
"rasterLayer_withCount_other": "Raster Layers",
"controlLayer_withCount_other": "Control Layers",
"inpaintMask_withCount_other": "Inpaint Masks",
"regionalGuidance_withCount_other": "Regional Guidance",
"ipAdapter_withCount_other": "IP Adapters",
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
"opacity": "Opacity",
"regionalGuidance_withCount_hidden": "Regional Guidance ({{count}} hidden)",
"controlLayers_withCount_hidden": "Control Layers ({{count}} hidden)",
"rasterLayers_withCount_hidden": "Raster Layers ({{count}} hidden)",
"globalIPAdapters_withCount_hidden": "Global IP Adapters ({{count}} hidden)",
"inpaintMasks_withCount_hidden": "Inpaint Masks ({{count}} hidden)",
"regionalGuidance_withCount_visible": "Regional Guidance ({{count}})",
"controlLayers_withCount_visible": "Control Layers ({{count}})",
"rasterLayers_withCount_visible": "Raster Layers ({{count}})",
"globalIPAdapters_withCount_visible": "Global IP Adapters ({{count}})",
"inpaintMasks_withCount_visible": "Inpaint Masks ({{count}})",
"globalControlAdapter": "Global $t(controlnet.controlAdapter_one)",
"globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
"globalIPAdapter": "Global $t(common.ipAdapter)",
"globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)",
"globalInitialImage": "Global Initial Image",
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
"layer": "Layer",
"opacityFilter": "Opacity Filter",
"clearProcessor": "Clear Processor",
"resetProcessor": "Reset Processor to Defaults",
"noLayersAdded": "No Layers Added",
"layer_one": "Layer",
"layer_other": "Layers",
"objects_zero": "empty",
"objects_one": "{{count}} object",
"objects_other": "{{count}} objects",
"convertToControlLayer": "Convert to Control Layer",
"convertToRasterLayer": "Convert to Raster Layer",
"transparency": "Transparency",
"enableTransparencyEffect": "Enable Transparency Effect",
"disableTransparencyEffect": "Disable Transparency Effect",
"hidingType": "Hiding {{type}}",
"showingType": "Showing {{type}}",
"dynamicGrid": "Dynamic Grid",
"logDebugInfo": "Log Debug Info",
"locked": "Locked",
"unlocked": "Unlocked",
"deleteSelected": "Delete Selected",
"deleteAll": "Delete All",
"flipHorizontal": "Flip Horizontal",
"flipVertical": "Flip Vertical",
"fill": {
"fillColor": "Fill Color",
"fillStyle": "Fill Style",
"solid": "Solid",
"grid": "Grid",
"crosshatch": "Crosshatch",
"vertical": "Vertical",
"horizontal": "Horizontal",
"diagonal": "Diagonal"
},
"tool": {
"brush": "Brush",
"eraser": "Eraser",
"rectangle": "Rectangle",
"bbox": "Bbox",
"move": "Move",
"view": "View",
"colorPicker": "Color Picker"
},
"filter": {
"filter": "Filter",
"filters": "Filters",
"filterType": "Filter Type",
"autoProcess": "Auto Process",
"reset": "Reset",
"process": "Process",
"apply": "Apply",
"cancel": "Cancel",
"spandrel": {
"label": "Image-to-Image Model",
"description": "Run an image-to-image model on the selected layer.",
"paramModel": "Model",
"paramAutoScale": "Auto Scale",
"paramAutoScaleDesc": "The selected model will be run until the target scale is reached.",
"paramScale": "Target Scale"
}
},
"transform": {
"transform": "Transform",
"fitToBbox": "Fit to Bbox",
"reset": "Reset",
"apply": "Apply",
"cancel": "Cancel"
},
"settings": {
"snapToGrid": {
"label": "Snap to Grid",
"on": "On",
"off": "Off"
}
},
"HUD": {
"bbox": "Bbox",
"scaledBbox": "Scaled Bbox",
"autoSave": "Auto Save",
"entityStatus": {
"selectedEntity": "Selected Entity",
"selectedEntityIs": "Selected Entity is",
"isFiltering": "is filtering",
"isTransforming": "is transforming",
"isLocked": "is locked",
"isHidden": "is hidden",
"isDisabled": "is disabled",
"enabled": "Enabled"
}
}
"layers_one": "Layer",
"layers_other": "Layers"
},
"upscaling": {
"upscale": "Upscale",
@@ -1927,30 +1765,5 @@
"upscaling": "Upscaling",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
}
},
"system": {
"enableLogging": "Enable Logging",
"logLevel": {
"logLevel": "Log Level",
"trace": "Trace",
"debug": "Debug",
"info": "Info",
"warn": "Warn",
"error": "Error",
"fatal": "Fatal"
},
"logNamespaces": {
"logNamespaces": "Log Namespaces",
"gallery": "Gallery",
"models": "Models",
"config": "Config",
"canvas": "Canvas",
"generation": "Generation",
"workflows": "Workflows",
"system": "System",
"events": "Events",
"queue": "Queue",
"metadata": "Metadata"
}
}
}

View File

@@ -86,15 +86,15 @@
"loadMore": "Cargar más",
"noImagesInGallery": "No hay imágenes para mostrar",
"deleteImage_one": "Eliminar Imagen",
"deleteImage_many": "Eliminar {{count}} Imágenes",
"deleteImage_other": "Eliminar {{count}} Imágenes",
"deleteImage_many": "",
"deleteImage_other": "",
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
"assets": "Activos",
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
},
"hotkeys": {
"keyboardShortcuts": "Atajos de teclado",
"appHotkeys": "Atajos de aplicación",
"appHotkeys": "Atajos de applicación",
"generalHotkeys": "Atajos generales",
"galleryHotkeys": "Atajos de galería",
"unifiedCanvasHotkeys": "Atajos de lienzo unificado",
@@ -535,7 +535,7 @@
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
"deleteBoardAndImages": "Borrar el panel y las imágenes",
"loading": "Cargando...",
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
"move": "Mover",
"menuItemAutoAdd": "Agregar automáticamente a este panel",
"searchBoard": "Buscando paneles…",
@@ -549,13 +549,7 @@
"imagesWithCount_other": "{{count}} imágenes",
"assetsWithCount_one": "{{count}} activo",
"assetsWithCount_many": "{{count}} activos",
"assetsWithCount_other": "{{count}} activos",
"hideBoards": "Ocultar Paneles",
"addPrivateBoard": "Agregar un tablero privado",
"addSharedBoard": "Agregar Panel Compartido",
"boards": "Paneles",
"archiveBoard": "Archivar Panel",
"archived": "Archivado"
"assetsWithCount_other": "{{count}} activos"
},
"accordions": {
"compositing": {

View File

@@ -496,9 +496,7 @@
"main": "Principali",
"noModelsInstalledDesc1": "Installa i modelli con",
"ipAdapters": "Adattatori IP",
"noMatchingModels": "Nessun modello corrispondente",
"starterModelsInModelManager": "I modelli iniziali possono essere trovati in Gestione Modelli",
"spandrelImageToImage": "Immagine a immagine (Spandrel)"
"noMatchingModels": "Nessun modello corrispondente"
},
"parameters": {
"images": "Immagini",
@@ -512,7 +510,7 @@
"perlinNoise": "Rumore Perlin",
"type": "Tipo",
"strength": "Forza",
"upscaling": "Amplia",
"upscaling": "Ampliamento",
"scale": "Scala",
"imageFit": "Adatta l'immagine iniziale alle dimensioni di output",
"scaleBeforeProcessing": "Scala prima dell'elaborazione",
@@ -595,7 +593,7 @@
"globalPositivePromptPlaceholder": "Prompt positivo globale",
"globalNegativePromptPlaceholder": "Prompt negativo globale",
"processImage": "Elabora Immagine",
"sendToUpscale": "Invia a Amplia",
"sendToUpscale": "Invia a Ampliare",
"postProcessing": "Post-elaborazione (Shift + U)"
},
"settings": {
@@ -1422,7 +1420,7 @@
"paramUpscaleMethod": {
"heading": "Metodo di ampliamento",
"paragraphs": [
"Metodo utilizzato per ampliare l'immagine per la correzione ad alta risoluzione."
"Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
]
},
"patchmatchDownScaleSize": {
@@ -1530,7 +1528,7 @@
},
"upscaleModel": {
"paragraphs": [
"Il modello di ampliamento, scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
"Il modello di ampliamento (Upscale), scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
],
"heading": "Modello di ampliamento"
},
@@ -1722,27 +1720,26 @@
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"queue": "Coda",
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
"upscaling": "Amplia",
"upscaling": "Ampliamento",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
}
},
"upscaling": {
"creativity": "Creatività",
"structure": "Struttura",
"upscaleModel": "Modello di ampliamento",
"upscaleModel": "Modello di Ampliamento",
"scale": "Scala",
"missingModelsWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare i modelli richiesti:",
"mainModelDesc": "Modello principale (architettura SD1.5 o SDXL)",
"tileControlNetModelDesc": "Modello Tile ControlNet per l'architettura del modello principale scelto",
"upscaleModelDesc": "Modello per l'ampliamento (immagine a immagine)",
"upscaleModelDesc": "Modello per l'ampliamento (da immagine a immagine)",
"missingUpscaleInitialImage": "Immagine iniziale mancante per l'ampliamento",
"missingUpscaleModel": "Modello per lampliamento mancante",
"missingTileControlNetModel": "Nessun modello ControlNet Tile valido installato",
"postProcessingModel": "Modello di post-elaborazione",
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
"upscale": "Amplia"
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata."
},
"upsell": {
"inviteTeammates": "Invita collaboratori",
@@ -1792,7 +1789,6 @@
"positivePromptColumn": "'prompt' o 'positive_prompt'",
"noTemplates": "Nessun modello",
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
"templateActions": "Azioni modello",
"promptTemplateCleared": "Modello di prompt cancellato"
"templateActions": "Azioni modello"
}
}

View File

@@ -501,8 +501,7 @@
"noModelsInstalled": "Нет установленных моделей",
"noModelsInstalledDesc1": "Установите модели с помощью",
"noMatchingModels": "Нет подходящих моделей",
"ipAdapters": "IP адаптеры",
"starterModelsInModelManager": "Стартовые модели можно найти в Менеджере моделей"
"ipAdapters": "IP адаптеры"
},
"parameters": {
"images": "Изображения",
@@ -1759,8 +1758,7 @@
"postProcessingModel": "Модель постобработки",
"tileControlNetModelDesc": "Модель ControlNet для выбранной архитектуры основной модели",
"missingModelsWarning": "Зайдите в <LinkComponent>Менеджер моделей</LinkComponent> чтоб установить необходимые модели:",
"postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img).",
"upscale": "Увеличить"
"postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img)."
},
"stylePresets": {
"noMatchingTemplates": "Нет подходящих шаблонов",
@@ -1806,8 +1804,7 @@
"noTemplates": "Нет шаблонов",
"promptTemplatesDesc2": "Используйте строку-заполнитель <Pre>{{placeholder}}</Pre>, чтобы указать место, куда должен быть включен ваш запрос в шаблоне.",
"searchByName": "Поиск по имени",
"shared": "Общий",
"promptTemplateCleared": "Шаблон запроса создан"
"shared": "Общий"
},
"upsell": {
"inviteTeammates": "Пригласите членов команды",

View File

@@ -154,8 +154,7 @@
"displaySearch": "显示搜索",
"stretchToFit": "拉伸以适应",
"exitCompare": "退出对比",
"compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。",
"go": "运行"
"compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。"
},
"hotkeys": {
"keyboardShortcuts": "快捷键",
@@ -495,9 +494,7 @@
"huggingFacePlaceholder": "所有者或模型名称",
"huggingFaceRepoID": "HuggingFace仓库ID",
"loraTriggerPhrases": "LoRA 触发词",
"ipAdapters": "IP适配器",
"spandrelImageToImage": "图生图(Spandrel)",
"starterModelsInModelManager": "您可以在模型管理器中找到初始模型"
"ipAdapters": "IP适配器"
},
"parameters": {
"images": "图像",
@@ -698,9 +695,7 @@
"outOfMemoryErrorDesc": "您当前的生成设置已超出系统处理能力.请调整设置后再次尝试.",
"parametersSet": "参数已恢复",
"errorCopied": "错误信息已复制",
"modelImportCanceled": "模型导入已取消",
"importFailed": "导入失败",
"importSuccessful": "导入成功"
"modelImportCanceled": "模型导入已取消"
},
"unifiedCanvas": {
"layer": "图层",
@@ -1710,55 +1705,12 @@
"missingModelsWarning": "请访问<LinkComponent>模型管理器</LinkComponent> 安装所需的模型:",
"mainModelDesc": "主模型SD1.5或SDXL架构",
"exceedsMaxSize": "放大设置超出了最大尺寸限制",
"exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择.",
"upscale": "放大"
"exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择."
},
"upsell": {
"inviteTeammates": "邀请团队成员",
"professional": "专业",
"professionalUpsell": "可在 Invoke 的专业版中使用.点击此处或访问 invoke.com/pricing 了解更多详情.",
"shareAccess": "共享访问权限"
},
"stylePresets": {
"positivePrompt": "正向提示词",
"preview": "预览",
"deleteImage": "删除图像",
"deleteTemplate": "删除模版",
"deleteTemplate2": "您确定要删除这个模板吗?请注意,删除后无法恢复.",
"importTemplates": "导入提示模板支持CSV或JSON格式",
"insertPlaceholder": "插入一个占位符",
"myTemplates": "我的模版",
"name": "名称",
"type": "类型",
"unableToDeleteTemplate": "无法删除提示模板",
"updatePromptTemplate": "更新提示词模版",
"exportPromptTemplates": "导出我的提示模板为CSV格式",
"exportDownloaded": "导出已下载",
"noMatchingTemplates": "无匹配的模版",
"promptTemplatesDesc1": "提示模板可以帮助您在编写提示时添加预设的文本内容.",
"promptTemplatesDesc3": "如果您没有使用占位符,那么模板的内容将会被添加到您提示的末尾.",
"searchByName": "按名称搜索",
"shared": "已分享",
"sharedTemplates": "已分享的模版",
"templateActions": "模版操作",
"templateDeleted": "提示模版已删除",
"toggleViewMode": "切换显示模式",
"uploadImage": "上传图像",
"active": "激活",
"choosePromptTemplate": "选择提示词模板",
"clearTemplateSelection": "清除模版选择",
"copyTemplate": "拷贝模版",
"createPromptTemplate": "创建提示词模版",
"defaultTemplates": "默认模版",
"editTemplate": "编辑模版",
"exportFailed": "无法生成并下载CSV文件",
"flatten": "将选定的模板内容合并到当前提示中",
"negativePrompt": "反向提示词",
"promptTemplateCleared": "提示模板已清除",
"useForTemplate": "用于提示词模版",
"viewList": "预览模版列表",
"viewModeTooltip": "这是您的提示在当前选定的模板下的预览效果。如需编辑提示,请直接在文本框中点击进行修改.",
"noTemplates": "无模版",
"private": "私密"
}
}

View File

@@ -38,7 +38,7 @@ async function generateTypes(schema) {
process.stdout.write(`\nOK!\r\n`);
}
function main() {
async function main() {
const encoding = 'utf-8';
if (process.stdin.isTTY) {

View File

@@ -6,7 +6,6 @@ import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/ap
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import type { PartialAppConfig } from 'app/types/invokeai';
import ImageUploadOverlay from 'common/components/ImageUploadOverlay';
import { useScopeFocusWatcher } from 'common/hooks/interactionScopes';
import { useClearStorage } from 'common/hooks/useClearStorage';
import { useFullscreenDropzone } from 'common/hooks/useFullscreenDropzone';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
@@ -14,16 +13,13 @@ import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardMo
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import { activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
import SettingsModal from 'features/system/components/SettingsModal/SettingsModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
import { languageSelector } from 'features/system/store/systemSelectors';
import InvokeTabs from 'features/ui/components/InvokeTabs';
import type { InvokeTabName } from 'features/ui/store/tabMap';
import { setActiveTab } from 'features/ui/store/uiSlice';
import type { TabName } from 'features/ui/store/uiTypes';
import { useGetAndLoadLibraryWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadLibraryWorkflow';
import { AnimatePresence } from 'framer-motion';
import i18n from 'i18n';
@@ -45,7 +41,7 @@ interface Props {
};
selectedWorkflowId?: string;
selectedStylePresetId?: string;
destination?: TabName;
destination?: InvokeTabName | undefined;
}
const App = ({
@@ -55,7 +51,7 @@ const App = ({
selectedStylePresetId,
destination,
}: Props) => {
const language = useAppSelector(selectLanguage);
const language = useAppSelector(languageSelector);
const logger = useLogger('system');
const dispatch = useAppDispatch();
const clearStorage = useClearStorage();
@@ -111,7 +107,6 @@ const App = ({
useStarterModelsToast();
useSyncQueueStatus();
useScopeFocusWatcher();
return (
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
@@ -124,7 +119,7 @@ const App = ({
{...dropzone.getRootProps()}
>
<input {...dropzone.getInputProps()} />
<AppContent />
<InvokeTabs />
<AnimatePresence>
{dropzone.isDragActive && isHandlingUpload && (
<ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
@@ -135,10 +130,7 @@ const App = ({
<ChangeBoardModal />
<DynamicPromptsModal />
<StylePresetModal />
<ClearQueueConfirmationsAlertDialog />
<PreselectedImage selectedImage={selectedImage} />
<SettingsModal />
<RefreshAfterResetModal />
</ErrorBoundary>
);
};

View File

@@ -1,7 +1,5 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';
@@ -15,11 +13,9 @@ type Props = {
resetErrorBoundary: () => void;
};
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
const isLocal = useAppSelector((s) => s.config.isLocal);
const handleCopy = useCallback(() => {
const text = JSON.stringify(serializeError(error), null, 2);

View File

@@ -19,7 +19,7 @@ import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import AppDndContext from 'features/dnd/components/AppDndContext';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { TabName } from 'features/ui/store/uiTypes';
import type { InvokeTabName } from 'features/ui/store/tabMap';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useMemo } from 'react';
import { Provider } from 'react-redux';
@@ -46,7 +46,7 @@ interface Props extends PropsWithChildren {
};
selectedWorkflowId?: string;
selectedStylePresetId?: string;
destination?: TabName;
destination?: InvokeTabName;
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;

View File

@@ -21,16 +21,10 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
direction,
shadows: {
..._theme.shadows,
selected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverSelected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverUnselected:
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
selectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
'0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-400)',
hoverSelectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
'0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-300)',
},
});
}, [direction]);

View File

@@ -2,7 +2,7 @@ import { useStore } from '@nanostores/react';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { useAppStore } from 'app/store/nanostores/store';
import { useAppDispatch } from 'app/store/storeHooks';
import type { MapStore } from 'nanostores';
import { atom, map } from 'nanostores';
import { useEffect, useMemo } from 'react';
@@ -18,19 +18,14 @@ declare global {
}
}
export type AppSocket = Socket<ServerToClientEvents, ClientToServerEvents>;
export const $socket = atom<AppSocket | null>(null);
export const $socketOptions = map<Partial<ManagerOptions & SocketOptions>>({});
const $isSocketInitialized = atom<boolean>(false);
export const $isConnected = atom<boolean>(false);
/**
* Initializes the socket.io connection and sets up event listeners.
*/
export const useSocketIO = () => {
const { dispatch, getState } = useAppStore();
const dispatch = useAppDispatch();
const baseUrl = useStore($baseUrl);
const authToken = useStore($authToken);
const addlSocketOptions = useStore($socketOptions);
@@ -66,9 +61,8 @@ export const useSocketIO = () => {
return;
}
const socket: AppSocket = io(socketUrl, socketOptions);
$socket.set(socket);
setEventListeners({ socket, dispatch, getState, setIsConnected: $isConnected.set });
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(socketUrl, socketOptions);
setEventListeners({ dispatch, socket });
socket.connect();
if ($isDebugging.get() || import.meta.env.MODE === 'development') {
@@ -90,5 +84,5 @@ export const useSocketIO = () => {
socket.disconnect();
$isSocketInitialized.set(false);
};
}, [dispatch, getState, socketOptions, socketUrl]);
}, [dispatch, socketOptions, socketUrl]);
};

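One side of this hunk exports the $socket and $isConnected nanostores atoms alongside the hook. A sketch of how a component could read them, assuming that version of the module; the import path is illustrative:

import { useStore } from '@nanostores/react';
import { $isConnected } from 'services/events/useSocketIO'; // assumed path

export const ConnectionIndicator = () => {
  // Re-renders whenever the connection-state atom changes, without touching the Redux store.
  const isConnected = useStore($isConnected);
  return <span>{isConnected ? 'Connected' : 'Disconnected'}</span>;
};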
View File

@@ -15,21 +15,21 @@ export const BASE_CONTEXT = {};
export const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));
export const zLogNamespace = z.enum([
'canvas',
'config',
'events',
'gallery',
'generation',
'metadata',
'models',
'system',
'queue',
'workflows',
]);
export type LogNamespace = z.infer<typeof zLogNamespace>;
export type LoggerNamespace =
| 'images'
| 'models'
| 'config'
| 'canvas'
| 'generation'
| 'nodes'
| 'system'
| 'socketio'
| 'session'
| 'queue'
| 'dnd'
| 'controlLayers';
export const logger = (namespace: LogNamespace) => $logger.get().child({ namespace });
export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });
export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']);
export type LogLevel = z.infer<typeof zLogLevel>;

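One side of this hunk replaces the hand-written LoggerNamespace union with a zod enum. A small sketch of what that buys at runtime, assuming the zLogNamespace definition shown above:

import { z } from 'zod';

const zLogNamespace = z.enum([
  'canvas', 'config', 'events', 'gallery', 'generation',
  'metadata', 'models', 'system', 'queue', 'workflows',
]);
type LogNamespace = z.infer<typeof zLogNamespace>;

const allNamespaces: LogNamespace[] = [...zLogNamespace.options]; // the values exist at runtime
const isKnown = zLogNamespace.safeParse('canvas').success;        // true; a plain union type cannot be checked like this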
View File

@@ -1,41 +1,29 @@
import { createLogWriter } from '@roarr/browser-log-writer';
import { useAppSelector } from 'app/store/storeHooks';
import {
selectSystemLogIsEnabled,
selectSystemLogLevel,
selectSystemLogNamespaces,
} from 'features/system/store/systemSlice';
import { useEffect, useMemo } from 'react';
import { ROARR, Roarr } from 'roarr';
import type { LogNamespace } from './logger';
import type { LoggerNamespace } from './logger';
import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP, logger } from './logger';
export const useLogger = (namespace: LogNamespace) => {
const logLevel = useAppSelector(selectSystemLogLevel);
const logNamespaces = useAppSelector(selectSystemLogNamespaces);
const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
export const useLogger = (namespace: LoggerNamespace) => {
const consoleLogLevel = useAppSelector((s) => s.system.consoleLogLevel);
const shouldLogToConsole = useAppSelector((s) => s.system.shouldLogToConsole);
// The provided Roarr browser log writer uses localStorage to configure logging to the console
useEffect(() => {
if (logIsEnabled) {
if (shouldLogToConsole) {
// Enable console log output
localStorage.setItem('ROARR_LOG', 'true');
// Use a filter to show only logs of the given level
let filter = `context.logLevel:>=${LOG_LEVEL_MAP[logLevel]}`;
if (logNamespaces.length > 0) {
filter += ` AND (${logNamespaces.map((ns) => `context.namespace:${ns}`).join(' OR ')})`;
} else {
filter += ' AND context.namespace:undefined';
}
localStorage.setItem('ROARR_FILTER', filter);
localStorage.setItem('ROARR_FILTER', `context.logLevel:>=${LOG_LEVEL_MAP[consoleLogLevel]}`);
} else {
// Disable console log output
localStorage.setItem('ROARR_LOG', 'false');
}
ROARR.write = createLogWriter();
}, [logLevel, logIsEnabled, logNamespaces]);
}, [consoleLogLevel, shouldLogToConsole]);
// Update the module-scoped logger context as needed
useEffect(() => {

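For reference, a worked example of the filter string one side of this hunk builds from the selected level and namespaces, assuming LOG_LEVEL_MAP follows Roarr's standard numeric levels (debug = 20):

// logLevel = 'debug', logNamespaces = ['canvas', 'events']
//   ROARR_FILTER: 'context.logLevel:>=20 AND (context.namespace:canvas OR context.namespace:events)'
// logLevel = 'debug', logNamespaces = []
//   ROARR_FILTER: 'context.logLevel:>=20 AND context.namespace:undefined'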
View File

@@ -1,7 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import type { TabName } from 'features/ui/store/uiTypes';
import type { InvokeTabName } from 'features/ui/store/tabMap';
export const enqueueRequested = createAction<{
tabName: TabName;
tabName: InvokeTabName;
prepend: boolean;
}>('app/enqueueRequested');

View File

@@ -1,3 +1,2 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
export const EMPTY_OBJECT = {};

View File

@@ -1,6 +1,5 @@
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
import type { GetSelectorsOptions } from '@reduxjs/toolkit/dist/entities/state_selectors';
import type { RootState } from 'app/store/store';
import { isEqual } from 'lodash-es';
/**
@@ -20,5 +19,3 @@ export const getSelectorsOptions: GetSelectorsOptions = {
argsMemoize: lruMemoize,
}),
};
export const createMemoizedAppSelector = createMemoizedSelector.withTypes<RootState>();

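One side of this hunk adds a RootState-typed memoized selector creator. A sketch of the intended call site, mirroring the selectIsLocal selector seen earlier in this compare; the import path is assumed:

import { createMemoizedAppSelector } from 'app/store/createMemoizedSelector'; // assumed path
import { selectConfigSlice } from 'features/system/store/configSlice';

// The RootState parameter is inferred from .withTypes<RootState>(), so no explicit typing is needed here.
const selectIsLocal = createMemoizedAppSelector(selectConfigSlice, (config) => config.isLocal);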
View File

@@ -1,4 +1,5 @@
import { logger } from 'app/logging/logger';
import { parseify } from 'common/util/serialize';
import { PersistError, RehydrateError } from 'redux-remember';
import { serializeError } from 'serialize-error';
@@ -40,6 +41,6 @@ export const errorHandler = (err: PersistError | RehydrateError) => {
} else if (err instanceof RehydrateError) {
log.error({ error: serializeError(err) }, 'Problem rehydrating state');
} else {
log.error({ error: serializeError(err) }, 'Problem in persistence layer');
log.error({ error: parseify(err) }, 'Problem in persistence layer');
}
};

View File

@@ -1,7 +1,9 @@
import type { UnknownAction } from '@reduxjs/toolkit';
import { deepClone } from 'common/util/deepClone';
import { isAnyGraphBuilt } from 'features/nodes/store/actions';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import type { Graph } from 'services/api/types';
import { socketGeneratorProgress } from 'services/events/actions';
export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
if (isAnyGraphBuilt(action)) {
@@ -22,5 +24,13 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
};
}
if (socketGeneratorProgress.match(action)) {
const sanitized = deepClone(action);
if (sanitized.payload.data.progress_image) {
sanitized.payload.data.progress_image.dataURL = '<Progress image omitted>';
}
return sanitized;
}
return action;
};

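A worked example of what the added progress-image sanitization does to an action before it reaches the devtools; the payload is abbreviated and the field values are illustrative:

// before: { payload: { data: { progress_image: { dataURL: 'data:image/png;base64,iVBORw0K...' } } } }
// after:  { payload: { data: { progress_image: { dataURL: '<Progress image omitted>' } } } }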
View File

@@ -1,7 +1,7 @@
import type { TypedStartListening } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import { createListenerMiddleware } from '@reduxjs/toolkit';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addStagingListeners } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addCommitStagingAreaImageListener } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
@@ -9,7 +9,17 @@ import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddlewar
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addCancellationsListeners } from 'app/store/middleware/listenerMiddleware/listeners/cancellationsListeners';
import { addCanvasCopiedToClipboardListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard';
import { addCanvasDownloadedAsImageListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage';
import { addCanvasImageToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet';
import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery';
import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet';
import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged';
import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery';
import { addControlAdapterPreprocessor } from 'app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor';
import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess';
import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed';
import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
@@ -27,7 +37,16 @@ import { addModelSelectedListener } from 'app/store/middleware/listenerMiddlewar
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
import { addDynamicPromptsListener } from 'app/store/middleware/listenerMiddleware/listeners/promptChanged';
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected';
import { addSocketDisconnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected';
import { addGeneratorProgressEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress';
import { addInvocationCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete';
import { addInvocationErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError';
import { addInvocationStartedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted';
import { addModelInstallEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall';
import { addModelLoadEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad';
import { addSocketQueueItemStatusChangedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged';
import { addStagingAreaImageSavedListener } from 'app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved';
import { addUpdateAllNodesRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested';
import { addWorkflowLoadRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested';
import type { AppDispatch, RootState } from 'app/store/store';
@@ -41,8 +60,6 @@ export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
const startAppListening = listenerMiddleware.startListening as AppStartListening;
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
/**
* The RTK listener middleware is a lightweight alternative to sagas/observables.
*
@@ -66,6 +83,7 @@ addGalleryImageClickedListener(startAppListening);
addGalleryOffsetChangedListener(startAppListening);
// User Invoked
addEnqueueRequestedCanvasListener(startAppListening);
addEnqueueRequestedNodes(startAppListening);
addEnqueueRequestedLinear(startAppListening);
addEnqueueRequestedUpscale(startAppListening);
@@ -73,23 +91,32 @@ addAnyEnqueuedListener(startAppListening);
addBatchEnqueuedListener(startAppListening);
// Canvas actions
// addCanvasSavedToGalleryListener(startAppListening);
// addCanvasMaskSavedToGalleryListener(startAppListening);
// addCanvasImageToControlNetListener(startAppListening);
// addCanvasMaskToControlNetListener(startAppListening);
// addCanvasDownloadedAsImageListener(startAppListening);
// addCanvasCopiedToClipboardListener(startAppListening);
// addCanvasMergedListener(startAppListening);
// addStagingAreaImageSavedListener(startAppListening);
// addCommitStagingAreaImageListener(startAppListening);
addStagingListeners(startAppListening);
addCanvasSavedToGalleryListener(startAppListening);
addCanvasMaskSavedToGalleryListener(startAppListening);
addCanvasImageToControlNetListener(startAppListening);
addCanvasMaskToControlNetListener(startAppListening);
addCanvasDownloadedAsImageListener(startAppListening);
addCanvasCopiedToClipboardListener(startAppListening);
addCanvasMergedListener(startAppListening);
addStagingAreaImageSavedListener(startAppListening);
addCommitStagingAreaImageListener(startAppListening);
// Socket.IO
addGeneratorProgressEventListener(startAppListening);
addInvocationCompleteEventListener(startAppListening);
addInvocationErrorEventListener(startAppListening);
addInvocationStartedEventListener(startAppListening);
addSocketConnectedEventListener(startAppListening);
// Gallery bulk download
addSocketDisconnectedEventListener(startAppListening);
addModelLoadEventListener(startAppListening);
addModelInstallEventListener(startAppListening);
addSocketQueueItemStatusChangedEventListener(startAppListening);
addBulkDownloadListeners(startAppListening);
// ControlNet
addControlNetImageProcessedListener(startAppListening);
addControlNetAutoProcessListener(startAppListening);
// Boards
addImageAddedToBoardFulfilledListener(startAppListening);
addImageRemovedFromBoardFulfilledListener(startAppListening);
@@ -121,6 +148,4 @@ addAdHocPostProcessingRequestedListener(startAppListening);
addDynamicPromptsListener(startAppListening);
addSetDefaultSettingsListener(startAppListening);
// addControlAdapterPreprocessor(startAppListening);
addCancellationsListeners(startAppListening);
addControlAdapterPreprocessor(startAppListening);

View File

@@ -1,21 +1,21 @@
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import { parseify } from 'common/util/serialize';
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
const log = logger('queue');
export const adHocPostProcessingRequested = createAction<{ imageDTO: ImageDTO }>(`upscaling/postProcessingRequested`);
export const addAdHocPostProcessingRequestedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: adHocPostProcessingRequested,
effect: async (action, { dispatch, getState }) => {
const log = logger('session');
const { imageDTO } = action.payload;
const state = getState();
@@ -39,9 +39,9 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt
const enqueueResult = await req.unwrap();
req.reset();
log.debug({ enqueueResult } as SerializableObject, t('queue.graphQueued'));
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
} catch (error) {
log.error({ enqueueBatchArg } as SerializableObject, t('queue.graphFailedToQueue'));
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
if (error instanceof Object && 'status' in error && error.status === 403) {
return;

View File

@@ -23,7 +23,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
*/
startAppListening({
matcher: matchAnyBoardDeleted,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const state = getState();
const deletedBoardId = action.meta.arg.originalArgs;
const { autoAddBoardId, selectedBoardId } = state.gallery;
@@ -44,7 +44,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
// If we archived a board, it may end up hidden. If it's selected or the auto-add board, we should reset those.
startAppListening({
matcher: boardsApi.endpoints.updateBoard.matchFulfilled,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const state = getState();
const { shouldShowArchivedBoards } = state.gallery;
@@ -61,7 +61,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
// When we hide archived boards, if the selected or the auto-add board is archived, we should reset those.
startAppListening({
actionCreator: shouldShowArchivedBoardsChanged,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const shouldShowArchivedBoards = action.payload;
// We only need to take action if we have just hidden archived boards.
@@ -100,7 +100,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
*/
startAppListening({
matcher: boardsApi.endpoints.listAllBoards.matchFulfilled,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const boards = action.payload;
const state = getState();
const { selectedBoardId, autoAddBoardId } = state.gallery;

View File

@@ -1,34 +1,33 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasReset, rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { stagingAreaImageAccepted, stagingAreaReset } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import {
canvasBatchIdsReset,
commitStagingAreaImage,
discardStagedImages,
resetCanvas,
setInitialCanvasImage,
} from 'features/canvas/store/canvasSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import { assert } from 'tsafe';
const log = logger('canvas');
const matcher = isAnyOf(commitStagingAreaImage, discardStagedImages, resetCanvas, setInitialCanvasImage);
const matchCanvasOrStagingAreaRest = isAnyOf(stagingAreaReset, canvasReset);
export const addStagingListeners = (startAppListening: AppStartListening) => {
export const addCommitStagingAreaImageListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: matchCanvasOrStagingAreaRest,
effect: async (_, { dispatch }) => {
matcher,
effect: async (_, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const { batchIds } = state.canvas;
try {
const req = dispatch(
queueApi.endpoints.cancelByBatchDestination.initiate(
{ destination: 'canvas' },
{ fixedCacheKey: 'cancelByBatchOrigin' }
)
queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: batchIds }, { fixedCacheKey: 'cancelByBatchIds' })
);
const { canceled } = await req.unwrap();
req.reset();
if (canceled > 0) {
log.debug(`Canceled ${canceled} canvas batches`);
toast({
@@ -37,6 +36,7 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
status: 'success',
});
}
dispatch(canvasBatchIdsReset());
} catch {
log.error('Failed to cancel canvas batches');
toast({
@@ -47,26 +47,4 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
}
},
});
startAppListening({
actionCreator: stagingAreaImageAccepted,
effect: (action, api) => {
const { index } = action.payload;
const state = api.getState();
const stagingAreaImage = state.canvasStagingArea.stagedImages[index];
assert(stagingAreaImage, 'No staged image found to accept');
const { x, y } = selectCanvasSlice(state).bbox.rect;
const { imageDTO, offsetX, offsetY } = stagingAreaImage;
const imageObject = imageDTOToImageObject(imageDTO);
const overrides: Partial<CanvasRasterLayerState> = {
position: { x: x + offsetX, y: y + offsetY },
objects: [imageObject],
};
api.dispatch(rasterLayerAdded({ overrides, isSelected: false }));
api.dispatch(stagingAreaReset());
},
});
};

View File

@@ -4,7 +4,7 @@ import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
effect: (_, { dispatch, getState }) => {
effect: async (_, { dispatch, getState }) => {
const { data } = selectQueueStatus(getState());
if (!data || data.processor.is_started) {

View File

@@ -1,14 +1,14 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
export const addAppConfigReceivedListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: (action, { getState, dispatch }) => {
effect: async (action, { getState, dispatch }) => {
const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
const infillMethod = getState().params.infillMethod;
const infillMethod = getState().generation.infillMethod;
if (!infill_methods.includes(infillMethod)) {
// if there is no infill method, set it to the first one

View File

@@ -6,7 +6,7 @@ export const appStarted = createAction('app/appStarted');
export const addAppStartedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: appStarted,
effect: (action, { unsubscribe, cancelActiveListeners }) => {
effect: async (action, { unsubscribe, cancelActiveListeners }) => {
// this should only run once
cancelActiveListeners();
unsubscribe();

View File

@@ -1,30 +1,27 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import { parseify } from 'common/util/serialize';
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { truncate, upperFirst } from 'lodash-es';
import { serializeError } from 'serialize-error';
import { queueApi } from 'services/api/endpoints/queue';
const log = logger('queue');
export const addBatchEnqueuedListener = (startAppListening: AppStartListening) => {
// success
startAppListening({
matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
effect: (action) => {
const enqueueResult = action.payload;
effect: async (action) => {
const response = action.payload;
const arg = action.meta.arg.originalArgs;
log.debug({ enqueueResult } as SerializableObject, 'Batch enqueued');
logger('queue').debug({ enqueueResult: parseify(response) }, 'Batch enqueued');
toast({
id: 'QUEUE_BATCH_SUCCEEDED',
title: t('queue.batchQueued'),
status: 'success',
description: t('queue.batchQueuedDesc', {
count: enqueueResult.enqueued,
count: response.enqueued,
direction: arg.prepend ? t('queue.front') : t('queue.back'),
}),
});
@@ -34,9 +31,9 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
// error
startAppListening({
matcher: queueApi.endpoints.enqueueBatch.matchRejected,
effect: (action) => {
effect: async (action) => {
const response = action.payload;
const batchConfig = action.meta.arg.originalArgs;
const arg = action.meta.arg.originalArgs;
if (!response) {
toast({
@@ -45,7 +42,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
status: 'error',
description: t('common.unknownError'),
});
log.error({ batchConfig } as SerializableObject, t('queue.batchFailedToQueue'));
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
return;
}
@@ -71,7 +68,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
description: t('common.unknownError'),
});
}
log.error({ batchConfig, error: serializeError(response) } as SerializableObject, t('queue.batchFailedToQueue'));
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
},
});
};

View File

@@ -1,31 +1,47 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice';
import { allLayersDeleted } from 'features/controlLayers/store/controlLayersSlice';
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { imagesApi } from 'services/api/endpoints/images';
export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: imagesApi.endpoints.deleteBoardAndImages.matchFulfilled,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const { deleted_images } = action.payload;
// Remove all deleted images from the UI
let wasCanvasReset = false;
let wasNodeEditorReset = false;
let wereControlAdaptersReset = false;
let wereControlLayersReset = false;
const state = getState();
const nodes = selectNodesSlice(state);
const canvas = selectCanvasSlice(state);
const { canvas, nodes, controlAdapters, controlLayers } = getState();
deleted_images.forEach((image_name) => {
const imageUsage = getImageUsage(nodes, canvas, image_name);
const imageUsage = getImageUsage(canvas, nodes.present, controlAdapters, controlLayers.present, image_name);
if (imageUsage.isCanvasImage && !wasCanvasReset) {
dispatch(resetCanvas());
wasCanvasReset = true;
}
if (imageUsage.isNodesImage && !wasNodeEditorReset) {
dispatch(nodeEditorReset());
wasNodeEditorReset = true;
}
if (imageUsage.isControlImage && !wereControlAdaptersReset) {
dispatch(controlAdaptersReset());
wereControlAdaptersReset = true;
}
if (imageUsage.isControlLayerImage && !wereControlLayersReset) {
dispatch(allLayersDeleted());
wereControlLayersReset = true;
}
});
},
});

View File

@@ -1,15 +1,21 @@
import { ExternalLink } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
import {
socketBulkDownloadComplete,
socketBulkDownloadError,
socketBulkDownloadStarted,
} from 'services/events/actions';
const log = logger('gallery');
const log = logger('images');
export const addBulkDownloadListeners = (startAppListening: AppStartListening) => {
startAppListening({
matcher: imagesApi.endpoints.bulkDownloadImages.matchFulfilled,
effect: (action) => {
effect: async (action) => {
log.debug(action.payload, 'Bulk download requested');
// If we have an item name, we are processing the bulk download locally and should use it as the toast id to
@@ -27,7 +33,7 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
startAppListening({
matcher: imagesApi.endpoints.bulkDownloadImages.matchRejected,
effect: () => {
effect: async () => {
log.debug('Bulk download request failed');
// There isn't any toast to update if we get this event.
@@ -38,4 +44,55 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
});
},
});
startAppListening({
actionCreator: socketBulkDownloadStarted,
effect: async (action) => {
// This should always happen immediately after the bulk download request, so we don't need to show a toast here.
log.debug(action.payload.data, 'Bulk download preparation started');
},
});
startAppListening({
actionCreator: socketBulkDownloadComplete,
effect: async (action) => {
log.debug(action.payload.data, 'Bulk download preparation completed');
const { bulk_download_item_name } = action.payload.data;
// TODO(psyche): This URL may break in some environments (e.g. Nvidia workbench) but we need to test it first
const url = `/api/v1/images/download/${bulk_download_item_name}`;
toast({
id: bulk_download_item_name,
title: t('gallery.bulkDownloadReady', 'Download ready'),
status: 'success',
description: (
<ExternalLink
label={t('gallery.clickToDownload', 'Click here to download')}
href={url}
download={bulk_download_item_name}
/>
),
duration: null,
});
},
});
startAppListening({
actionCreator: socketBulkDownloadError,
effect: async (action) => {
log.debug(action.payload.data, 'Bulk download preparation failed');
const { bulk_download_item_name } = action.payload.data;
toast({
id: bulk_download_item_name,
title: t('gallery.bulkDownloadFailed'),
status: 'error',
description: action.payload.data.error,
duration: null,
});
},
});
};

View File

@@ -1,137 +0,0 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { $lastCanvasProgressEvent } from 'features/controlLayers/store/canvasSlice';
import { queueApi } from 'services/api/endpoints/queue';
/**
* To prevent a race condition where a progress event arrives after a successful cancellation, we need to keep track of
* cancellations:
* - In the route handlers above, we track and update the cancellations object
* - When the user queues a batch, we should reset the cancellations, also handled in the route handlers above
* - When we get a progress event, we should check if the event is cancelled before setting the event
*
* We have a few ways that cancellations are effected, so we need to track them all:
* - by queue item id (in this case, we will compare the session_id and not the item_id)
* - by batch id
* - by destination
* - by clearing the queue
*/
type Cancellations = {
sessionIds: Set<string>;
batchIds: Set<string>;
destinations: Set<string>;
clearQueue: boolean;
};
const resetCancellations = (): void => {
cancellations.clearQueue = false;
cancellations.sessionIds.clear();
cancellations.batchIds.clear();
cancellations.destinations.clear();
};
const cancellations: Cancellations = {
sessionIds: new Set(),
batchIds: new Set(),
destinations: new Set(),
clearQueue: false,
} as Readonly<Cancellations>;
/**
* Checks if an item is cancelled, used to prevent race conditions with event handling.
*
* To use this, provide the session_id, batch_id and destination from the event payload.
*/
export const getIsCancelled = (item: {
session_id: string;
batch_id: string;
destination?: string | null;
}): boolean => {
if (cancellations.clearQueue) {
return true;
}
if (cancellations.sessionIds.has(item.session_id)) {
return true;
}
if (cancellations.batchIds.has(item.batch_id)) {
return true;
}
if (item.destination && cancellations.destinations.has(item.destination)) {
return true;
}
return false;
};
export const addCancellationsListeners = (startAppListening: AppStartListening) => {
// When we get a cancellation, we may need to clear the last progress event - next few listeners handle those cases.
// Maybe we could use the `getIsCancelled` util here, but I think that could introduce _another_ race condition...
startAppListening({
matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
effect: () => {
resetCancellations();
},
});
startAppListening({
matcher: queueApi.endpoints.cancelByBatchDestination.matchFulfilled,
effect: (action) => {
cancellations.destinations.add(action.meta.arg.originalArgs.destination);
const event = $lastCanvasProgressEvent.get();
if (!event) {
return;
}
const { session_id, batch_id, destination } = event;
if (getIsCancelled({ session_id, batch_id, destination })) {
$lastCanvasProgressEvent.set(null);
}
},
});
startAppListening({
matcher: queueApi.endpoints.cancelQueueItem.matchFulfilled,
effect: (action) => {
cancellations.sessionIds.add(action.payload.session_id);
const event = $lastCanvasProgressEvent.get();
if (!event) {
return;
}
const { session_id, batch_id, destination } = event;
if (getIsCancelled({ session_id, batch_id, destination })) {
$lastCanvasProgressEvent.set(null);
}
},
});
startAppListening({
matcher: queueApi.endpoints.cancelByBatchIds.matchFulfilled,
effect: (action) => {
for (const batch_id of action.meta.arg.originalArgs.batch_ids) {
cancellations.batchIds.add(batch_id);
}
const event = $lastCanvasProgressEvent.get();
if (!event) {
return;
}
const { session_id, batch_id, destination } = event;
if (getIsCancelled({ session_id, batch_id, destination })) {
$lastCanvasProgressEvent.set(null);
}
},
});
startAppListening({
matcher: queueApi.endpoints.clearQueue.matchFulfilled,
effect: () => {
cancellations.clearQueue = true;
const event = $lastCanvasProgressEvent.get();
if (!event) {
return;
}
const { session_id, batch_id, destination } = event;
if (getIsCancelled({ session_id, batch_id, destination })) {
$lastCanvasProgressEvent.set(null);
}
},
});
};

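The file above (removed on one side of this compare) documents that progress events must be checked against pending cancellations before they are stored. A sketch of that consumer side, with the handler shape reduced to the three fields getIsCancelled needs; the storage call is elided:

import { getIsCancelled } from 'app/store/middleware/listenerMiddleware/listeners/cancellationsListeners';

type ProgressLike = { session_id: string; batch_id: string; destination?: string | null };

const onGeneratorProgress = (event: ProgressLike) => {
  if (getIsCancelled(event)) {
    return; // a cancellation raced ahead of this event, so drop it
  }
  // ...otherwise store it, e.g. $lastCanvasProgressEvent.set(event)...
};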
View File

@@ -0,0 +1,38 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasCopiedToClipboard } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasCopiedToClipboard,
effect: async (action, { getState }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
const state = getState();
try {
const blob = getBaseLayerBlob(state);
copyBlobToClipboard(blob);
} catch (err) {
moduleLog.error(String(err));
toast({
id: 'CANVAS_COPY_FAILED',
title: t('toast.problemCopyingCanvas'),
description: t('toast.problemCopyingCanvasDesc'),
status: 'error',
});
return;
}
toast({
id: 'CANVAS_COPY_SUCCEEDED',
title: t('toast.canvasCopiedClipboard'),
status: 'success',
});
},
});
};

View File

@@ -0,0 +1,34 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasDownloadedAsImage } from 'features/canvas/store/actions';
import { downloadBlob } from 'features/canvas/util/downloadBlob';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasDownloadedAsImage,
effect: async (action, { getState }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasSavedToGalleryListener' });
const state = getState();
let blob;
try {
blob = await getBaseLayerBlob(state);
} catch (err) {
moduleLog.error(String(err));
toast({
id: 'CANVAS_DOWNLOAD_FAILED',
title: t('toast.problemDownloadingCanvas'),
description: t('toast.problemDownloadingCanvasDesc'),
status: 'error',
});
return;
}
downloadBlob(blob, 'canvas.png');
toast({ id: 'CANVAS_DOWNLOAD_SUCCEEDED', title: t('toast.canvasDownloaded'), status: 'success' });
},
});
};

View File

@@ -0,0 +1,60 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasImageToControlAdapter } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasImageToControlNetListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasImageToControlAdapter,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const { id } = action.payload;
let blob: Blob;
try {
blob = await getBaseLayerBlob(state, true);
} catch (err) {
log.error(String(err));
toast({
id: 'PROBLEM_SAVING_CANVAS',
title: t('toast.problemSavingCanvas'),
description: t('toast.problemSavingCanvasDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'savedCanvas.png', {
type: 'image/png',
}),
image_category: 'control',
is_intermediate: true,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: false,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasSentControlnetAssets'),
},
})
).unwrap();
const { image_name } = imageDTO;
dispatch(
controlAdapterImageChanged({
id,
controlImage: image_name,
})
);
},
});
};

View File

@@ -0,0 +1,60 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMaskSavedToGallery } from 'features/canvas/store/actions';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMaskSavedToGallery,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const canvasBlobsAndImageData = await getCanvasData(
state.canvas.layerState,
state.canvas.boundingBoxCoordinates,
state.canvas.boundingBoxDimensions,
state.canvas.isMaskEnabled,
state.canvas.shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
return;
}
const { maskBlob } = canvasBlobsAndImageData;
if (!maskBlob) {
log.error('Problem getting mask layer blob');
toast({
id: 'PROBLEM_SAVING_MASK',
title: t('toast.problemSavingMask'),
description: t('toast.problemSavingMaskDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.maskSavedAssets'),
},
})
);
},
});
};

View File

@@ -0,0 +1,70 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMaskToControlAdapter } from 'features/canvas/store/actions';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMaskToControlNetListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMaskToControlAdapter,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const { id } = action.payload;
const canvasBlobsAndImageData = await getCanvasData(
state.canvas.layerState,
state.canvas.boundingBoxCoordinates,
state.canvas.boundingBoxDimensions,
state.canvas.isMaskEnabled,
state.canvas.shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
return;
}
const { maskBlob } = canvasBlobsAndImageData;
if (!maskBlob) {
log.error('Problem getting mask layer blob');
toast({
id: 'PROBLEM_IMPORTING_MASK',
title: t('toast.problemImportingMask'),
description: t('toast.problemImportingMaskDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: true,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: false,
postUploadAction: {
type: 'TOAST',
title: t('toast.maskSentControlnetAssets'),
},
})
).unwrap();
const { image_name } = imageDTO;
dispatch(
controlAdapterImageChanged({
id,
controlImage: image_name,
})
);
},
});
};

View File

@@ -0,0 +1,73 @@
import { $logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMerged } from 'features/canvas/store/actions';
import { $canvasBaseLayer } from 'features/canvas/store/canvasNanostore';
import { setMergedCanvas } from 'features/canvas/store/canvasSlice';
import { getFullBaseLayerBlob } from 'features/canvas/util/getFullBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasMergedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasMerged,
effect: async (action, { dispatch }) => {
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
const blob = await getFullBaseLayerBlob();
if (!blob) {
moduleLog.error('Problem getting base layer blob');
toast({
id: 'PROBLEM_MERGING_CANVAS',
title: t('toast.problemMergingCanvas'),
description: t('toast.problemMergingCanvasDesc'),
status: 'error',
});
return;
}
const canvasBaseLayer = $canvasBaseLayer.get();
if (!canvasBaseLayer) {
moduleLog.error('Problem getting canvas base layer');
toast({
id: 'PROBLEM_MERGING_CANVAS',
title: t('toast.problemMergingCanvas'),
description: t('toast.problemMergingCanvasDesc'),
status: 'error',
});
return;
}
const baseLayerRect = canvasBaseLayer.getClientRect({
relativeTo: canvasBaseLayer.getParent() ?? undefined,
});
const imageDTO = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'mergedCanvas.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasMerged'),
},
})
).unwrap();
// TODO: I can't figure out how to do the type narrowing in the `take()` so just brute forcing it here
const { image_name } = imageDTO;
dispatch(
setMergedCanvas({
kind: 'image',
layer: 'base',
imageName: image_name,
...baseLayerRect,
})
);
},
});
};

View File

@@ -0,0 +1,53 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { canvasSavedToGallery } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: canvasSavedToGallery,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
let blob;
try {
blob = await getBaseLayerBlob(state);
} catch (err) {
log.error(String(err));
toast({
id: 'CANVAS_SAVE_FAILED',
title: t('toast.problemSavingCanvas'),
description: t('toast.problemSavingCanvasDesc'),
status: 'error',
});
return;
}
const { autoAddBoardId } = state.gallery;
dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([blob], 'savedCanvas.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
crop_visible: true,
postUploadAction: {
type: 'TOAST',
title: t('toast.canvasSavedGallery'),
},
metadata: {
_canvas_objects: parseify(state.canvas.layerState.objects),
},
})
);
},
});
};

View File

@@ -0,0 +1,194 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch } from 'app/store/store';
import { parseify } from 'common/util/serialize';
import {
caLayerImageChanged,
caLayerModelChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged,
caLayerProcessorPendingBatchIdChanged,
caLayerRecalled,
isControlAdapterLayer,
} from 'features/controlLayers/store/controlLayersSlice';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { isEqual } from 'lodash-es';
import { getImageDTO } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
import { assert } from 'tsafe';
const matcher = isAnyOf(
caLayerImageChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged,
caLayerModelChanged,
caLayerRecalled
);
const DEBOUNCE_MS = 300;
const log = logger('session');
/**
* Simple helper to cancel a batch and reset the pending batch ID
*/
const cancelProcessorBatch = async (dispatch: AppDispatch, layerId: string, batchId: string) => {
const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
log.trace({ batchId }, 'Cancelling existing preprocessor batch');
try {
await req.unwrap();
} catch {
// no-op
} finally {
req.reset();
// Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
}
};
export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
startAppListening({
matcher,
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take, signal }) => {
const layerId = caLayerRecalled.match(action) ? action.payload.id : action.payload.layerId;
const state = getState();
const originalState = getOriginalState();
// Cancel any in-progress instances of this listener
cancelActiveListeners();
log.trace('Control Layer CA auto-process triggered');
// Delay before starting actual work
await delay(DEBOUNCE_MS);
const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
if (!layer) {
return;
}
// We should only process if the processor settings or image have changed
const originalLayer = originalState.controlLayers.present.layers
.filter(isControlAdapterLayer)
.find((l) => l.id === layerId);
const originalImage = originalLayer?.controlAdapter.image;
const originalConfig = originalLayer?.controlAdapter.processorConfig;
const image = layer.controlAdapter.image;
const processedImage = layer.controlAdapter.processedImage;
const config = layer.controlAdapter.processorConfig;
if (isEqual(config, originalConfig) && isEqual(image, originalImage) && processedImage) {
// Neither config nor image have changed, we can bail
return;
}
if (!image || !config) {
// - If we have no image, we have nothing to process
// - If we have no processor config, we have nothing to process
// Clear the processed image and bail
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
return;
}
// At this point, the user has stopped fiddling with the processor settings and there is a processor selected.
// If there is a pending processor batch, cancel it.
if (layer.controlAdapter.processorPendingBatchId) {
cancelProcessorBatch(dispatch, layerId, layer.controlAdapter.processorPendingBatchId);
}
// TODO(psyche): I can't get TS to be happy, it thinks `config` is `never` but it should be inferred from the generic... I'll just cast it for now
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config as never);
const enqueueBatchArg: BatchConfig = {
prepend: true,
batch: {
graph: {
nodes: {
[processorNode.id]: {
...processorNode,
// Control images are always intermediate - do not save to gallery
is_intermediate: true,
},
},
edges: [],
},
runs: 1,
},
};
// Kick off the processor batch
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
);
try {
const enqueueResult = await req.unwrap();
// TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: enqueueResult.batch.batch_id }));
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
// Wait for the processor node to complete
const [invocationCompleteAction] = await take(
(action): action is ReturnType<typeof socketInvocationComplete> =>
socketInvocationComplete.match(action) &&
action.payload.data.batch_id === enqueueResult.batch.batch_id &&
action.payload.data.invocation_source_id === processorNode.id
);
// We still have to check the output type
assert(
invocationCompleteAction.payload.data.result.type === 'image_output',
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
);
const { image_name } = invocationCompleteAction.payload.data.result.image;
const imageDTO = await getImageDTO(image_name);
assert(imageDTO, "Failed to fetch processor output's image DTO");
// Whew! We made it. Update the layer with the processed image
log.debug({ layerId, imageDTO }, 'ControlNet image processed');
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO }));
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
} catch (error) {
if (signal.aborted) {
// The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
const pendingBatchId = getState()
.controlLayers.present.layers.filter(isControlAdapterLayer)
.find((l) => l.id === layerId)?.controlAdapter.processorPendingBatchId;
if (pendingBatchId) {
cancelProcessorBatch(dispatch, layerId, pendingBatchId);
}
log.trace('Control Adapter preprocessor cancelled');
} else {
// Some other error condition...
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
if (error instanceof Object) {
if ('data' in error && 'status' in error) {
if (error.status === 403) {
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
return;
}
}
}
toast({
id: 'GRAPH_QUEUE_FAILED',
title: t('queue.graphFailedToQueue'),
status: 'error',
});
}
} finally {
req.reset();
}
},
});
};

View File

@@ -0,0 +1,85 @@
import type { AnyListenerPredicate } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { RootState } from 'app/store/store';
import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions';
import {
controlAdapterAutoConfigToggled,
controlAdapterImageChanged,
controlAdapterModelChanged,
controlAdapterProcessorParamsChanged,
controlAdapterProcessortTypeChanged,
selectControlAdapterById,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
type AnyControlAdapterParamChangeAction =
| ReturnType<typeof controlAdapterProcessorParamsChanged>
| ReturnType<typeof controlAdapterModelChanged>
| ReturnType<typeof controlAdapterImageChanged>
| ReturnType<typeof controlAdapterProcessortTypeChanged>
| ReturnType<typeof controlAdapterAutoConfigToggled>;
const predicate: AnyListenerPredicate<RootState> = (action, state, prevState) => {
const isActionMatched =
controlAdapterProcessorParamsChanged.match(action) ||
controlAdapterModelChanged.match(action) ||
controlAdapterImageChanged.match(action) ||
controlAdapterProcessortTypeChanged.match(action) ||
controlAdapterAutoConfigToggled.match(action);
if (!isActionMatched) {
return false;
}
const { id } = action.payload;
const prevCA = selectControlAdapterById(prevState.controlAdapters, id);
const ca = selectControlAdapterById(state.controlAdapters, id);
if (!prevCA || !isControlNetOrT2IAdapter(prevCA) || !ca || !isControlNetOrT2IAdapter(ca)) {
return false;
}
if (controlAdapterAutoConfigToggled.match(action)) {
// do not process if the user just disabled auto-config
if (prevCA.shouldAutoConfig === true) {
return false;
}
}
const { controlImage, processorType, shouldAutoConfig } = ca;
if (controlAdapterModelChanged.match(action) && !shouldAutoConfig) {
// do not process if the action is a model change but the processor settings are dirty
return false;
}
const isProcessorSelected = processorType !== 'none';
const hasControlImage = Boolean(controlImage);
return isProcessorSelected && hasControlImage;
};
const DEBOUNCE_MS = 300;
/**
* Listener that automatically processes a ControlNet image when its processor parameters are changed.
*
* The network request is debounced.
*/
export const addControlNetAutoProcessListener = (startAppListening: AppStartListening) => {
startAppListening({
predicate,
effect: async (action, { dispatch, cancelActiveListeners, delay }) => {
const log = logger('session');
const { id } = (action as AnyControlAdapterParamChangeAction).payload;
// Cancel any in-progress instances of this listener
cancelActiveListeners();
log.trace('ControlNet auto-process triggered');
// Delay before starting actual work
await delay(DEBOUNCE_MS);
dispatch(controlAdapterImageProcessed({ id }));
},
});
};
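
The doc comment above notes that the network request is debounced; the mechanism is that cancelActiveListeners() aborts any still-running instance of this effect and delay() is abort-aware, so only the last action in a burst survives the wait. A stripped-down sketch of that trailing-edge debounce, with hypothetical action names:

import { createAction, createListenerMiddleware } from '@reduxjs/toolkit';

const paramsChanged = createAction<{ id: string }>('example/paramsChanged');
const processRequested = createAction<{ id: string }>('example/processRequested');

const listenerMiddleware = createListenerMiddleware();
const DEBOUNCE_MS = 300;

listenerMiddleware.startListening({
  actionCreator: paramsChanged,
  effect: async (action, { dispatch, cancelActiveListeners, delay }) => {
    // Abort any earlier in-flight run of this effect; combined with the
    // abortable delay below, this yields a trailing-edge debounce.
    cancelActiveListeners();
    await delay(DEBOUNCE_MS);
    dispatch(processRequested({ id: action.payload.id }));
  },
});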

View File

@@ -0,0 +1,118 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions';
import {
controlAdapterImageChanged,
controlAdapterProcessedImageChanged,
pendingControlImagesCleared,
selectControlAdapterById,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
export const addControlNetImageProcessedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: controlAdapterImageProcessed,
effect: async (action, { dispatch, getState, take }) => {
const log = logger('session');
const { id } = action.payload;
const ca = selectControlAdapterById(getState().controlAdapters, id);
if (!ca?.controlImage || !isControlNetOrT2IAdapter(ca)) {
log.error('Unable to process ControlNet image');
return;
}
if (ca.processorType === 'none' || ca.processorNode.type === 'none') {
return;
}
// ControlNet one-off processing graph is just the processor node, no edges.
// Also we need to grab the image.
const nodeId = ca.processorNode.id;
const enqueueBatchArg: BatchConfig = {
prepend: true,
batch: {
graph: {
nodes: {
[ca.processorNode.id]: {
...ca.processorNode,
is_intermediate: true,
use_cache: false,
image: { image_name: ca.controlImage },
},
},
edges: [],
},
runs: 1,
},
};
try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
);
const enqueueResult = await req.unwrap();
req.reset();
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
const [invocationCompleteAction] = await take(
(action): action is ReturnType<typeof socketInvocationComplete> =>
socketInvocationComplete.match(action) &&
action.payload.data.batch_id === enqueueResult.batch.batch_id &&
action.payload.data.invocation_source_id === nodeId
);
// We still have to check the output type
if (invocationCompleteAction.payload.data.result.type === 'image_output') {
const { image_name } = invocationCompleteAction.payload.data.result.image;
// Wait for the ImageDTO to be received
const [{ payload }] = await take(
(action) =>
imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
);
const processedControlImage = payload as ImageDTO;
log.debug({ controlNetId: action.payload, processedControlImage }, 'ControlNet image processed');
// Update the processed image in the store
dispatch(
controlAdapterProcessedImageChanged({
id,
processedControlImage: processedControlImage.image_name,
})
);
}
} catch (error) {
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
if (error instanceof Object) {
if ('data' in error && 'status' in error) {
if (error.status === 403) {
dispatch(pendingControlImagesCleared());
dispatch(controlAdapterImageChanged({ id, controlImage: null }));
return;
}
}
}
toast({
id: 'GRAPH_QUEUE_FAILED',
title: t('queue.graphFailedToQueue'),
status: 'error',
});
}
},
});
};
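
Several of these listeners share the same enqueue lifecycle: dispatch the enqueueBatch mutation with a fixedCacheKey, unwrap() the result, and reset() the shared cache entry when done. A self-contained sketch of that lifecycle against a hypothetical queue API (the endpoint URL and response shape here are assumptions, not the app's real definitions):

import { configureStore } from '@reduxjs/toolkit';
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query';

// Hypothetical queue API; only the parts needed for the sketch.
const queueApi = createApi({
  reducerPath: 'queueApi',
  baseQuery: fetchBaseQuery({ baseUrl: '/api/v1/' }),
  endpoints: (build) => ({
    enqueueBatch: build.mutation<{ batch: { batch_id: string } }, { prepend: boolean }>({
      query: (body) => ({ url: 'queue/enqueue_batch', method: 'POST', body }),
    }),
  }),
});

const store = configureStore({
  reducer: { [queueApi.reducerPath]: queueApi.reducer },
  middleware: (getDefault) => getDefault().concat(queueApi.middleware),
});

export const enqueueOnce = async (batchConfig: { prepend: boolean }): Promise<string> => {
  // fixedCacheKey lets other callers observe this mutation's status;
  // reset() releases that shared cache entry once we are finished with it.
  const req = store.dispatch(
    queueApi.endpoints.enqueueBatch.initiate(batchConfig, { fixedCacheKey: 'enqueueBatch' })
  );
  try {
    const enqueueResult = await req.unwrap(); // throws if the request was rejected
    return enqueueResult.batch.batch_id;
  } finally {
    req.reset();
  }
};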

View File

@@ -0,0 +1,144 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { parseify } from 'common/util/serialize';
import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
import { canvasGraphBuilt } from 'features/nodes/store/actions';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildCanvasGraph } from 'features/nodes/util/graph/canvas/buildCanvasGraph';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { ImageDTO } from 'services/api/types';
/**
 * This listener is responsible for invoking the canvas. This involves a number of steps:
*
* 1. Generate image blobs from the canvas layers
* 2. Determine the generation mode from the layers (txt2img, img2img, inpaint)
* 3. Build the canvas graph
* 4. Create the session with the graph
* 5. Upload the init image if necessary
* 6. Upload the mask image if necessary
* 7. Update the init and mask images with the session ID
* 8. Initialize the staging area if not yet initialized
* 9. Dispatch the sessionReadyToInvoke action to invoke the session
*/
export const addEnqueueRequestedCanvasListener = (startAppListening: AppStartListening) => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
enqueueRequested.match(action) && action.payload.tabName === 'canvas',
effect: async (action, { getState, dispatch }) => {
const log = logger('queue');
const { prepend } = action.payload;
const state = getState();
const { layerState, boundingBoxCoordinates, boundingBoxDimensions, isMaskEnabled, shouldPreserveMaskedArea } =
state.canvas;
// Build canvas blobs
const canvasBlobsAndImageData = await getCanvasData(
layerState,
boundingBoxCoordinates,
boundingBoxDimensions,
isMaskEnabled,
shouldPreserveMaskedArea
);
if (!canvasBlobsAndImageData) {
log.error('Unable to create canvas data');
return;
}
const { baseBlob, baseImageData, maskBlob, maskImageData } = canvasBlobsAndImageData;
// Determine the generation mode
const generationMode = getCanvasGenerationMode(baseImageData, maskImageData);
if (state.system.enableImageDebugging) {
const baseDataURL = await blobToDataURL(baseBlob);
const maskDataURL = await blobToDataURL(maskBlob);
openBase64ImageInTab([
{ base64: maskDataURL, caption: 'mask b64' },
{ base64: baseDataURL, caption: 'image b64' },
]);
}
log.debug(`Generation mode: ${generationMode}`);
// Temp placeholders for the init and mask images
let canvasInitImage: ImageDTO | undefined;
let canvasMaskImage: ImageDTO | undefined;
// For img2img and inpaint/outpaint, we need to upload the init images
if (['img2img', 'inpaint', 'outpaint'].includes(generationMode)) {
// upload the init image and keep the resulting ImageDTO
canvasInitImage = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([baseBlob], 'canvasInitImage.png', {
type: 'image/png',
}),
image_category: 'general',
is_intermediate: true,
})
).unwrap();
}
// For inpaint/outpaint, we also need to upload the mask layer
if (['inpaint', 'outpaint'].includes(generationMode)) {
// upload the mask image and keep the resulting ImageDTO
canvasMaskImage = await dispatch(
imagesApi.endpoints.uploadImage.initiate({
file: new File([maskBlob], 'canvasMaskImage.png', {
type: 'image/png',
}),
image_category: 'mask',
is_intermediate: true,
})
).unwrap();
}
const graph = await buildCanvasGraph(state, generationMode, canvasInitImage, canvasMaskImage);
log.debug({ graph: parseify(graph) }, `Canvas graph built`);
// currently this action is just listened to for logging
dispatch(canvasGraphBuilt(graph));
const batchConfig = prepareLinearUIBatch(state, graph, prepend);
try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
const enqueueResult = await req.unwrap();
req.reset();
const batchId = enqueueResult.batch.batch_id as string; // we know this is a string; the backend always provides it
// Prep the canvas staging area if it is not yet initialized
if (!state.canvas.layerState.stagingArea.boundingBox) {
dispatch(
stagingAreaInitialized({
boundingBox: {
...state.canvas.boundingBoxCoordinates,
...state.canvas.boundingBoxDimensions,
},
})
);
}
// Associate the enqueued batch with the canvas
dispatch(canvasBatchIdAdded(batchId));
} catch {
// no-op
}
},
});
};
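
Steps 1 and 5–6 of the flow above turn the canvas layers into Blobs, open them as data URLs when image debugging is enabled, and wrap them in File objects for upload. The blobToDataURL helper imported above is not part of this diff; a standard FileReader-based implementation looks roughly like this (the repo's actual util may differ):

// Convert a Blob to a base64 data URL via FileReader; a sketch of the
// blobToDataURL util referenced above, not the repo's actual implementation.
export const blobToDataURL = (blob: Blob): Promise<string> =>
  new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve(reader.result as string);
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(blob);
  });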

View File

@@ -1,25 +1,10 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import type { Result } from 'common/util/result';
import { isErr, withResult, withResultAsync } from 'common/util/result';
import { $canvasManager } from 'features/controlLayers/store/canvasSlice';
import {
selectIsStaging,
stagingAreaReset,
stagingAreaStartedStaging,
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { serializeError } from 'serialize-error';
import { buildGenerationTabGraph } from 'features/nodes/util/graph/generation/buildGenerationTabGraph';
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/generation/buildGenerationTabSDXLGraph';
import { queueApi } from 'services/api/endpoints/queue';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';
const log = logger('generation');
export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) => {
startAppListening({
@@ -27,81 +12,33 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
enqueueRequested.match(action) && action.payload.tabName === 'generation',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const model = state.params.model;
const { shouldShowProgressInViewer } = state.ui;
const model = state.generation.model;
const { prepend } = action.payload;
const manager = $canvasManager.get();
assert(manager, 'No canvas manager found in state');
let graph;
let didStartStaging = false;
if (!selectIsStaging(state) && state.canvasSettings.sendToCanvas) {
dispatch(stagingAreaStartedStaging());
didStartStaging = true;
if (model?.base === 'sdxl') {
graph = await buildGenerationTabSDXLGraph(state);
} else {
graph = await buildGenerationTabGraph(state);
}
const abortStaging = () => {
if (didStartStaging && selectIsStaging(getState())) {
dispatch(stagingAreaReset());
}
};
let buildGraphResult: Result<
{ g: Graph; noise: Invocation<'noise'>; posCond: Invocation<'compel' | 'sdxl_compel_prompt'> },
Error
>;
assert(model, 'No model found in state');
const base = model.base;
switch (base) {
case 'sdxl':
buildGraphResult = await withResultAsync(() => buildSDXLGraph(state, manager));
break;
case 'sd-1':
case `sd-2`:
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
break;
default:
assert(false, `No graph builders for base ${base}`);
}
if (isErr(buildGraphResult)) {
log.error({ error: serializeError(buildGraphResult.error) }, 'Failed to build graph');
abortStaging();
return;
}
const { g, noise, posCond } = buildGraphResult.value;
const destination = state.canvasSettings.sendToCanvas ? 'canvas' : 'gallery';
const prepareBatchResult = withResult(() =>
prepareLinearUIBatch(state, g, prepend, noise, posCond, 'generation', destination)
);
if (isErr(prepareBatchResult)) {
log.error({ error: serializeError(prepareBatchResult.error) }, 'Failed to prepare batch');
abortStaging();
return;
}
const batchConfig = prepareLinearUIBatch(state, graph, prepend);
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();
const enqueueResult = await withResultAsync(() => req.unwrap());
if (isErr(enqueueResult)) {
log.error({ error: serializeError(enqueueResult.error) }, 'Failed to enqueue batch');
abortStaging();
return;
try {
await req.unwrap();
if (shouldShowProgressInViewer) {
dispatch(isImageViewerOpenChanged(true));
}
} finally {
req.reset();
}
log.debug({ batchConfig: prepareBatchResult.value } as SerializableObject, 'Enqueued batch');
},
});
};
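
One side of this hunk routes failures through a small Result type (isErr, withResult, withResultAsync from common/util/result) instead of throwing. Those helpers are not shown in this diff; a minimal sketch of the pattern, with the shapes inferred from the call sites above:

// Assumed shapes for the Result helpers used above; treat this as a sketch,
// not the repo's actual common/util/result implementation.
type Ok<T> = { ok: true; value: T };
type Err<E> = { ok: false; error: E };
export type Result<T, E = Error> = Ok<T> | Err<E>;

export const isErr = <T, E>(r: Result<T, E>): r is Err<E> => !r.ok;

const toError = (e: unknown): Error => (e instanceof Error ? e : new Error(String(e)));

export const withResult = <T>(fn: () => T): Result<T, Error> => {
  try {
    return { ok: true, value: fn() };
  } catch (e) {
    return { ok: false, error: toError(e) };
  }
};

export const withResultAsync = async <T>(fn: () => Promise<T>): Promise<Result<T, Error>> => {
  try {
    return { ok: true, value: await fn() };
  } catch (e) {
    return { ok: false, error: toError(e) };
  }
};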

View File

@@ -1,6 +1,5 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { queueApi } from 'services/api/endpoints/queue';
@@ -12,12 +11,12 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const nodes = selectNodesSlice(state);
const { nodes, edges } = state.nodes.present;
const workflow = state.workflow;
const graph = buildNodesGraph(nodes);
const graph = buildNodesGraph(state.nodes.present);
const builtWorkflow = buildWorkflowWithValidation({
nodes: nodes.nodes,
edges: nodes.edges,
nodes,
edges,
workflow,
});
@@ -30,9 +29,7 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
batch: {
graph,
workflow: builtWorkflow,
runs: state.params.iterations,
origin: 'workflows',
destination: 'gallery',
runs: state.generation.iterations,
},
prepend: action.payload.prepend,
};

View File

@@ -1,5 +1,6 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
import { queueApi } from 'services/api/endpoints/queue';
@@ -10,11 +11,12 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
enqueueRequested.match(action) && action.payload.tabName === 'upscaling',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const { shouldShowProgressInViewer } = state.ui;
const { prepend } = action.payload;
const { g, noise, posCond } = await buildMultidiffusionUpscaleGraph(state);
const graph = await buildMultidiffusionUpscaleGraph(state);
const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');
const batchConfig = prepareLinearUIBatch(state, graph, prepend);
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
@@ -23,6 +25,9 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
);
try {
await req.unwrap();
if (shouldShowProgressInViewer) {
dispatch(isImageViewerOpenChanged(true));
}
} finally {
req.reset();
}

View File

@@ -27,7 +27,7 @@ export const galleryImageClicked = createAction<{
export const addGalleryImageClickedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: galleryImageClicked,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const { imageDTO, shiftKey, ctrlKey, metaKey, altKey } = action.payload;
const state = getState();
const queryArgs = selectListImagesQueryArgs(state);

View File

@@ -1,27 +1,24 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import { parseify } from 'common/util/serialize';
import { $templates } from 'features/nodes/store/nodesSlice';
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
import { size } from 'lodash-es';
import { serializeError } from 'serialize-error';
import { appInfoApi } from 'services/api/endpoints/appInfo';
const log = logger('system');
export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
effect: (action, { getState }) => {
const log = logger('system');
const schemaJSON = action.payload;
log.debug({ schemaJSON: parseify(schemaJSON) } as SerializableObject, 'Received OpenAPI schema');
log.debug({ schemaJSON: parseify(schemaJSON) }, 'Received OpenAPI schema');
const { nodesAllowlist, nodesDenylist } = getState().config;
const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist);
log.debug({ nodeTemplates } as SerializableObject, `Built ${size(nodeTemplates)} node templates`);
log.debug({ nodeTemplates: parseify(nodeTemplates) }, `Built ${size(nodeTemplates)} node templates`);
$templates.set(nodeTemplates);
},
@@ -33,7 +30,8 @@ export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening
// If action.meta.condition === true, the request was canceled/skipped because another request was in flight or
// the value was already in the cache. We don't want to log these errors.
if (!action.meta.condition) {
log.error({ error: serializeError(action.error) }, 'Problem retrieving OpenAPI Schema');
const log = logger('system');
log.error({ error: parseify(action.error) }, 'Problem retrieving OpenAPI Schema');
}
},
});
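
This hunk switches between serializeError (from the serialize-error package) and a local parseify helper for making values safe to log. parseify from common/util/serialize is not included in this diff; it is commonly implemented as a JSON round-trip that drops non-serializable values, roughly:

// A parseify-style helper: round-trip through JSON to strip functions,
// class instances, and other non-serializable values before logging.
// This is an assumption about the util's behavior, not the repo's code.
export const parseify = <T>(obj: T): unknown => JSON.parse(JSON.stringify(obj));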

View File

@@ -2,13 +2,15 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { imagesApi } from 'services/api/endpoints/images';
const log = logger('gallery');
export const addImageAddedToBoardFulfilledListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchFulfilled,
effect: (action) => {
const log = logger('images');
const { board_id, imageDTO } = action.meta.arg.originalArgs;
// TODO: update listImages cache for this board
log.debug({ board_id, imageDTO }, 'Image added to board');
},
});
@@ -16,7 +18,9 @@ export const addImageAddedToBoardFulfilledListener = (startAppListening: AppStar
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchRejected,
effect: (action) => {
const log = logger('images');
const { board_id, imageDTO } = action.meta.arg.originalArgs;
log.debug({ board_id, imageDTO }, 'Problem adding image to board');
},
});

View File

@@ -1,9 +1,20 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch, RootState } from 'app/store/store';
import { entityDeleted, ipaImageChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import {
controlAdapterImageChanged,
controlAdapterProcessedImageChanged,
selectControlAdapterAll,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
import {
isControlAdapterLayer,
isInitialImageLayer,
isIPAdapterLayer,
isRegionalGuidanceLayer,
layerDeleted,
} from 'features/controlLayers/store/controlLayersSlice';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
@@ -15,10 +26,6 @@ import { forEach, intersectionBy } from 'lodash-es';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
const log = logger('gallery');
//TODO(psyche): handle image deletion (canvas staging area?)
// Some utils to delete images from different parts of the app
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
state.nodes.present.nodes.forEach((node) => {
@@ -40,37 +47,52 @@ const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: Im
});
};
// const deleteControlAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
// state.canvas.present.controlAdapters.entities.forEach(({ id, imageObject, processedImageObject }) => {
// if (
// imageObject?.image.image_name === imageDTO.image_name ||
// processedImageObject?.image.image_name === imageDTO.image_name
// ) {
// dispatch(caImageChanged({ id, imageDTO: null }));
// dispatch(caProcessedImageChanged({ id, imageDTO: null }));
// }
// });
// };
const deleteIPAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).ipAdapters.entities.forEach((entity) => {
if (entity.ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(ipaImageChanged({ entityIdentifier: getEntityIdentifier(entity), imageDTO: null }));
const deleteControlAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
forEach(selectControlAdapterAll(state.controlAdapters), (ca) => {
if (
ca.controlImage === imageDTO.image_name ||
(isControlNetOrT2IAdapter(ca) && ca.processedControlImage === imageDTO.image_name)
) {
dispatch(
controlAdapterImageChanged({
id: ca.id,
controlImage: null,
})
);
dispatch(
controlAdapterProcessedImageChanged({
id: ca.id,
processedControlImage: null,
})
);
}
});
};
const deleteLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).rasterLayers.entities.forEach(({ id, objects }) => {
let shouldDelete = false;
for (const obj of objects) {
if (obj.type === 'image' && obj.image.image_name === imageDTO.image_name) {
shouldDelete = true;
break;
const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
state.controlLayers.present.layers.forEach((l) => {
if (isRegionalGuidanceLayer(l)) {
if (l.ipAdapters.some((ipa) => ipa.image?.name === imageDTO.image_name)) {
dispatch(layerDeleted(l.id));
}
}
if (shouldDelete) {
dispatch(entityDeleted({ entityIdentifier: { id, type: 'raster_layer' } }));
if (isControlAdapterLayer(l)) {
if (
l.controlAdapter.image?.name === imageDTO.image_name ||
l.controlAdapter.processedImage?.name === imageDTO.image_name
) {
dispatch(layerDeleted(l.id));
}
}
if (isIPAdapterLayer(l)) {
if (l.ipAdapter.image?.name === imageDTO.image_name) {
dispatch(layerDeleted(l.id));
}
}
if (isInitialImageLayer(l)) {
if (l.image?.name === imageDTO.image_name) {
dispatch(layerDeleted(l.id));
}
}
});
};
@@ -123,10 +145,14 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
}
}
// We need to reset the features where the image is in use - none of these work if their image(s) don't exist
if (imageUsage.isCanvasImage) {
dispatch(resetCanvas());
}
deleteControlAdapterImages(state, dispatch, imageDTO);
deleteNodesImages(state, dispatch, imageDTO);
// deleteControlAdapterImages(state, dispatch, imageDTO);
deleteIPAdapterImages(state, dispatch, imageDTO);
deleteLayerImages(state, dispatch, imageDTO);
deleteControlLayerImages(state, dispatch, imageDTO);
} catch {
// no-op
} finally {
@@ -163,11 +189,14 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
// We need to reset the features where the image is in use - none of these work if their image(s) don't exist
if (imagesUsage.some((i) => i.isCanvasImage)) {
dispatch(resetCanvas());
}
imageDTOs.forEach((imageDTO) => {
deleteControlAdapterImages(state, dispatch, imageDTO);
deleteNodesImages(state, dispatch, imageDTO);
// deleteControlAdapterImages(state, dispatch, imageDTO);
deleteIPAdapterImages(state, dispatch, imageDTO);
deleteLayerImages(state, dispatch, imageDTO);
deleteControlLayerImages(state, dispatch, imageDTO);
});
} catch {
// no-op
@@ -191,6 +220,7 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchFulfilled,
effect: (action) => {
const log = logger('images');
log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Image deleted');
},
});
@@ -198,6 +228,7 @@ export const addImageDeletionListeners = (startAppListening: AppStartListening)
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchRejected,
effect: (action) => {
const log = logger('images');
log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Unable to delete image');
},
});

View File

@@ -1,20 +1,28 @@
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
import { parseify } from 'common/util/serialize';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import {
controlLayerAdded,
ipaImageChanged,
rasterLayerAdded,
rgIPAdapterImageChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasControlLayerState, CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
controlAdapterImageChanged,
controlAdapterIsEnabledChanged,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import {
caLayerImageChanged,
iiLayerImageChanged,
ipaLayerImageChanged,
rgLayerIPAdapterImageChanged,
} from 'features/controlLayers/store/controlLayersSlice';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { isValidDrop } from 'features/dnd/util/isValidDrop';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import {
imageSelected,
imageToCompareChanged,
isImageViewerOpenChanged,
selectionChanged,
} from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
import { imagesApi } from 'services/api/endpoints/images';
@@ -23,12 +31,11 @@ export const dndDropped = createAction<{
activeData: TypesafeDraggableData;
}>('dnd/dndDropped');
const log = logger('system');
export const addImageDroppedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: dndDropped,
effect: (action, { dispatch, getState }) => {
effect: async (action, { dispatch, getState }) => {
const log = logger('dnd');
const { activeData, overData } = action.payload;
if (!isValidDrop(overData, activeData)) {
return;
@@ -39,22 +46,80 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
} else if (activeData.payloadType === 'GALLERY_SELECTION') {
log.debug({ activeData, overData }, `Images (${getState().gallery.selection.length}) dropped`);
} else if (activeData.payloadType === 'NODE_FIELD') {
log.debug({ activeData, overData }, 'Node field dropped');
log.debug({ activeData: parseify(activeData), overData: parseify(overData) }, 'Node field dropped');
} else {
log.debug({ activeData, overData }, `Unknown payload dropped`);
}
/**
* Image dropped on current image
*/
if (
overData.actionType === 'SET_CURRENT_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
dispatch(imageSelected(activeData.payload.imageDTO));
dispatch(isImageViewerOpenChanged(true));
return;
}
/**
* Image dropped on ControlNet
*/
if (
overData.actionType === 'SET_CONTROL_ADAPTER_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const { id } = overData.context;
dispatch(
controlAdapterImageChanged({
id,
controlImage: activeData.payload.imageDTO.image_name,
})
);
dispatch(
controlAdapterIsEnabledChanged({
id,
isEnabled: true,
})
);
return;
}
/**
* Image dropped on Control Adapter Layer
*/
if (
overData.actionType === 'SET_CA_LAYER_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const { layerId } = overData.context;
dispatch(
caLayerImageChanged({
layerId,
imageDTO: activeData.payload.imageDTO,
})
);
return;
}
/**
* Image dropped on IP Adapter Layer
*/
if (
overData.actionType === 'SET_IPA_IMAGE' &&
overData.actionType === 'SET_IPA_LAYER_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const { id } = overData.context;
const { layerId } = overData.context;
dispatch(
ipaImageChanged({ entityIdentifier: { id, type: 'ip_adapter' }, imageDTO: activeData.payload.imageDTO })
ipaLayerImageChanged({
layerId,
imageDTO: activeData.payload.imageDTO,
})
);
return;
}
@@ -63,14 +128,14 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
* Image dropped on RG Layer IP Adapter
*/
if (
overData.actionType === 'SET_RG_IP_ADAPTER_IMAGE' &&
overData.actionType === 'SET_RG_LAYER_IP_ADAPTER_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const { id, ipAdapterId } = overData.context;
const { layerId, ipAdapterId } = overData.context;
dispatch(
rgIPAdapterImageChanged({
entityIdentifier: { id, type: 'regional_guidance' },
rgLayerIPAdapterImageChanged({
layerId,
ipAdapterId,
imageDTO: activeData.payload.imageDTO,
})
@@ -79,41 +144,32 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
}
/**
* Image dropped on Raster layer
* Image dropped on II Layer Image
*/
if (
overData.actionType === 'ADD_RASTER_LAYER_FROM_IMAGE' &&
overData.actionType === 'SET_II_LAYER_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const overrides: Partial<CanvasRasterLayerState> = {
objects: [imageObject],
position: { x, y },
};
dispatch(rasterLayerAdded({ overrides, isSelected: true }));
const { layerId } = overData.context;
dispatch(
iiLayerImageChanged({
layerId,
imageDTO: activeData.payload.imageDTO,
})
);
return;
}
/**
* Image dropped on Raster layer
* Image dropped on Canvas
*/
if (
overData.actionType === 'ADD_CONTROL_LAYER_FROM_IMAGE' &&
overData.actionType === 'SET_CANVAS_INITIAL_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const state = getState();
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(state).bbox.rect;
const defaultControlAdapter = selectDefaultControlAdapter(state);
const overrides: Partial<CanvasControlLayerState> = {
objects: [imageObject],
position: { x, y },
controlAdapter: defaultControlAdapter,
};
dispatch(controlLayerAdded({ overrides, isSelected: true }));
dispatch(setInitialCanvasImage(activeData.payload.imageDTO, selectOptimalDimension(getState())));
return;
}
@@ -146,6 +202,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
) {
const { imageDTO } = activeData.payload;
dispatch(imageToCompareChanged(imageDTO));
dispatch(isImageViewerOpenChanged(true));
return;
}
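
The chain of if blocks above routes each drop by matching overData.actionType together with activeData.payloadType; because both are discriminated unions, TypeScript narrows the corresponding context and payload shapes inside each branch. A reduced, self-contained sketch of that routing style — the type and field names below are illustrative, not the app's real TypesafeDroppableData / TypesafeDraggableData:

type DropData =
  | { actionType: 'SET_CURRENT_IMAGE' }
  | { actionType: 'SET_CONTROL_ADAPTER_IMAGE'; context: { id: string } };

type DragData =
  | { payloadType: 'IMAGE_DTO'; payload: { imageName: string } }
  | { payloadType: 'GALLERY_SELECTION' };

const handleDrop = (overData: DropData, activeData: DragData): string => {
  if (overData.actionType === 'SET_CONTROL_ADAPTER_IMAGE' && activeData.payloadType === 'IMAGE_DTO') {
    // overData.context and activeData.payload are only visible after narrowing.
    return `set ${activeData.payload.imageName} on adapter ${overData.context.id}`;
  }
  if (overData.actionType === 'SET_CURRENT_IMAGE' && activeData.payloadType === 'IMAGE_DTO') {
    return `view ${activeData.payload.imageName}`;
  }
  return 'unhandled drop';
};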

Some files were not shown because too many files have changed in this diff.