Compare commits

...

65 Commits

Author SHA1 Message Date
psychedelicious
825f163492 chore: bump version to v5.4.0a1 2024-10-30 11:06:01 +11:00
psychedelicious
bc42205593 fix(ui): remember to disable isFiltering when finishing filtering 2024-10-30 09:19:30 +11:00
psychedelicious
2e3cba6416 fix(ui): flash of original layer when applying filter/segment
Let the parent module adopt the filtered/segmented image instead of destroying it and making the parent re-create it, which results in a brief flash of the parent layer's original objects before the new image is rendered.
2024-10-30 09:19:30 +11:00
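
As a rough TypeScript sketch of the "adopt, don't recreate" idea, using Konva-style node ownership (the function names and the `rebuildParentFromState` callback are illustrative, not the actual canvas module API):

```typescript
import Konva from 'konva';

// Before: destroy the preview node and rebuild the parent layer from its stored objects.
// The parent's original objects are visible for a frame until the new image is re-created.
export function applyByRecreating(preview: Konva.Image, rebuildParentFromState: () => void): void {
  preview.destroy();
  rebuildParentFromState();
}

// After: the parent group adopts the already-rendered node, so nothing is redrawn in between.
export function applyByAdopting(preview: Konva.Image, parentGroup: Konva.Group): void {
  preview.moveTo(parentGroup);
}
```
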
psychedelicious
7852aacd11 fix(ui): track whether graph succeeded in runGraphAndReturnImageOutput
This prevents extraneous graph cancel requests when cleaning up the abort signal after a successful run of a graph.
2024-10-30 09:19:30 +11:00
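
A minimal TypeScript sketch of that guard; `enqueueGraph` and `cancelQueueItem` are hypothetical stand-ins for the real queue calls:

```typescript
// Hypothetical stand-ins for the real enqueue/cancel API.
declare function enqueueGraph(graph: unknown): Promise<{ sessionId: string; output: Promise<string> }>;
declare function cancelQueueItem(sessionId: string): Promise<void>;

export async function runGraphAndReturnImageOutput(graph: unknown, signal: AbortSignal): Promise<string> {
  const { sessionId, output } = await enqueueGraph(graph);
  let didSucceed = false;

  const onAbort = () => {
    // Guard: once the graph has succeeded there is nothing left to cancel, so cleanup of the
    // abort signal (or a late abort) must not fire an extraneous cancel request.
    if (!didSucceed) {
      void cancelQueueItem(sessionId);
    }
  };
  signal.addEventListener('abort', onAbort, { once: true });

  try {
    const imageName = await output;
    didSucceed = true;
    return imageName;
  } finally {
    signal.removeEventListener('abort', onAbort);
  }
}
```
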
psychedelicious
6cccd67ecd feat(ui): update SAM module w/ minor improvements from filter module 2024-10-30 09:19:30 +11:00
psychedelicious
a7a89c9de1 feat(ui): use more resilient logic in canvas filter module, same as in SAM module 2024-10-30 09:19:30 +11:00
psychedelicious
5ca8eed89e tidy(ui): remove all buffer renderer interactions in SAM module
We don't use the buffer renderer in this module; there's no reason to clear it.
2024-10-30 09:19:30 +11:00
psychedelicious
c885c3c9a6 fix(ui): filter layer data pushed to parent rendered when saving as 2024-10-30 09:19:30 +11:00
Mary Hipp
d81c38c350 update announcements 2024-10-29 09:53:13 -04:00
Riku
92d5b73215 fix(ui): seamless zod parameter cleanup 2024-10-29 20:43:44 +11:00
Riku
097e92db6a fix(ui): always write seamless metadata
Ensure images without seamless enabled correctly reset the setting
when all parameters are recalled
2024-10-29 20:43:44 +11:00
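
A hedged TypeScript sketch of the idea; the metadata keys `seamless_x`/`seamless_y` and the parameter names are assumptions:

```typescript
type GenerationParams = { seamlessXAxis: boolean; seamlessYAxis: boolean };

// Before: the keys were only written when seamless was enabled, so recalling all parameters
// from an image generated *without* seamless left a previously-enabled value in place.
// After: always write both keys, so a recall resets them to false when appropriate.
export const buildSeamlessMetadata = (params: GenerationParams) => ({
  seamless_x: params.seamlessXAxis,
  seamless_y: params.seamlessYAxis,
});
```
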
Riku
84c6209a45 feat(ui): display seamless values in metadata viewer 2024-10-29 20:43:44 +11:00
Riku
107e48808a fix(ui): recall seamless settings 2024-10-29 20:43:44 +11:00
dunkeroni
47168b5505 chore: make ruff 2024-10-29 14:07:20 +11:00
dunkeroni
58152ec981 fix preview progress bar pre-denoise 2024-10-29 14:07:20 +11:00
dunkeroni
c74afbf332 convert to bgr on sdxl t2i 2024-10-29 14:07:20 +11:00
psychedelicious
7cdda00a54 feat(ui): rearrange canvas paste back nodes to save an image step
We were scaling the unscaled image and mask down before doing the paste-back, but this adds an extraneous step & image output.

We can do the paste-back first, then scale to output size after. So instead of 2 resizes before the paste-back, we have 1 resize after.

The end result is the same.
2024-10-29 11:13:31 +11:00
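
A conceptual TypeScript sketch of the reordering; the node-builder helpers are hypothetical, not the real graph API:

```typescript
type ImageNode = { id: string };
// Hypothetical helpers that append a node to the graph and return its image output.
declare function addResizeNode(image: ImageNode, width: number, height: number): ImageNode;
declare function addPasteBackNode(base: ImageNode, image: ImageNode, mask: ImageNode): ImageNode;

// Before: two resize nodes feed the paste-back - an extra node and an extra intermediate image.
export function buildPasteBackBefore(base: ImageNode, generated: ImageNode, mask: ImageNode, w: number, h: number) {
  const scaledImage = addResizeNode(generated, w, h);
  const scaledMask = addResizeNode(mask, w, h);
  return addPasteBackNode(base, scaledImage, scaledMask);
}

// After: paste back at generation resolution first, then resize the composite once.
export function buildPasteBackAfter(base: ImageNode, generated: ImageNode, mask: ImageNode, w: number, h: number) {
  const pasted = addPasteBackNode(base, generated, mask);
  return addResizeNode(pasted, w, h);
}
```
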
psychedelicious
a74282bce6 feat(ui): graph builders use objects for arg instead of many args 2024-10-29 11:13:31 +11:00
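
A short TypeScript illustration of the positional-args-to-options-object refactor; the names are illustrative, not the actual builder signatures:

```typescript
// Before: long positional argument lists are easy to call in the wrong order.
// declare function buildGraph(state: RootState, bbox: Rect, prefix: string, denoisingStrength: number): Graph;

// After: a single typed arg object is self-documenting at every call site.
interface BuildGraphArg {
  prefix: string;
  denoisingStrength: number;
  bbox: { x: number; y: number; width: number; height: number };
}

export const buildGraph = ({ prefix, denoisingStrength, bbox }: BuildGraphArg) => ({
  outputNodeId: `${prefix}_output`,
  denoisingStrength,
  width: bbox.width,
  height: bbox.height,
});

// Call sites read like labelled parameters and tolerate reordering:
buildGraph({ prefix: 'canvas_output', bbox: { x: 0, y: 0, width: 512, height: 512 }, denoisingStrength: 0.75 });
```
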
psychedelicious
107f048c7a feat(ui): extract canvas output node prefix to constant 2024-10-29 11:13:31 +11:00
Ryan Dick
a2486a5f06 Remove unused prediction_type and upcast_attention from from_single_file(...) calls. 2024-10-28 13:05:17 -04:00
Ryan Dick
07ab116efb Remove load_safety_checker=False from calls to from_single_file(...).
This param has been deprecated, and by including it (even when set to
False) the safety checker automatically gets downloaded.
2024-10-28 13:05:17 -04:00
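
In code terms, the change amounts to dropping the deprecated keyword arguments; a sketch with an illustrative model path and dtype:

```python
import torch
from diffusers import StableDiffusionPipeline

# Before: prediction_type and upcast_attention were unused, and passing load_safety_checker
# (even as False) still triggered the safety checker download.
# pipeline = StableDiffusionPipeline.from_single_file(
#     "model.safetensors",
#     torch_dtype=torch.float16,
#     prediction_type="epsilon",
#     upcast_attention=False,
#     load_safety_checker=False,
# )

# After: pass only the path and dtype and let diffusers infer the rest from the checkpoint.
pipeline = StableDiffusionPipeline.from_single_file("model.safetensors", torch_dtype=torch.float16)
```
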
Ryan Dick
1a13af3c7a Fix huggingface_hub.errors imports after version bump. 2024-10-28 13:05:17 -04:00
Ryan Dick
f2966a2594 Fix changed import for FromOriginalControlNetMixin after diffusers bump. 2024-10-28 13:05:17 -04:00
Ryan Dick
58bb97e3c6 Bump diffusers, accelerate, and huggingface-hub. 2024-10-28 13:05:17 -04:00
psychedelicious
a84aa5c049 fix(ui): canvas alerts blocking metadata panel 2024-10-27 09:46:01 +11:00
psychedelicious
aebcec28e0 chore: bump version to v5.3.0 2024-10-25 22:37:59 -04:00
psychedelicious
db1c5a94f7 feat(ui): image ctx -> New from Image -> Canvas as Raster/Control Layer 2024-10-25 22:27:00 -04:00
psychedelicious
56222a8493 feat(ui): organize layer context menu items 2024-10-25 22:27:00 -04:00
psychedelicious
b7510ce709 feat(ui): filter, select object and transform UI buttons
- Restore dedicated `Apply` buttons
- Remove icons from the buttons, too much noise when the words are short and clear
- Update loading state to show a spinner next to the `Process` button instead of on _every_ button
2024-10-25 22:27:00 -04:00
psychedelicious
5739799e2e fix(ui): close viewer when transforming 2024-10-25 22:27:00 -04:00
psychedelicious
813cf87920 feat(ui): move canvas alerts to top-left corner 2024-10-25 22:27:00 -04:00
psychedelicious
c95b151daf feat(ui): add layer title heading for canvas ctx menu 2024-10-25 22:27:00 -04:00
psychedelicious
a0f823a3cf feat(ui): reset shouldShowStagedImage flag when starting staging 2024-10-25 22:27:00 -04:00
Hippalectryon
64e0f6d688 Improve dev install docs
Fix numbering
2024-10-25 08:27:26 -04:00
psychedelicious
ddd5b1087c fix(nodes): return copies of objects in invocation ctx
Closes #6820
2024-10-25 08:26:09 -04:00
psychedelicious
008be9b846 feat(ui): add all save as options to filter 2024-10-25 08:12:14 -04:00
psychedelicious
8e7cabdc04 feat(ui): add Replace Current option to Select Object -> Save As 2024-10-25 08:12:14 -04:00
psychedelicious
a4c4237f99 feat(ui): use PiPlayFill for process buttons for filter & select object 2024-10-25 08:12:14 -04:00
psychedelicious
bda3740dcd feat(ui): use fill style icons for Filter 2024-10-25 08:12:14 -04:00
psychedelicious
5b4633baa9 feat(ui): use PiShapesFill icon for Select Object 2024-10-25 08:12:14 -04:00
psychedelicious
96351181cb feat(ui): make canvas layer toolbar icons a bit larger 2024-10-25 08:12:14 -04:00
psychedelicious
957d591d99 feat(ui): "Auto-Mask" -> "Select Object" 2024-10-25 08:12:14 -04:00
psychedelicious
75f605ba1a feat(ui): support inverted selection in auto-mask 2024-10-25 08:12:14 -04:00
psychedelicious
ab898a7180 chore(ui): typegen 2024-10-25 08:12:14 -04:00
psychedelicious
c9a4516ab1 feat(nodes): add invert to apply_tensor_mask_to_image 2024-10-25 08:12:14 -04:00
psychedelicious
fe97c0d5eb tweak(ui): default settings verbiage 2024-10-25 16:09:59 +11:00
psychedelicious
6056764840 feat(ui): disable default settings button when synced
A blue button is begging to be clicked, but clicking it will do nothing. Instead, we should communicate that no action is needed by disabling the button when the default settings are already in use.
2024-10-25 16:09:59 +11:00
psychedelicious
8747c0dbb0 fix(ui): handle no model selection in default settings tooltip 2024-10-25 16:09:59 +11:00
psychedelicious
c5cdd5f9c6 fix(ui): use const EMPTY_OBJECT to prevent rerenders 2024-10-25 16:09:59 +11:00
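
A brief TypeScript sketch of why the constant matters (the helper name is made up):

```typescript
// A new `{}` literal is a new reference on every render, so anything doing referential
// equality checks (React.memo, memoized selectors) sees a "changed" value and re-renders.
// A module-level constant keeps the reference stable.
export const EMPTY_OBJECT: Record<string, never> = {};

// Illustrative usage: fall back to the shared constant instead of `?? {}`.
export const getDefaultSettings = (settings?: Record<string, unknown>) => settings ?? EMPTY_OBJECT;
```
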
psychedelicious
abc5d53159 fix(ui): use explicit null check when comparing default settings
Using `&&` will result in false negatives for settings where a falsy value might be valid. For example, any setting for which 0 is a valid number. To be on the safe side, just use an explicit null check on all values.
2024-10-25 16:09:59 +11:00
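
The pitfall in a small TypeScript sketch (field names are illustrative):

```typescript
// "Is this default setting out of sync with the current value?"
export const isOutOfSync = (
  defaultValue: number | null | undefined,
  currentValue: number | null | undefined
): boolean => {
  // Buggy: with `&&`, a valid-but-falsy default (e.g. 0) short-circuits to false,
  // so a genuinely out-of-sync setting is reported as in sync (a false negative).
  // return Boolean(defaultValue && defaultValue !== currentValue);

  // Safe: only treat the default as "unset" when it is actually null/undefined.
  return defaultValue != null && defaultValue !== currentValue;
};
```
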
psychedelicious
2f76019a89 tweak(ui): defaults sync tooltip styling 2024-10-25 16:09:59 +11:00
Mary Hipp
3f45beb1ed feat(ui): add out of sync details to model default settings button 2024-10-25 16:09:59 +11:00
Mary Hipp
bc1126a85b (ui): add setting for showing model descriptions in dropdowns, defaulted to true 2024-10-25 14:52:33 +11:00
psychedelicious
380017041e fix(app): mutating an image also changes the in-memory cached image
We use an in-memory cache for PIL images to reduce I/O. If a node mutates the image in any way, the cached image object is also updated (but the on-disk image file is not).

We've lucked out that this hasn't caused major issues in the past (well, maybe it has but we didn't understand them?) mainly because of a happy accident. When you call `context.images.get_pil` in a node, if you provide an image mode (e.g. `mode="RGB"`), we call `convert` on the image. This returns a copy. The node can do whatever it wants to that copy and nothing breaks.

However, when mode is not specified, we return the image directly. This is where we get in trouble - nodes that load the image like this, and then mutate the image, update the cache. Other nodes that reference that same image will now get the mutated version of it.

The fix is super simple - we make sure to return only copies from `get_pil`.
2024-10-25 10:22:22 +11:00
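
A simplified Python sketch of the copy-on-return fix (not the full `ImagesInterface`):

```python
from PIL import Image


class PilImageCache:
    """Minimal stand-in for the in-memory PIL cache described above."""

    def __init__(self) -> None:
        self._cache: dict[str, Image.Image] = {}

    def get_pil(self, image_name: str, mode: str | None = None) -> Image.Image:
        image = self._cache[image_name]
        if mode and mode != image.mode:
            # convert() already returns a copy, which is why mode-specifying callers were safe.
            return image.convert(mode)
        # Without this copy, a node mutating the returned image would also mutate the cached
        # object, and every later consumer of the same image would see the mutation.
        return image.copy()
```
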
psychedelicious
ab7cdbb7e0 fix(ui): do not delete point on right-mouse click 2024-10-25 10:22:22 +11:00
psychedelicious
e5b78d0221 fix(ui): canvas drop area grid layout 2024-10-25 10:22:22 +11:00
psychedelicious
1acaa6c486 chore: bump version to v5.3.0rc2 2024-10-25 07:50:58 +11:00
psychedelicious
b0381076b7 revert(ui): drop targets for inpaint mask and rg 2024-10-25 07:42:46 +11:00
psychedelicious
ffff2d6dbb feat(ui): add New from Image submenu for image ctx menu 2024-10-25 07:42:46 +11:00
psychedelicious
afa9f07649 fix(ui): missing cursor when transforming 2024-10-25 07:42:46 +11:00
psychedelicious
addb5c49ea feat(ui): support dnd images onto inpaint mask/rg entities 2024-10-25 07:42:46 +11:00
psychedelicious
a112d2d55b feat(ui): add logging to useCopyLayerToClipboard 2024-10-25 07:42:46 +11:00
psychedelicious
619a271c8a feat(ui): disable copy to clipboard when layer is empty 2024-10-25 07:42:46 +11:00
psychedelicious
909f2ee36d feat(ui): add help tooltip to automask 2024-10-25 07:42:46 +11:00
psychedelicious
b4cf3d9d03 fix(ui): canvas context menu w/ eraser tool erases 2024-10-25 07:42:46 +11:00
87 changed files with 1557 additions and 596 deletions

View File

@@ -17,46 +17,49 @@ If you just want to use Invoke, you should use the [installer][installer link].
## Setup
1. Run through the [requirements][requirements link].
1. [Fork and clone][forking link] the [InvokeAI repo][repo link].
1. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
1. Create a python virtual environment inside the directory you just created:
2. [Fork and clone][forking link] the [InvokeAI repo][repo link].
3. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
4. Create a python virtual environment inside the directory you just created:
```sh
python3 -m venv .venv --prompt InvokeAI-Dev
```
```sh
python3 -m venv .venv --prompt InvokeAI-Dev
```
1. Activate the venv (you'll need to do this every time you want to run the app):
5. Activate the venv (you'll need to do this every time you want to run the app):
```sh
source .venv/bin/activate
```
```sh
source .venv/bin/activate
```
1. Install the repo as an [editable install][editable install link]:
6. Install the repo as an [editable install][editable install link]:
```sh
pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
```
```sh
pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
```
Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
1. Install the frontend dev toolchain:
7. Install the frontend dev toolchain:
- [`nodejs`](https://nodejs.org/) (recommend v20 LTS)
- [`pnpm`](https://pnpm.io/installation#installing-a-specific-version) (must be v8 - not v9!)
- [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
1. Do a production build of the frontend:
8. Do a production build of the frontend:
```sh
pnpm build
```
```sh
cd PATH_TO_INVOKEAI_REPO/invokeai/frontend/web
pnpm i
pnpm build
```
1. Start the application:
9. Start the application:
```sh
python scripts/invokeai-web.py
```
```sh
cd PATH_TO_INVOKEAI_REPO
python scripts/invokeai-web.py
```
1. Access the UI at `localhost:9090`.
10. Access the UI at `localhost:9090`.
## Updating the UI

View File

@@ -13,6 +13,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from diffusers.schedulers.scheduling_tcd import TCDScheduler
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
from PIL import Image
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPVisionModelWithProjection
@@ -510,6 +511,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
context: InvocationContext,
t2i_adapters: Optional[Union[T2IAdapterField, list[T2IAdapterField]]],
ext_manager: ExtensionsManager,
bgr_mode: bool = False,
) -> None:
if t2i_adapters is None:
return
@@ -519,6 +521,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapters = [t2i_adapters]
for t2i_adapter_field in t2i_adapters:
image = context.images.get_pil(t2i_adapter_field.image.image_name)
if bgr_mode: # SDXL t2i trained on cv2's BGR outputs, but PIL won't convert straight to BGR
r, g, b = image.split()
image = Image.merge("RGB", (b, g, r))
ext_manager.add_extension(
T2IAdapterExt(
node_context=context,
@@ -623,6 +629,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
max_unet_downscale = 8
elif t2i_adapter_model_config.base == BaseModelType.StableDiffusionXL:
max_unet_downscale = 4
# SDXL adapters are trained on cv2's BGR outputs
r, g, b = image.split()
image = Image.merge("RGB", (b, g, r))
else:
raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.")
@@ -900,7 +910,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
# ext = extension_field.to_extension(exit_stack, context, ext_manager)
# ext_manager.add_extension(ext)
self.parse_controlnet_field(exit_stack, context, self.control, ext_manager)
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager)
bgr_mode = self.unet.unet.base == BaseModelType.StableDiffusionXL
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager, bgr_mode)
# ext: t2i/ip adapter
ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx)

View File

@@ -165,6 +165,7 @@ class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
mask: TensorField = InputField(description="The mask tensor to apply.")
image: ImageField = InputField(description="The image to apply the mask to.")
invert: bool = InputField(default=False, description="Whether to invert the mask.")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, mode="RGBA")
@@ -179,6 +180,9 @@ class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
mask = mask > 0.5
mask_np = (mask.float() * 255).byte().cpu().numpy().astype(np.uint8)
if self.invert:
mask_np = 255 - mask_np
# Apply the mask only to the alpha channel where the original alpha is non-zero. This preserves the original
# image's transparency - else the transparent regions would end up as opaque black.

View File

@@ -1,3 +1,4 @@
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Optional, Union
@@ -221,7 +222,7 @@ class ImagesInterface(InvocationContextInterface):
)
def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image:
"""Gets an image as a PIL Image object.
"""Gets an image as a PIL Image object. This method returns a copy of the image.
Args:
image_name: The name of the image to get.
@@ -233,11 +234,15 @@ class ImagesInterface(InvocationContextInterface):
image = self._services.images.get_pil_image(image_name)
if mode and mode != image.mode:
try:
# convert makes a copy!
image = image.convert(mode)
except ValueError:
self._services.logger.warning(
f"Could not convert image from {image.mode} to {mode}. Using original mode instead."
)
else:
# copy the image to prevent the user from modifying the original
image = image.copy()
return image
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
@@ -290,15 +295,15 @@ class TensorsInterface(InvocationContextInterface):
return name
def load(self, name: str) -> Tensor:
"""Loads a tensor by name.
"""Loads a tensor by name. This method returns a copy of the tensor.
Args:
name: The name of the tensor to load.
Returns:
The loaded tensor.
The tensor.
"""
return self._services.tensors.load(name)
return self._services.tensors.load(name).clone()
class ConditioningInterface(InvocationContextInterface):
@@ -316,16 +321,16 @@ class ConditioningInterface(InvocationContextInterface):
return name
def load(self, name: str) -> ConditioningFieldData:
"""Loads conditioning data by name.
"""Loads conditioning data by name. This method returns a copy of the conditioning data.
Args:
name: The name of the conditioning data to load.
Returns:
The loaded conditioning data.
The conditioning data.
"""
return self._services.conditioning.load(name)
return deepcopy(self._services.conditioning.load(name))
class ModelsInterface(InvocationContextInterface):

View File

@@ -117,8 +117,6 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
load_class = load_classes[config.base][config.variant]
except KeyError as e:
raise Exception(f"No diffusers pipeline known for base={config.base}, variant={config.variant}") from e
prediction_type = config.prediction_type.value
upcast_attention = config.upcast_attention
# Without SilenceWarnings we get log messages like this:
# site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
@@ -129,13 +127,7 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
# ['text_model.embeddings.position_ids']
with SilenceWarnings():
pipeline = load_class.from_single_file(
config.path,
torch_dtype=self._torch_dtype,
prediction_type=prediction_type,
upcast_attention=upcast_attention,
load_safety_checker=False,
)
pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)
if not submodel_type:
return pipeline

View File

@@ -20,7 +20,7 @@ from typing import Optional
import requests
from huggingface_hub import HfApi, configure_http_backend, hf_hub_url
from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError
from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError
from pydantic.networks import AnyHttpUrl
from requests.sessions import Session

View File

@@ -33,7 +33,7 @@ class PreviewExt(ExtensionBase):
def initial_preview(self, ctx: DenoiseContext):
self.callback(
PipelineIntermediateState(
step=-1,
step=0,
order=ctx.scheduler.order,
total_steps=len(ctx.inputs.timesteps),
timestep=int(ctx.scheduler.config.num_train_timesteps), # TODO: is there any code which uses it?

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import diffusers
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalControlNetMixin
from diffusers.loaders.single_file_model import FromOriginalModelMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
from diffusers.models.embeddings import (
@@ -32,7 +32,9 @@ from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger(__name__)
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
# NOTE(ryand): I'm not the origina author of this code, but for future reference, it appears that this class was copied
# from diffusers in order to add support for the encoder_attention_mask argument.
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
"""
A ControlNet model.

View File

@@ -682,7 +682,8 @@
"recallParameters": "Recall Parameters",
"recallParameter": "Recall {{label}}",
"scheduler": "Scheduler",
"seamless": "Seamless",
"seamlessXAxis": "Seamless X Axis",
"seamlessYAxis": "Seamless Y Axis",
"seed": "Seed",
"steps": "Steps",
"strength": "Image to image strength",
@@ -713,8 +714,12 @@
"convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"noDefaultSettings": "No default settings configured for this model. Visit the Model Manager to add default settings.",
"defaultSettings": "Default Settings",
"defaultSettingsSaved": "Default Settings Saved",
"defaultSettingsOutOfSync": "Some settings do not match the model's defaults:",
"restoreDefaultSettings": "Click to use the model's default settings.",
"usingDefaultSettings": "Using model's default settings",
"delete": "Delete",
"deleteConfig": "Delete Config",
"deleteModel": "Delete Model",
@@ -799,7 +804,6 @@
"uploadImage": "Upload Image",
"urlOrLocalPath": "URL or Local Path",
"urlOrLocalPathHelper": "URLs should point to a single file. Local paths can point to a single file or folder for a single diffusers model.",
"useDefaultSettings": "Use Default Settings",
"vae": "VAE",
"vaePrecision": "VAE Precision",
"variant": "Variant",
@@ -1109,6 +1113,9 @@
"enableInformationalPopovers": "Enable Informational Popovers",
"informationalPopoversDisabled": "Informational Popovers Disabled",
"informationalPopoversDisabledDesc": "Informational popovers have been disabled. Enable them in Settings.",
"enableModelDescriptions": "Enable Model Descriptions in Dropdowns",
"modelDescriptionsDisabled": "Model Descriptions in Dropdowns Disabled",
"modelDescriptionsDisabledDesc": "Model descriptions in dropdowns have been disabled. Enable them in Settings.",
"enableInvisibleWatermark": "Enable Invisible Watermark",
"enableNSFWChecker": "Enable NSFW Checker",
"general": "General",
@@ -1676,6 +1683,8 @@
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
"regionalGuidance": "Regional Guidance",
"canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
"canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
"referenceImage": "Reference Image",
"regionalReferenceImage": "Regional Reference Image",
"globalReferenceImage": "Global Reference Image",
@@ -1751,6 +1760,7 @@
"newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
"newCanvasSession": "New Canvas Session",
"newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
"replaceCurrent": "Replace Current",
"controlMode": {
"controlMode": "Control Mode",
"balanced": "Balanced",
@@ -1880,16 +1890,24 @@
"apply": "Apply",
"cancel": "Cancel"
},
"segment": {
"autoMask": "Auto Mask",
"selectObject": {
"selectObject": "Select Object",
"pointType": "Point Type",
"invertSelection": "Invert Selection",
"include": "Include",
"exclude": "Exclude",
"neutral": "Neutral",
"apply": "Apply",
"reset": "Reset",
"saveAs": "Save As",
"cancel": "Cancel",
"process": "Process"
"process": "Process",
"help1": "Select a single target object. Add <Bold>Include</Bold> and <Bold>Exclude</Bold> points to indicate which parts of the layer are part of the target object.",
"help2": "Start with one <Bold>Include</Bold> point within the target object. Add more points to refine the selection. Fewer points typically produce better results.",
"help3": "Invert the selection to select everything except the target object.",
"clickToAdd": "Click on the layer to add a point",
"dragToMove": "Drag a point to move it",
"clickToRemove": "Click on a point to remove it"
},
"settings": {
"snapToGrid": {
@@ -1930,6 +1948,8 @@
"newRegionalReferenceImage": "New Regional Reference Image",
"newControlLayer": "New Control Layer",
"newRasterLayer": "New Raster Layer",
"newInpaintMask": "New Inpaint Mask",
"newRegionalGuidance": "New Regional Guidance",
"cropCanvasToBbox": "Crop Canvas to Bbox"
},
"stagingArea": {
@@ -2062,13 +2082,11 @@
},
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"canvasV2Announcement": {
"newCanvas": "A powerful new control canvas",
"newLayerTypes": "New layer types for even more control",
"fluxSupport": "Support for the Flux family of models",
"readReleaseNotes": "Read Release Notes",
"watchReleaseVideo": "Watch Release Video",
"watchUiUpdatesOverview": "Watch UI Updates Overview"
}
"line1": "<ItalicComponent>Select Object</ItalicComponent> tool for precise object selection and editing",
"line2": "Expanded Flux support, now with Global Reference Images",
"line3": "Improved tooltips and context menus",
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
"watchUiUpdatesOverview": "Watch UI Updates Overview"
}
}

View File

@@ -8,6 +8,7 @@ import {
controlLayerAdded,
entityRasterized,
entitySelected,
inpaintMaskAdded,
rasterLayerAdded,
referenceImageAdded,
referenceImageIPAdapterImageChanged,
@@ -17,6 +18,7 @@ import {
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type {
CanvasControlLayerState,
CanvasInpaintMaskState,
CanvasRasterLayerState,
CanvasReferenceImageState,
CanvasRegionalGuidanceState,
@@ -110,6 +112,46 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
return;
}
/**
/**
* Image dropped on Inpaint Mask
*/
if (
overData.actionType === 'ADD_INPAINT_MASK_FROM_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const overrides: Partial<CanvasInpaintMaskState> = {
objects: [imageObject],
position: { x, y },
};
dispatch(inpaintMaskAdded({ overrides, isSelected: true }));
return;
}
/**
/**
* Image dropped on Regional Guidance
*/
if (
overData.actionType === 'ADD_REGIONAL_GUIDANCE_FROM_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const overrides: Partial<CanvasRegionalGuidanceState> = {
objects: [imageObject],
position: { x, y },
};
dispatch(rgAdded({ overrides, isSelected: true }));
return;
}
/**
* Image dropped on Raster layer
*/

View File

@@ -4,6 +4,7 @@ import { useAppSelector } from 'app/store/storeHooks';
import type { GroupBase } from 'chakra-react-select';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { groupBy, reduce } from 'lodash-es';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -16,7 +17,6 @@ type UseGroupedModelComboboxArg<T extends AnyModelConfig> = {
getIsDisabled?: (model: T) => boolean;
isLoading?: boolean;
groupByType?: boolean;
showDescriptions?: boolean;
};
type UseGroupedModelComboboxReturn = {
@@ -38,15 +38,8 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
): UseGroupedModelComboboxReturn => {
const { t } = useTranslation();
const base = useAppSelector(selectBaseWithSDXLFallback);
const {
modelConfigs,
selectedModel,
getIsDisabled,
onChange,
isLoading,
groupByType = false,
showDescriptions = false,
} = arg;
const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
if (!modelConfigs) {
return [];
@@ -60,7 +53,7 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
options: val.map((model) => ({
label: model.name,
value: model.key,
description: (showDescriptions && model.description) || undefined,
description: (shouldShowModelDescriptions && model.description) || undefined,
isDisabled: getIsDisabled ? getIsDisabled(model) : false,
})),
});
@@ -70,7 +63,7 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
);
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base) ? -1 : 1));
return _options;
}, [modelConfigs, groupByType, getIsDisabled, base, showDescriptions]);
}, [modelConfigs, groupByType, getIsDisabled, base, shouldShowModelDescriptions]);
const value = useMemo(
() =>

View File

@@ -1,5 +1,7 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import type { AnyModelConfig } from 'services/api/types';
@@ -24,13 +26,16 @@ type UseModelComboboxReturn = {
export const useModelCombobox = <T extends AnyModelConfig>(arg: UseModelComboboxArg<T>): UseModelComboboxReturn => {
const { t } = useTranslation();
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, optionsFilter = () => true } = arg;
const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
const options = useMemo<ComboboxOption[]>(() => {
return modelConfigs.filter(optionsFilter).map((model) => ({
label: model.name,
value: model.key,
description: (shouldShowModelDescriptions && model.description) || undefined,
isDisabled: getIsDisabled ? getIsDisabled(model) : false,
}));
}, [optionsFilter, getIsDisabled, modelConfigs]);
}, [optionsFilter, getIsDisabled, modelConfigs, shouldShowModelDescriptions]);
const value = useMemo(
() => options.find((m) => (selectedModel ? m.value === selectedModel.key : false)),

View File

@@ -13,7 +13,7 @@ export const CanvasAlertsPreserveMask = memo(() => {
}
return (
<Alert status="warning" borderRadius="base" fontSize="sm" shadow="md" w="fit-content" alignSelf="flex-end">
<Alert status="warning" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
<AlertIcon />
<AlertTitle>{t('controlLayers.settings.preserveMask.alert')}</AlertTitle>
</Alert>

View File

@@ -98,7 +98,7 @@ const CanvasAlertsSelectedEntityStatusContent = memo(({ entityIdentifier, adapte
}
return (
<Alert status={alert.status} borderRadius="base" fontSize="sm" shadow="md" w="fit-content" alignSelf="flex-end">
<Alert status={alert.status} borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
<AlertIcon />
<AlertTitle>{alert.title}</AlertTitle>
</Alert>

View File

@@ -132,7 +132,6 @@ const AlertWrapper = ({
fontSize="sm"
shadow="md"
w="fit-content"
alignSelf="flex-end"
>
<Flex w="full" alignItems="center">
<AlertIcon />

View File

@@ -1,3 +1,4 @@
import { MenuGroup } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { ControlLayerMenuItems } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItems';
import { InpaintMaskMenuItems } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItems';
@@ -8,7 +9,9 @@ import {
EntityIdentifierContext,
useEntityIdentifierContext,
} from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useEntityTypeString } from 'features/controlLayers/hooks/useEntityTypeString';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -46,9 +49,20 @@ export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {
return (
<EntityIdentifierContext.Provider value={selectedEntityIdentifier}>
<CanvasContextMenuSelectedEntityMenuItemsContent />
<CanvasContextMenuSelectedEntityMenuGroup>
<CanvasContextMenuSelectedEntityMenuItemsContent />
</CanvasContextMenuSelectedEntityMenuGroup>
</EntityIdentifierContext.Provider>
);
});
CanvasContextMenuSelectedEntityMenuItems.displayName = 'CanvasContextMenuSelectedEntityMenuItems';
const CanvasContextMenuSelectedEntityMenuGroup = memo((props: PropsWithChildren) => {
const entityIdentifier = useEntityIdentifierContext();
const title = useEntityTypeString(entityIdentifier.type);
return <MenuGroup title={title}>{props.children}</MenuGroup>;
});
CanvasContextMenuSelectedEntityMenuGroup.displayName = 'CanvasContextMenuSelectedEntityMenuGroup';

View File

@@ -62,6 +62,7 @@ export const CanvasDropArea = memo(() => {
data={addControlLayerFromImageDropData}
/>
</GridItem>
<GridItem position="relative">
<IAIDroppable
dropLabel={t('controlLayers.canvasContextMenu.newRegionalReferenceImage')}

View File

@@ -29,7 +29,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
<Menu>
<MenuButton
as={IconButton}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
tooltip={t('controlLayers.addLayer')}

View File

@@ -1,10 +1,10 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListGlobalActionBarAddLayerMenu } from 'features/controlLayers/components/CanvasEntityList/EntityListGlobalActionBarAddLayerMenu';
import { EntityListSelectedEntityActionBarAutoMaskButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarAutoMaskButton';
import { EntityListSelectedEntityActionBarDuplicateButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarDuplicateButton';
import { EntityListSelectedEntityActionBarFill } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFill';
import { EntityListSelectedEntityActionBarFilterButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFilterButton';
import { EntityListSelectedEntityActionBarOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarOpacity';
import { EntityListSelectedEntityActionBarSelectObjectButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarSelectObjectButton';
import { EntityListSelectedEntityActionBarTransformButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarTransformButton';
import { memo } from 'react';
@@ -17,7 +17,7 @@ export const EntityListSelectedEntityActionBar = memo(() => {
<Spacer />
<EntityListSelectedEntityActionBarFill />
<Flex h="full">
<EntityListSelectedEntityActionBarAutoMaskButton />
<EntityListSelectedEntityActionBarSelectObjectButton />
<EntityListSelectedEntityActionBarFilterButton />
<EntityListSelectedEntityActionBarTransformButton />
<EntityListSelectedEntityActionBarSaveToAssetsButton />

View File

@@ -23,7 +23,7 @@ export const EntityListSelectedEntityActionBarDuplicateButton = memo(() => {
<IconButton
onClick={onClick}
isDisabled={!selectedEntityIdentifier || isBusy}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.duplicate')}

View File

@@ -5,7 +5,7 @@ import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/sel
import { isFilterableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShootingStarBold } from 'react-icons/pi';
import { PiShootingStarFill } from 'react-icons/pi';
export const EntityListSelectedEntityActionBarFilterButton = memo(() => {
const { t } = useTranslation();
@@ -24,12 +24,12 @@ export const EntityListSelectedEntityActionBarFilterButton = memo(() => {
<IconButton
onClick={filter.start}
isDisabled={filter.isDisabled}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.filter.filter')}
tooltip={t('controlLayers.filter.filter')}
icon={<PiShootingStarBold />}
icon={<PiShootingStarFill />}
/>
);
});

View File

@@ -31,7 +31,7 @@ export const EntityListSelectedEntityActionBarSaveToAssetsButton = memo(() => {
<IconButton
onClick={onClick}
isDisabled={!selectedEntityIdentifier || isBusy}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.saveLayerToAssets')}

View File

@@ -5,9 +5,9 @@ import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/sel
import { isSegmentableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMaskHappyBold } from 'react-icons/pi';
import { PiShapesFill } from 'react-icons/pi';
export const EntityListSelectedEntityActionBarAutoMaskButton = memo(() => {
export const EntityListSelectedEntityActionBarSelectObjectButton = memo(() => {
const { t } = useTranslation();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const segment = useEntitySegmentAnything(selectedEntityIdentifier);
@@ -24,14 +24,14 @@ export const EntityListSelectedEntityActionBarAutoMaskButton = memo(() => {
<IconButton
onClick={segment.start}
isDisabled={segment.isDisabled}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.segment.autoMask')}
tooltip={t('controlLayers.segment.autoMask')}
icon={<PiMaskHappyBold />}
aria-label={t('controlLayers.selectObject.selectObject')}
tooltip={t('controlLayers.selectObject.selectObject')}
icon={<PiShapesFill />}
/>
);
});
EntityListSelectedEntityActionBarAutoMaskButton.displayName = 'EntityListSelectedEntityActionBarAutoMaskButton';
EntityListSelectedEntityActionBarSelectObjectButton.displayName = 'EntityListSelectedEntityActionBarSelectObjectButton';

View File

@@ -24,7 +24,7 @@ export const EntityListSelectedEntityActionBarTransformButton = memo(() => {
<IconButton
onClick={transform.start}
isDisabled={transform.isDisabled}
size="sm"
minW={8}
variant="link"
alignSelf="stretch"
aria-label={t('controlLayers.transform.transform')}

View File

@@ -10,7 +10,7 @@ import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea
import { Filter } from 'features/controlLayers/components/Filters/Filter';
import { CanvasHUD } from 'features/controlLayers/components/HUD/CanvasHUD';
import { InvokeCanvasComponent } from 'features/controlLayers/components/InvokeCanvasComponent';
import { SegmentAnything } from 'features/controlLayers/components/SegmentAnything/SegmentAnything';
import { SelectObject } from 'features/controlLayers/components/SelectObject/SelectObject';
import { StagingAreaIsStagingGate } from 'features/controlLayers/components/StagingArea/StagingAreaIsStagingGate';
import { StagingAreaToolbar } from 'features/controlLayers/components/StagingArea/StagingAreaToolbar';
import { CanvasToolbar } from 'features/controlLayers/components/Toolbar/CanvasToolbar';
@@ -71,12 +71,16 @@ export const CanvasMainPanelContent = memo(() => {
>
<InvokeCanvasComponent />
<CanvasManagerProviderGate>
{showHUD && (
<Flex position="absolute" top={1} insetInlineStart={1} pointerEvents="none">
<CanvasHUD />
</Flex>
)}
<Flex flexDir="column" position="absolute" top={1} insetInlineEnd={1} pointerEvents="none" gap={2}>
<Flex
position="absolute"
flexDir="column"
top={1}
insetInlineStart={1}
pointerEvents="none"
gap={2}
alignItems="flex-start"
>
{showHUD && <CanvasHUD />}
<CanvasAlertsSelectedEntityStatus />
<CanvasAlertsPreserveMask />
<CanvasAlertsSendingToGallery />
@@ -102,7 +106,7 @@ export const CanvasMainPanelContent = memo(() => {
<CanvasManagerProviderGate>
<Filter />
<Transform />
<SegmentAnything />
<SelectObject />
</CanvasManagerProviderGate>
</Flex>
<CanvasDropArea />

View File

@@ -21,7 +21,7 @@ import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/s
import type { CanvasEntityIdentifier, ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold, PiShootingStarBold, PiUploadBold } from 'react-icons/pi';
import { PiBoundingBoxBold, PiShootingStarFill, PiUploadBold } from 'react-icons/pi';
import type { ControlNetModelConfig, PostUploadAction, T2IAdapterModelConfig } from 'services/api/types';
const useControlLayerControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) => {
@@ -93,7 +93,7 @@ export const ControlLayerControlAdapter = memo(() => {
variant="link"
aria-label={t('controlLayers.filter.filter')}
tooltip={t('controlLayers.filter.filter')}
icon={<PiShootingStarBold />}
icon={<PiShootingStarFill />}
/>
<IconButton
onClick={pullBboxIntoLayer}

View File

@@ -6,7 +6,7 @@ import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/c
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/common/CanvasEntityMenuItemsFilter';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsSegment } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSegment';
import { CanvasEntityMenuItemsSelectObject } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSelectObject';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { ControlLayerMenuItemsConvertToSubMenu } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsConvertToSubMenu';
import { ControlLayerMenuItemsCopyToSubMenu } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsCopyToSubMenu';
@@ -24,14 +24,13 @@ export const ControlLayerMenuItems = memo(() => {
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<CanvasEntityMenuItemsFilter />
<CanvasEntityMenuItemsSegment />
<CanvasEntityMenuItemsSelectObject />
<ControlLayerMenuItemsTransparencyEffect />
<MenuDivider />
<ControlLayerMenuItemsCopyToSubMenu />
<ControlLayerMenuItemsConvertToSubMenu />
<CanvasEntityMenuItemsCropToBbox />
<CanvasEntityMenuItemsSave />
<MenuDivider />
<ControlLayerMenuItemsConvertToSubMenu />
<ControlLayerMenuItemsCopyToSubMenu />
</>
);
});

View File

@@ -1,4 +1,15 @@
import { Button, ButtonGroup, Flex, Heading, Spacer } from '@invoke-ai/ui-library';
import {
Button,
ButtonGroup,
Flex,
Heading,
Menu,
MenuButton,
MenuItem,
MenuList,
Spacer,
Spinner,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
@@ -15,7 +26,7 @@ import { IMAGE_FILTERS } from 'features/controlLayers/store/filters';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';
import { PiCaretDownBold } from 'react-icons/pi';
const FilterContent = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
@@ -25,7 +36,7 @@ const FilterContent = memo(
const config = useStore(adapter.filterer.$filterConfig);
const isCanvasFocused = useIsRegionFocused('canvas');
const isProcessing = useStore(adapter.filterer.$isProcessing);
const hasProcessed = useStore(adapter.filterer.$hasProcessed);
const hasImageState = useStore(adapter.filterer.$hasImageState);
const autoProcess = useAppSelector(selectAutoProcess);
const onChangeFilterConfig = useCallback(
@@ -46,6 +57,22 @@ const FilterContent = memo(
return IMAGE_FILTERS[config.type].validateConfig?.(config as never) ?? true;
}, [config]);
const saveAsInpaintMask = useCallback(() => {
adapter.filterer.saveAs('inpaint_mask');
}, [adapter.filterer]);
const saveAsRegionalGuidance = useCallback(() => {
adapter.filterer.saveAs('regional_guidance');
}, [adapter.filterer]);
const saveAsRasterLayer = useCallback(() => {
adapter.filterer.saveAs('raster_layer');
}, [adapter.filterer]);
const saveAsControlLayer = useCallback(() => {
adapter.filterer.saveAs('control_layer');
}, [adapter.filterer]);
useRegisteredHotkeys({
id: 'applyFilter',
category: 'canvas',
@@ -89,40 +116,56 @@ const FilterContent = memo(
<ButtonGroup isAttached={false} size="sm" w="full">
<Button
variant="ghost"
leftIcon={<PiShootingStarBold />}
onClick={adapter.filterer.processImmediate}
isLoading={isProcessing}
loadingText={t('controlLayers.filter.process')}
isDisabled={!isValid || autoProcess}
isDisabled={isProcessing || !isValid || (autoProcess && hasImageState)}
>
{t('controlLayers.filter.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
</Button>
<Spacer />
<Button
leftIcon={<PiArrowsCounterClockwiseBold />}
onClick={adapter.filterer.reset}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('controlLayers.filter.reset')}
variant="ghost"
>
{t('controlLayers.filter.reset')}
</Button>
<Button
variant="ghost"
leftIcon={<PiCheckBold />}
onClick={adapter.filterer.apply}
isLoading={isProcessing}
loadingText={t('controlLayers.filter.apply')}
isDisabled={!isValid || !hasProcessed}
variant="ghost"
isDisabled={isProcessing || !isValid || !hasImageState}
>
{t('controlLayers.filter.apply')}
</Button>
<Button
variant="ghost"
leftIcon={<PiXBold />}
onClick={adapter.filterer.cancel}
loadingText={t('controlLayers.filter.cancel')}
>
<Menu>
<MenuButton
as={Button}
loadingText={t('controlLayers.selectObject.saveAs')}
variant="ghost"
isDisabled={isProcessing || !isValid || !hasImageState}
rightIcon={<PiCaretDownBold />}
>
{t('controlLayers.selectObject.saveAs')}
</MenuButton>
<MenuList>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.newInpaintMask')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.newRegionalGuidance')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.newControlLayer')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !isValid || !hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.newRasterLayer')}
</MenuItem>
</MenuList>
</Menu>
<Button variant="ghost" onClick={adapter.filterer.cancel} loadingText={t('controlLayers.filter.cancel')}>
{t('controlLayers.filter.cancel')}
</Button>
</ButtonGroup>

View File

@@ -14,7 +14,7 @@ type Props = {
};
export const InpaintMask = memo(({ id }: Props) => {
const entityIdentifier = useMemo<CanvasEntityIdentifier>(() => ({ id, type: 'inpaint_mask' }), [id]);
const entityIdentifier = useMemo<CanvasEntityIdentifier<'inpaint_mask'>>(() => ({ id, type: 'inpaint_mask' }), [id]);
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>

View File

@@ -20,10 +20,9 @@ export const InpaintMaskMenuItems = memo(() => {
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<MenuDivider />
<CanvasEntityMenuItemsCropToBbox />
<MenuDivider />
<InpaintMaskMenuItemsConvertToSubMenu />
<InpaintMaskMenuItemsCopyToSubMenu />
<InpaintMaskMenuItemsConvertToSubMenu />
<CanvasEntityMenuItemsCropToBbox />
</>
);
});

View File

@@ -6,7 +6,7 @@ import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/c
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/common/CanvasEntityMenuItemsFilter';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsSegment } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSegment';
import { CanvasEntityMenuItemsSelectObject } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSelectObject';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { RasterLayerMenuItemsConvertToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsConvertToSubMenu';
import { RasterLayerMenuItemsCopyToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsCopyToSubMenu';
@@ -23,13 +23,12 @@ export const RasterLayerMenuItems = memo(() => {
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<CanvasEntityMenuItemsFilter />
<CanvasEntityMenuItemsSegment />
<CanvasEntityMenuItemsSelectObject />
<MenuDivider />
<RasterLayerMenuItemsCopyToSubMenu />
<RasterLayerMenuItemsConvertToSubMenu />
<CanvasEntityMenuItemsCropToBbox />
<CanvasEntityMenuItemsSave />
<MenuDivider />
<RasterLayerMenuItemsConvertToSubMenu />
<RasterLayerMenuItemsCopyToSubMenu />
</>
);
});

View File

@@ -16,7 +16,10 @@ type Props = {
};
export const RegionalGuidance = memo(({ id }: Props) => {
const entityIdentifier = useMemo<CanvasEntityIdentifier>(() => ({ id, type: 'regional_guidance' }), [id]);
const entityIdentifier = useMemo<CanvasEntityIdentifier<'regional_guidance'>>(
() => ({ id, type: 'regional_guidance' }),
[id]
);
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>

View File

@@ -25,10 +25,9 @@ export const RegionalGuidanceMenuItems = memo(() => {
<CanvasEntityMenuItemsTransform />
<RegionalGuidanceMenuItemsAutoNegative />
<MenuDivider />
<CanvasEntityMenuItemsCropToBbox />
<MenuDivider />
<RegionalGuidanceMenuItemsConvertToSubMenu />
<RegionalGuidanceMenuItemsCopyToSubMenu />
<RegionalGuidanceMenuItemsConvertToSubMenu />
<CanvasEntityMenuItemsCropToBbox />
</>
);
});

View File

@@ -3,28 +3,36 @@ import {
ButtonGroup,
Flex,
Heading,
Icon,
ListItem,
Menu,
MenuButton,
MenuItem,
MenuList,
Spacer,
Spinner,
Text,
Tooltip,
UnorderedList,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import { CanvasAutoProcessSwitch } from 'features/controlLayers/components/CanvasAutoProcessSwitch';
import { CanvasOperationIsolatedLayerPreviewSwitch } from 'features/controlLayers/components/CanvasOperationIsolatedLayerPreviewSwitch';
import { SegmentAnythingPointType } from 'features/controlLayers/components/SegmentAnything/SegmentAnythingPointType';
import { SelectObjectInvert } from 'features/controlLayers/components/SelectObject/SelectObjectInvert';
import { SelectObjectPointType } from 'features/controlLayers/components/SelectObject/SelectObjectPointType';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import type { PropsWithChildren } from 'react';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiFloppyDiskBold, PiStarBold, PiXBold } from 'react-icons/pi';
import { Trans, useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiInfoBold } from 'react-icons/pi';
const SegmentAnythingContent = memo(
const SelectObjectContent = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
const { t } = useTranslation();
const ref = useRef<HTMLDivElement>(null);
@@ -81,72 +89,86 @@ const SegmentAnythingContent = memo(
transitionProperty="height"
transitionDuration="normal"
>
<Flex w="full" gap={4}>
<Heading size="md" color="base.300" userSelect="none">
{t('controlLayers.segment.autoMask')}
</Heading>
<Flex w="full" gap={4} alignItems="center">
<Flex gap={2}>
<Heading size="md" color="base.300" userSelect="none">
{t('controlLayers.selectObject.selectObject')}
</Heading>
<Tooltip label={<SelectObjectHelpTooltipContent />}>
<Flex alignItems="center">
<Icon as={PiInfoBold} color="base.500" />
</Flex>
</Tooltip>
</Flex>
<Spacer />
<CanvasAutoProcessSwitch />
<CanvasOperationIsolatedLayerPreviewSwitch />
</Flex>
<SegmentAnythingPointType adapter={adapter} />
<Flex w="full" justifyContent="space-between" py={2}>
<SelectObjectPointType adapter={adapter} />
<SelectObjectInvert adapter={adapter} />
</Flex>
<ButtonGroup isAttached={false} size="sm" w="full">
<Button
leftIcon={<PiStarBold />}
onClick={adapter.segmentAnything.processImmediate}
isLoading={isProcessing}
loadingText={t('controlLayers.segment.process')}
loadingText={t('controlLayers.selectObject.process')}
variant="ghost"
isDisabled={!hasPoints || autoProcess}
isDisabled={isProcessing || !hasPoints || (autoProcess && hasImageState)}
>
{t('controlLayers.segment.process')}
{t('controlLayers.selectObject.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
</Button>
<Spacer />
<Button
leftIcon={<PiArrowsCounterClockwiseBold />}
onClick={adapter.segmentAnything.reset}
isLoading={isProcessing}
loadingText={t('controlLayers.segment.reset')}
isDisabled={isProcessing || !hasPoints}
loadingText={t('controlLayers.selectObject.reset')}
variant="ghost"
>
{t('controlLayers.segment.reset')}
{t('controlLayers.selectObject.reset')}
</Button>
<Button
onClick={adapter.segmentAnything.apply}
loadingText={t('controlLayers.selectObject.apply')}
variant="ghost"
isDisabled={isProcessing || !hasImageState}
>
{t('controlLayers.selectObject.apply')}
</Button>
<Menu>
<MenuButton
as={Button}
leftIcon={<PiFloppyDiskBold />}
isLoading={isProcessing}
loadingText={t('controlLayers.segment.saveAs')}
loadingText={t('controlLayers.selectObject.saveAs')}
variant="ghost"
isDisabled={!hasImageState}
isDisabled={isProcessing || !hasImageState}
rightIcon={<PiCaretDownBold />}
>
{t('controlLayers.segment.saveAs')}
{t('controlLayers.selectObject.saveAs')}
</MenuButton>
<MenuList>
<MenuItem isDisabled={!hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.inpaintMask')}
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.newInpaintMask')}
</MenuItem>
<MenuItem isDisabled={!hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.regionalGuidance')}
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.newRegionalGuidance')}
</MenuItem>
<MenuItem isDisabled={!hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.controlLayer')}
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.newControlLayer')}
</MenuItem>
<MenuItem isDisabled={!hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.rasterLayer')}
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.newRasterLayer')}
</MenuItem>
</MenuList>
</Menu>
<Button
leftIcon={<PiXBold />}
onClick={adapter.segmentAnything.cancel}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('common.cancel')}
variant="ghost"
>
{t('controlLayers.segment.cancel')}
{t('controlLayers.selectObject.cancel')}
</Button>
</ButtonGroup>
</Flex>
@@ -154,9 +176,9 @@ const SegmentAnythingContent = memo(
}
);
SegmentAnythingContent.displayName = 'SegmentAnythingContent';
SelectObjectContent.displayName = 'SegmentAnythingContent';
export const SegmentAnything = () => {
export const SelectObject = memo(() => {
const canvasManager = useCanvasManager();
const adapter = useStore(canvasManager.stateApi.$segmentingAdapter);
@@ -164,5 +186,38 @@ export const SegmentAnything = () => {
return null;
}
return <SegmentAnythingContent adapter={adapter} />;
};
return <SelectObjectContent adapter={adapter} />;
});
SelectObject.displayName = 'SelectObject';
const Bold = (props: PropsWithChildren) => (
<Text as="span" fontWeight="semibold">
{props.children}
</Text>
);
const SelectObjectHelpTooltipContent = memo(() => {
const { t } = useTranslation();
return (
<Flex gap={3} flexDir="column">
<Text>
<Trans i18nKey="controlLayers.selectObject.help1" components={{ Bold: <Bold /> }} />
</Text>
<Text>
<Trans i18nKey="controlLayers.selectObject.help2" components={{ Bold: <Bold /> }} />
</Text>
<Text>
<Trans i18nKey="controlLayers.selectObject.help3" />
</Text>
<UnorderedList>
<ListItem>{t('controlLayers.selectObject.clickToAdd')}</ListItem>
<ListItem>{t('controlLayers.selectObject.dragToMove')}</ListItem>
<ListItem>{t('controlLayers.selectObject.clickToRemove')}</ListItem>
</UnorderedList>
</Flex>
);
});
SelectObjectHelpTooltipContent.displayName = 'SelectObjectHelpTooltipContent';

View File

@@ -0,0 +1,26 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const SelectObjectInvert = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
const { t } = useTranslation();
const invert = useStore(adapter.segmentAnything.$invert);
const onChange = useCallback(() => {
adapter.segmentAnything.$invert.set(!adapter.segmentAnything.$invert.get());
}, [adapter.segmentAnything.$invert]);
return (
<FormControl w="min-content">
<FormLabel m={0}>{t('controlLayers.selectObject.invertSelection')}</FormLabel>
<Switch size="sm" isChecked={invert} onChange={onChange} />
</FormControl>
);
}
);
SelectObjectInvert.displayName = 'SelectObjectInvert';
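For reference, a minimal standalone sketch (not the actual module) of the nanostores pattern used by SelectObjectInvert and the $hasImageState flag: a writable atom holds the invert flag, a computed atom derives a boolean from another atom, and consumers subscribe. The atom names and values below are illustrative only.

import { atom, computed } from 'nanostores';

const $invert = atom<boolean>(false);
const $imageState = atom<{ id: string } | null>(null);
// Derived flag recomputes whenever $imageState changes, mirroring $hasImageState above.
const $hasImageState = computed($imageState, (imageState) => imageState !== null);

// Read-modify-write toggle, same shape as the onChange callback in SelectObjectInvert.
const toggleInvert = () => $invert.set(!$invert.get());

$hasImageState.listen((hasImage) => console.log('has image:', hasImage));
toggleInvert();
$imageState.set({ id: 'img_1' }); // logs "has image: true"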

View File

@@ -6,7 +6,7 @@ import { SAM_POINT_LABEL_STRING_TO_NUMBER, zSAMPointLabelString } from 'features
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const SegmentAnythingPointType = memo(
export const SelectObjectPointType = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
const { t } = useTranslation();
const pointType = useStore(adapter.segmentAnything.$pointTypeString);
@@ -21,15 +21,15 @@ export const SegmentAnythingPointType = memo(
);
return (
<FormControl w="full">
<FormLabel>{t('controlLayers.segment.pointType')}</FormLabel>
<FormControl w="min-content">
<FormLabel m={0}>{t('controlLayers.selectObject.pointType')}</FormLabel>
<RadioGroup value={pointType} onChange={onChange} w="full" size="md">
<Flex alignItems="center" w="full" gap={4} fontWeight="semibold" color="base.300">
<Radio value="foreground">
<Text>{t('controlLayers.segment.include')}</Text>
<Text>{t('controlLayers.selectObject.include')}</Text>
</Radio>
<Radio value="background">
<Text>{t('controlLayers.segment.exclude')}</Text>
<Text>{t('controlLayers.selectObject.exclude')}</Text>
</Radio>
</Flex>
</RadioGroup>
@@ -38,4 +38,4 @@ export const SegmentAnythingPointType = memo(
}
);
SegmentAnythingPointType.displayName = 'SegmentAnythingPointType';
SelectObjectPointType.displayName = 'SelectObjectPointType';

View File

@@ -1,4 +1,4 @@
import { Button, ButtonGroup, Flex, Heading, Spacer } from '@invoke-ai/ui-library';
import { Button, ButtonGroup, Flex, Heading, Spacer, Spinner } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import { CanvasOperationIsolatedLayerPreviewSwitch } from 'features/controlLayers/components/CanvasOperationIsolatedLayerPreviewSwitch';
@@ -8,7 +8,6 @@ import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEnt
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiXBold } from 'react-icons/pi';
const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) => {
const { t } = useTranslation();
@@ -62,30 +61,28 @@ const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) =>
<TransformFitToBboxButtons adapter={adapter} />
<ButtonGroup isAttached={false} size="sm" w="full">
<ButtonGroup isAttached={false} size="sm" w="full" alignItems="center">
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
<Spacer />
<Button
leftIcon={<PiArrowsCounterClockwiseBold />}
onClick={adapter.transformer.resetTransform}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('controlLayers.transform.reset')}
variant="ghost"
>
{t('controlLayers.transform.reset')}
</Button>
<Button
leftIcon={<PiCheckBold />}
onClick={adapter.transformer.applyTransform}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('controlLayers.transform.apply')}
variant="ghost"
>
{t('controlLayers.transform.apply')}
</Button>
<Button
leftIcon={<PiXBold />}
onClick={adapter.transformer.stopTransform}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('common.cancel')}
variant="ghost"
>

View File

@@ -4,7 +4,6 @@ import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsOutBold } from 'react-icons/pi';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { z } from 'zod';
@@ -60,10 +59,9 @@ export const TransformFitToBboxButtons = memo(({ adapter }: { adapter: CanvasEnt
<Combobox options={options} value={value} onChange={onChange} isSearchable={false} isClearable={false} />
</FormControl>
<Button
leftIcon={<PiArrowsOutBold />}
size="sm"
onClick={onClick}
isLoading={isProcessing}
isDisabled={isProcessing}
loadingText={t('controlLayers.transform.fitToBbox')}
variant="ghost"
>

View File

@@ -2,6 +2,7 @@ import { MenuItem } from '@invoke-ai/ui-library';
import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdapterContext';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCopyLayerToClipboard } from 'features/controlLayers/hooks/useCopyLayerToClipboard';
import { useEntityIsEmpty } from 'features/controlLayers/hooks/useEntityIsEmpty';
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -12,6 +13,7 @@ export const CanvasEntityMenuItemsCopyToClipboard = memo(() => {
const entityIdentifier = useEntityIdentifierContext();
const adapter = useEntityAdapterSafe(entityIdentifier);
const isInteractable = useIsEntityInteractable(entityIdentifier);
const isEmpty = useEntityIsEmpty(entityIdentifier);
const copyLayerToClipboard = useCopyLayerToClipboard();
const onClick = useCallback(() => {
@@ -19,7 +21,7 @@ export const CanvasEntityMenuItemsCopyToClipboard = memo(() => {
}, [copyLayerToClipboard, adapter]);
return (
<MenuItem onClick={onClick} icon={<PiCopyBold />} isDisabled={!isInteractable}>
<MenuItem onClick={onClick} icon={<PiCopyBold />} isDisabled={!isInteractable || isEmpty}>
{t('common.clipboard')}
</MenuItem>
);

View File

@@ -3,7 +3,7 @@ import { useEntityIdentifierContext } from 'features/controlLayers/contexts/Enti
import { useEntityFilter } from 'features/controlLayers/hooks/useEntityFilter';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShootingStarBold } from 'react-icons/pi';
import { PiShootingStarFill } from 'react-icons/pi';
export const CanvasEntityMenuItemsFilter = memo(() => {
const { t } = useTranslation();
@@ -11,7 +11,7 @@ export const CanvasEntityMenuItemsFilter = memo(() => {
const filter = useEntityFilter(entityIdentifier);
return (
<MenuItem onClick={filter.start} icon={<PiShootingStarBold />} isDisabled={filter.isDisabled}>
<MenuItem onClick={filter.start} icon={<PiShootingStarFill />} isDisabled={filter.isDisabled}>
{t('controlLayers.filter.filter')}
</MenuItem>
);

View File

@@ -3,18 +3,18 @@ import { useEntityIdentifierContext } from 'features/controlLayers/contexts/Enti
import { useEntitySegmentAnything } from 'features/controlLayers/hooks/useEntitySegmentAnything';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMaskHappyBold } from 'react-icons/pi';
import { PiShapesFill } from 'react-icons/pi';
export const CanvasEntityMenuItemsSegment = memo(() => {
export const CanvasEntityMenuItemsSelectObject = memo(() => {
const { t } = useTranslation();
const entityIdentifier = useEntityIdentifierContext();
const segmentAnything = useEntitySegmentAnything(entityIdentifier);
return (
<MenuItem onClick={segmentAnything.start} icon={<PiMaskHappyBold />} isDisabled={segmentAnything.isDisabled}>
{t('controlLayers.segment.autoMask')}
<MenuItem onClick={segmentAnything.start} icon={<PiShapesFill />} isDisabled={segmentAnything.isDisabled}>
{t('controlLayers.selectObject.selectObject')}
</MenuItem>
);
});
CanvasEntityMenuItemsSegment.displayName = 'CanvasEntityMenuItemsSegment';
CanvasEntityMenuItemsSelectObject.displayName = 'CanvasEntityMenuItemsSelectObject';

View File

@@ -24,7 +24,9 @@ import {
selectEntityOrThrow,
} from 'features/controlLayers/store/selectors';
import type {
CanvasControlLayerState,
CanvasEntityIdentifier,
CanvasInpaintMaskState,
CanvasRasterLayerState,
CanvasRegionalGuidanceState,
ControlNetConfig,
@@ -44,6 +46,8 @@ import { useCallback } from 'react';
import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import { isControlNetOrT2IAdapterModelConfig, isIPAdapterModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
export const selectDefaultControlAdapter = createSelector(
selectModelConfigsQuery,
@@ -124,6 +128,60 @@ export const useNewRasterLayerFromImage = () => {
return func;
};
export const useNewControlLayerFromImage = () => {
const dispatch = useAppDispatch();
const bboxRect = useAppSelector(selectBboxRect);
const func = useCallback(
(imageDTO: ImageDTO) => {
const imageObject = imageDTOToImageObject(imageDTO);
const overrides: Partial<CanvasControlLayerState> = {
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageObject],
};
dispatch(controlLayerAdded({ overrides, isSelected: true }));
},
[bboxRect.x, bboxRect.y, dispatch]
);
return func;
};
export const useNewInpaintMaskFromImage = () => {
const dispatch = useAppDispatch();
const bboxRect = useAppSelector(selectBboxRect);
const func = useCallback(
(imageDTO: ImageDTO) => {
const imageObject = imageDTOToImageObject(imageDTO);
const overrides: Partial<CanvasInpaintMaskState> = {
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageObject],
};
dispatch(inpaintMaskAdded({ overrides, isSelected: true }));
},
[bboxRect.x, bboxRect.y, dispatch]
);
return func;
};
export const useNewRegionalGuidanceFromImage = () => {
const dispatch = useAppDispatch();
const bboxRect = useAppSelector(selectBboxRect);
const func = useCallback(
(imageDTO: ImageDTO) => {
const imageObject = imageDTOToImageObject(imageDTO);
const overrides: Partial<CanvasRegionalGuidanceState> = {
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageObject],
};
dispatch(rgAdded({ overrides, isSelected: true }));
},
[bboxRect.x, bboxRect.y, dispatch]
);
return func;
};
/**
* Returns a function that adds a new canvas with the given image as the initial image, replicating the img2img flow:
* - Reset the canvas
@@ -138,18 +196,31 @@ export const useNewCanvasFromImage = () => {
const bboxRect = useAppSelector(selectBboxRect);
const base = useAppSelector(selectBboxModelBase);
const func = useCallback(
(imageDTO: ImageDTO) => {
(imageDTO: ImageDTO, type: CanvasRasterLayerState['type'] | CanvasControlLayerState['type']) => {
// Calculate the new bbox dimensions to fit the image's aspect ratio at the optimal size
const ratio = imageDTO.width / imageDTO.height;
const optimalDimension = getOptimalDimension(base);
const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);
// The overrides need to include the layer's ID so we can transform the layer once it is initialized
const overrides = {
id: getPrefixedId('raster_layer'),
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageDTOToImageObject(imageDTO)],
} satisfies Partial<CanvasRasterLayerState>;
let overrides: Partial<CanvasRasterLayerState> | Partial<CanvasControlLayerState>;
if (type === 'raster_layer') {
overrides = {
id: getPrefixedId('raster_layer'),
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageDTOToImageObject(imageDTO)],
} satisfies Partial<CanvasRasterLayerState>;
} else if (type === 'control_layer') {
overrides = {
id: getPrefixedId('control_layer'),
position: { x: bboxRect.x, y: bboxRect.y },
objects: [imageDTOToImageObject(imageDTO)],
} satisfies Partial<CanvasControlLayerState>;
} else {
// Catch unhandled types
assert<Equals<typeof type, never>>(false);
}
CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
// Skip the callback if the adapter is not the one we are creating
@@ -166,7 +237,16 @@ export const useNewCanvasFromImage = () => {
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
dispatch(rasterLayerAdded({ overrides, isSelected: true }));
// The type casts are safe because the type is checked above
if (type === 'raster_layer') {
dispatch(rasterLayerAdded({ overrides: overrides as Partial<CanvasRasterLayerState>, isSelected: true }));
} else if (type === 'control_layer') {
dispatch(controlLayerAdded({ overrides: overrides as Partial<CanvasControlLayerState>, isSelected: true }));
} else {
// Catch unhandled types
assert<Equals<typeof type, never>>(false);
}
},
[base, bboxRect.x, bboxRect.y, dispatch]
);
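A compile-time exhaustiveness check is what the `assert<Equals<typeof type, never>>(false)` branches above rely on. A minimal sketch of the pattern, with an illustrative LayerType union and dummy IDs standing in for the real overrides:

import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

type LayerType = 'raster_layer' | 'control_layer';

const buildOverrides = (type: LayerType) => {
  let id: string;
  if (type === 'raster_layer') {
    id = 'raster_layer:1';
  } else if (type === 'control_layer') {
    id = 'control_layer:1';
  } else {
    // If LayerType ever gains a new member, `type` no longer narrows to `never` here,
    // so Equals<typeof type, never> is false and this line fails to compile.
    assert<Equals<typeof type, never>>(false);
  }
  return { id };
};

console.log(buildOverrides('control_layer')); // { id: 'control_layer:1' }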

View File

@@ -1,3 +1,4 @@
import { logger } from 'app/logging/logger';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
@@ -7,6 +8,9 @@ import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { serializeError } from 'serialize-error';
const log = logger('canvas');
export const useCopyLayerToClipboard = () => {
const { t } = useTranslation();
@@ -26,11 +30,13 @@ export const useCopyLayerToClipboard = () => {
const canvas = adapter.getCanvas();
const blob = await canvasToBlob(canvas);
copyBlobToClipboard(blob);
log.trace('Layer copied to clipboard');
toast({
status: 'info',
title: t('toast.layerCopiedToClipboard'),
});
} catch (error) {
log.error({ error: serializeError(error) }, 'Problem copying layer to clipboard');
toast({
status: 'error',
title: t('toast.problemCopyingLayer'),
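A hedged sketch of the logging pattern added above: serializeError converts a thrown Error into a plain, JSON-safe object so a structured logger can emit it, with a toast-style notification as the user-facing fallback. The `log` and `notify` stand-ins below are assumptions, not the project's real logger or toast API.

import { serializeError } from 'serialize-error';

// Stand-ins for the real roarr logger and toast helper.
declare const log: { trace: (msg: string) => void; error: (ctx: object, msg: string) => void };
declare const notify: (status: 'info' | 'error', title: string) => void;

const copyCanvasToClipboard = async (canvas: HTMLCanvasElement) => {
  try {
    const blob = await new Promise<Blob | null>((resolve) => canvas.toBlob(resolve));
    if (!blob) {
      throw new Error('Unable to create blob from canvas');
    }
    await navigator.clipboard.write([new ClipboardItem({ [blob.type]: blob })]);
    log.trace('Layer copied to clipboard');
    notify('info', 'Layer copied to clipboard');
  } catch (error) {
    // serializeError makes the error safe to log as structured data.
    log.error({ error: serializeError(error) }, 'Problem copying layer to clipboard');
    notify('error', 'Problem copying layer');
  }
};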

View File

@@ -0,0 +1,11 @@
import { useAppSelector } from 'app/store/storeHooks';
import { buildSelectHasObjects } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { useMemo } from 'react';
export const useEntityIsEmpty = (entityIdentifier: CanvasEntityIdentifier) => {
const selectHasObjects = useMemo(() => buildSelectHasObjects(entityIdentifier), [entityIdentifier]);
const hasObjects = useAppSelector(selectHasObjects);
return !hasObjects;
};
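The useMemo in this hook matters because createSelector memoizes per selector instance: rebuilding the selector on every render would discard its cache and recompute each time. A minimal sketch under assumed state shapes (the Entity/RootState types and useAppSelector signature below are illustrative):

import { createSelector } from '@reduxjs/toolkit';
import { useMemo } from 'react';

// Hypothetical minimal shapes standing in for the real canvas slice and store hooks.
type Entity = { id: string; objects: unknown[] };
type RootState = { entities: Record<string, Entity> };
declare const useAppSelector: <T>(selector: (state: RootState) => T) => T;

const buildSelectHasObjects = (id: string) =>
  createSelector(
    (state: RootState) => state.entities[id],
    (entity) => (entity ? entity.objects.length > 0 : false)
  );

export const useEntityIsEmpty = (id: string) => {
  // One selector instance per `id`; recreating it each render would defeat memoization.
  const selectHasObjects = useMemo(() => buildSelectHasObjects(id), [id]);
  return !useAppSelector(selectHasObjects);
};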

View File

@@ -52,8 +52,9 @@ export const useEntityTransform = (entityIdentifier: CanvasEntityIdentifier | nu
if (!adapter) {
return;
}
imageViewer.close();
await adapter.transformer.startTransform();
}, [isDisabled, entityIdentifier, canvasManager]);
}, [isDisabled, entityIdentifier, canvasManager, imageViewer]);
const fitToBbox = useCallback(async () => {
if (isDisabled) {

View File

@@ -1,27 +1,36 @@
import { deepClone } from 'common/util/deepClone';
import { withResult, withResultAsync } from 'common/util/result';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { CanvasObjectImage } from 'features/controlLayers/konva/CanvasObject/CanvasObjectImage';
import { addCoords, getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
import type { FilterConfig } from 'features/controlLayers/store/filters';
import { getFilterForModel, IMAGE_FILTERS } from 'features/controlLayers/store/filters';
import type { CanvasImageState } from 'features/controlLayers/store/types';
import type { CanvasEntityType, CanvasImageState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import Konva from 'konva';
import { debounce } from 'lodash-es';
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';
import { serializeError } from 'serialize-error';
import { buildSelectModelConfig } from 'services/api/hooks/modelsByType';
import { isControlNetOrT2IAdapterModelConfig } from 'services/api/types';
import stableHash from 'stable-hash';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
type CanvasEntityFiltererConfig = {
processDebounceMs: number;
/**
* The debounce time in milliseconds for processing the filter.
*/
PROCESS_DEBOUNCE_MS: number;
};
const DEFAULT_CONFIG: CanvasEntityFiltererConfig = {
processDebounceMs: 1000,
PROCESS_DEBOUNCE_MS: 1000,
};
export class CanvasEntityFilterer extends CanvasModuleBase {
@@ -32,20 +41,65 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
readonly manager: CanvasManager;
readonly log: Logger;
imageState: CanvasImageState | null = null;
subscriptions = new Set<() => void>();
config: CanvasEntityFiltererConfig = DEFAULT_CONFIG;
subscriptions = new Set<() => void>();
/**
* The AbortController used to cancel the filter processing.
*/
abortController: AbortController | null = null;
/**
* Whether the module is currently filtering an image.
*/
$isFiltering = atom<boolean>(false);
$hasProcessed = atom<boolean>(false);
/**
* The hash of the last processed config. This is used to prevent re-processing the same config.
*/
$lastProcessedHash = atom<string>('');
/**
* Whether the module is currently processing the filter.
*/
$isProcessing = atom<boolean>(false);
/**
* The config for the filter.
*/
$filterConfig = atom<FilterConfig>(IMAGE_FILTERS.canny_edge_detection.buildDefaults());
/**
* The initial filter config, used to reset the filter config.
*/
$initialFilterConfig = atom<FilterConfig | null>(null);
/**
* The ephemeral image state of the filtered image.
*/
$imageState = atom<CanvasImageState | null>(null);
/**
* Whether the module has an image state. This is a computed value based on $imageState.
*/
$hasImageState = computed(this.$imageState, (imageState) => imageState !== null);
/**
* The filtered image object module, if it exists.
*/
imageModule: CanvasObjectImage | null = null;
/**
* The Konva nodes for the module.
*/
konva: {
/**
* The main Konva group node for the module. This is added to the parent layer on start, and removed on teardown.
*/
group: Konva.Group;
};
KONVA_GROUP_NAME = `${this.type}:group`;
constructor(parent: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer) {
super();
this.id = getPrefixedId(this.type);
@@ -55,9 +109,17 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.log = this.manager.buildLogger(this);
this.log.debug('Creating filter module');
this.konva = {
group: new Konva.Group({ name: this.KONVA_GROUP_NAME }),
};
}
/**
* Adds event listeners needed while filtering the entity.
*/
subscribe = () => {
// As the filter config changes, process the filter
this.subscriptions.add(
this.$filterConfig.listen(() => {
if (this.manager.stateApi.getSettings().autoProcess && this.$isFiltering.get()) {
@@ -65,6 +127,7 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
}
})
);
// When auto-process is enabled, process the filter
this.subscriptions.add(
this.manager.stateApi.createStoreSubscription(selectAutoProcess, (autoProcess) => {
if (autoProcess && this.$isFiltering.get()) {
@@ -74,11 +137,18 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
);
};
/**
* Removes event listeners used while filtering the entity.
*/
unsubscribe = () => {
this.subscriptions.forEach((unsubscribe) => unsubscribe());
this.subscriptions.clear();
};
/**
* Starts the filter module.
* @param config The filter config to start with. If omitted, the default filter config is used.
*/
start = (config?: FilterConfig) => {
const filteringAdapter = this.manager.stateApi.$filteringAdapter.get();
if (filteringAdapter) {
@@ -88,30 +158,57 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.log.trace('Initializing filter');
this.subscribe();
// Reset any previous state
this.resetEphemeralState();
this.$isFiltering.set(true);
// Update the konva group's position to match the parent entity
const pixelRect = this.parent.transformer.$pixelRect.get();
const position = addCoords(this.parent.state.position, pixelRect);
this.konva.group.setAttrs(position);
// Add the group to the parent layer
this.parent.konva.layer.add(this.konva.group);
if (config) {
// If a config is provided, use it
this.$filterConfig.set(config);
} else if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
this.$initialFilterConfig.set(config);
} else {
this.$filterConfig.set(this.createInitialFilterConfig());
}
this.$initialFilterConfig.set(this.$filterConfig.get());
this.subscribe();
this.manager.stateApi.$filteringAdapter.set(this.parent);
if (this.manager.stateApi.getSettings().autoProcess) {
this.processImmediate();
}
};
createInitialFilterConfig = (): FilterConfig => {
if (this.parent.type === 'control_layer_adapter' && this.parent.state.controlAdapter.model) {
// If the parent is a control layer adapter, we should check if the model has a default filter and set it if so
const selectModelConfig = buildSelectModelConfig(
this.parent.state.controlAdapter.model.key,
isControlNetOrT2IAdapterModelConfig
);
const modelConfig = this.manager.stateApi.runSelector(selectModelConfig);
// This always returns a filter
const filter = getFilterForModel(modelConfig);
this.$filterConfig.set(filter.buildDefaults());
return filter.buildDefaults();
} else {
// Otherwise, set the default filter
this.$filterConfig.set(IMAGE_FILTERS.canny_edge_detection.buildDefaults());
}
this.$isFiltering.set(true);
this.manager.stateApi.$filteringAdapter.set(this.parent);
if (this.manager.stateApi.getSettings().autoProcess) {
this.processImmediate();
// Otherwise, use the default filter
return IMAGE_FILTERS.canny_edge_detection.buildDefaults();
}
};
/**
* Processes the filter, updating the module's state and rendering the filtered image.
*/
processImmediate = async () => {
const config = this.$filterConfig.get();
const filterData = IMAGE_FILTERS[config.type];
@@ -123,6 +220,12 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
return;
}
const hash = stableHash({ config });
if (hash === this.$lastProcessedHash.get()) {
this.log.trace('Already processed config');
return;
}
this.log.trace({ config }, 'Processing filter');
const rect = this.parent.transformer.getRelativeRect();
@@ -156,91 +259,181 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
this.manager.stateApi.runGraphAndReturnImageOutput({
graph,
outputNodeId,
// The filter graph should always be prepended to the queue so it's processed ASAP.
prepend: true,
/**
* The filter node may need to download a large model. Currently, the models required by the filter nodes are
* downloaded just-in-time, as required by the filter. If we use a timeout here, we might get into a catch-22
* where the filter node is waiting for the model to download, but the download gets canceled if the filter
* node times out.
*
* (I suspect the model download will actually _not_ be canceled if the graph is canceled, but let's not chance it!)
*
* TODO(psyche): Figure out a better way to handle this. Probably need to download the models ahead of time.
*/
// timeout: 5000,
/**
* The filter node should be able to cancel the request if it's taking too long. This will cancel the graph's
* queue item and clear any event listeners on the request.
*/
signal: controller.signal,
})
);
// If there is an error, log it and bail out of this processing run
if (filterResult.isErr()) {
this.log.error({ error: serializeError(filterResult.error) }, 'Error processing filter');
this.log.error({ error: serializeError(filterResult.error) }, 'Error filtering');
this.$isProcessing.set(false);
// Clean up the abort controller as needed
if (!this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
return;
}
this.log.trace({ imageDTO: filterResult.value }, 'Filter processed');
this.imageState = imageDTOToImageObject(filterResult.value);
this.log.trace({ imageDTO: filterResult.value }, 'Filtered');
await this.parent.bufferRenderer.setBuffer(this.imageState, true);
// Prepare the ephemeral image state
const imageState = imageDTOToImageObject(filterResult.value);
this.$imageState.set(imageState);
// Destroy any existing masked image and create a new one
if (this.imageModule) {
this.imageModule.destroy();
}
this.imageModule = new CanvasObjectImage(imageState, this);
// Force update the masked image - after awaiting, the image will be rendered (in memory)
await this.imageModule.update(imageState, true);
this.konva.group.add(this.imageModule.konva.group);
// The processing is complete, so we can set the last processed hash and isProcessing to false
this.$lastProcessedHash.set(hash);
this.$isProcessing.set(false);
this.$hasProcessed.set(true);
// Clean up the abort controller as needed
if (!this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
};
process = debounce(this.processImmediate, this.config.processDebounceMs);
/**
* Debounced version of processImmediate.
*/
process = debounce(this.processImmediate, this.config.PROCESS_DEBOUNCE_MS);
/**
* Applies the filter image to the entity, replacing the entity's objects with the filtered image.
*/
apply = () => {
const imageState = this.imageState;
if (!imageState) {
const filteredImageObjectState = this.$imageState.get();
if (!filteredImageObjectState) {
this.log.warn('No image state to apply filter to');
return;
}
this.log.trace('Applying filter');
this.parent.bufferRenderer.commitBuffer();
this.log.trace('Applying');
// Have the parent adopt the image module - this prevents a flash of the original layer content before the filtered
// image is rendered
if (this.imageModule) {
this.parent.renderer.adoptObjectRenderer(this.imageModule);
}
// Rasterize the entity, replacing the objects with the masked image
const rect = this.parent.transformer.getRelativeRect();
this.manager.stateApi.rasterizeEntity({
entityIdentifier: this.parent.entityIdentifier,
imageObject: imageState,
imageObject: filteredImageObjectState,
position: {
x: Math.round(rect.x),
y: Math.round(rect.y),
},
replaceObjects: true,
});
this.imageState = null;
// Final cleanup and teardown, returning user to main canvas UI
this.resetEphemeralState();
this.teardown();
};
/**
* Saves the filtered image as a new entity of the given type.
* @param type The type of entity to save the filtered image as.
*/
saveAs = (type: Exclude<CanvasEntityType, 'reference_image'>) => {
const imageState = this.$imageState.get();
if (!imageState) {
this.log.warn('No image state to apply filter to');
return;
}
this.log.trace(`Saving as ${type}`);
const rect = this.parent.transformer.getRelativeRect();
const arg = {
overrides: {
objects: [imageState],
position: {
x: Math.round(rect.x),
y: Math.round(rect.y),
},
},
isSelected: true,
};
switch (type) {
case 'raster_layer':
this.manager.stateApi.addRasterLayer(arg);
break;
case 'control_layer':
this.manager.stateApi.addControlLayer(arg);
break;
case 'inpaint_mask':
this.manager.stateApi.addInpaintMask(arg);
break;
case 'regional_guidance':
this.manager.stateApi.addRegionalGuidance(arg);
break;
default:
assert<Equals<typeof type, never>>(false);
}
// Final cleanup and teardown, returning user to main canvas UI
this.resetEphemeralState();
this.teardown();
};
resetEphemeralState = () => {
// First we need to bail out of any processing
if (this.abortController && !this.abortController.signal.aborted) {
this.abortController.abort();
}
this.abortController = null;
// If the image module exists, and is a child of the group, destroy it. It might not be a child of the group if
// the user has applied the filter and the image has been adopted by the parent entity.
if (this.imageModule && this.imageModule.konva.group.parent === this.konva.group) {
this.imageModule.destroy();
this.imageModule = null;
}
const initialFilterConfig = this.$initialFilterConfig.get() ?? this.createInitialFilterConfig();
this.$filterConfig.set(initialFilterConfig);
this.$imageState.set(null);
this.$lastProcessedHash.set('');
this.$isProcessing.set(false);
};
teardown = () => {
this.$initialFilterConfig.set(null);
this.konva.group.remove();
this.unsubscribe();
this.$isFiltering.set(false);
this.$hasProcessed.set(false);
this.manager.stateApi.$filteringAdapter.set(null);
};
/**
 * Resets the module (e.g. clears the filtered image and other ephemeral state).
 *
 * Does not cancel or otherwise complete the filtering process.
*/
reset = () => {
this.log.trace('Resetting filter');
this.abortController?.abort();
this.abortController = null;
this.parent.bufferRenderer.clearBuffer();
this.parent.transformer.updatePosition();
this.parent.renderer.syncKonvaCache(true);
this.imageState = null;
this.$hasProcessed.set(false);
this.log.trace('Resetting');
this.resetEphemeralState();
};
cancel = () => {
this.log.trace('Cancelling filter');
this.reset();
this.unsubscribe();
this.$isProcessing.set(false);
this.$isFiltering.set(false);
this.$hasProcessed.set(false);
this.manager.stateApi.$filteringAdapter.set(null);
this.log.trace('Canceling');
this.resetEphemeralState();
this.teardown();
};
repr = () => {
@@ -248,11 +441,14 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
id: this.id,
type: this.type,
path: this.path,
parent: this.parent.id,
config: this.config,
imageState: deepClone(this.$imageState.get()),
$isFiltering: this.$isFiltering.get(),
$hasProcessed: this.$hasProcessed.get(),
$lastProcessedHash: this.$lastProcessedHash.get(),
$isProcessing: this.$isProcessing.get(),
$filterConfig: this.$filterConfig.get(),
konva: { group: getKonvaNodeDebugAttrs(this.konva.group) },
};
};
@@ -263,5 +459,6 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
}
this.abortController = null;
this.unsubscribe();
this.konva.group.destroy();
};
}
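A condensed sketch of the skip-if-already-processed and debounced-processing logic the filter module uses above: stableHash of the config guards against redundant runs, and lodash's debounce coalesces rapid config changes (e.g. dragging a slider) into a single run. The FilterConfig shape and runFilter stand-in are illustrative, not the real graph execution.

import { debounce } from 'lodash-es';
import { atom } from 'nanostores';
import stableHash from 'stable-hash';

type FilterConfig = { type: string; low_threshold?: number; high_threshold?: number };

const $filterConfig = atom<FilterConfig>({ type: 'canny_edge_detection', low_threshold: 100, high_threshold: 200 });
const $lastProcessedHash = atom<string>('');
const $isProcessing = atom<boolean>(false);

// Stand-in for running the filter graph and rendering the result.
const runFilter = async (config: FilterConfig) => {
  console.log('processing', config);
};

const processImmediate = async () => {
  const config = $filterConfig.get();
  const hash = stableHash({ config });
  // Skip work if the exact same config was just processed.
  if (hash === $lastProcessedHash.get()) {
    return;
  }
  $isProcessing.set(true);
  await runFilter(config);
  $lastProcessedHash.set(hash);
  $isProcessing.set(false);
};

// Debounce rapid config changes into one processing run (1000ms matches PROCESS_DEBOUNCE_MS).
const process = debounce(processImmediate, 1000);
$filterConfig.listen(() => process());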

View File

@@ -1,6 +1,7 @@
import { Mutex } from 'async-mutex';
import { deepClone } from 'common/util/deepClone';
import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
@@ -21,7 +22,8 @@ export class CanvasObjectImage extends CanvasModuleBase {
| CanvasEntityObjectRenderer
| CanvasEntityBufferObjectRenderer
| CanvasStagingAreaModule
| CanvasSegmentAnythingModule;
| CanvasSegmentAnythingModule
| CanvasEntityFilterer;
readonly manager: CanvasManager;
readonly log: Logger;
@@ -43,6 +45,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
| CanvasEntityBufferObjectRenderer
| CanvasStagingAreaModule
| CanvasSegmentAnythingModule
| CanvasEntityFilterer
) {
super();
this.id = state.id;

View File

@@ -114,7 +114,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
subscriptions = new Set<() => void>();
/**
* The AbortController used to cancel the filter processing.
* The AbortController used to cancel the segment processing.
*/
abortController: AbortController | null = null;
@@ -173,16 +173,21 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
$hasPoints = computed(this.$points, (points) => points.length > 0);
/**
* The masked image object, if it exists.
* Whether the module should invert the mask image.
*/
maskedImage: CanvasObjectImage | null = null;
$invert = atom<boolean>(false);
/**
* The masked image object module, if it exists.
*/
imageModule: CanvasObjectImage | null = null;
/**
* The Konva nodes for the module.
*/
konva: {
/**
* The main Konva group node for the module.
* The main Konva group node for the module. This is added to the parent layer on start, and removed on teardown.
*/
group: Konva.Group;
/**
@@ -293,6 +298,9 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
if (this.$isDraggingPoint.get()) {
return;
}
if (e.evt.button !== 0) {
return;
}
// This event should not bubble up to the parent, stage or any other nodes
e.cancelBubble = true;
circle.destroy();
@@ -453,6 +461,19 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
})
);
// When the invert flag changes, process if autoProcess is enabled
this.subscriptions.add(
this.$invert.listen(() => {
if (this.$points.get().length === 0) {
return;
}
if (this.manager.stateApi.getSettings().autoProcess) {
this.process();
}
})
);
// When auto-process is enabled, process the points if they have not been processed
this.subscriptions.add(
this.manager.stateApi.createStoreSubscription(selectAutoProcess, (autoProcess) => {
@@ -467,7 +488,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
};
/**
* Adds event listeners needed while segmenting the entity.
* Removes event listeners used while segmenting the entity.
*/
unsubscribe = () => {
this.subscriptions.forEach((unsubscribe) => unsubscribe());
@@ -526,7 +547,9 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
return;
}
const hash = stableHash(points);
const invert = this.$invert.get();
const hash = stableHash({ points, invert });
if (hash === this.$lastProcessedHash.get()) {
this.log.trace('Already processed points');
return;
@@ -553,7 +576,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.abortController = controller;
// Build the graph for segmenting the image, using the rasterized image DTO
const { graph, outputNodeId } = this.buildGraph(rasterizeResult.value, points);
const { graph, outputNodeId } = CanvasSegmentAnythingModule.buildGraph(rasterizeResult.value, points, invert);
// Run the graph and get the segmented image output
const segmentResult = await withResultAsync(() =>
@@ -584,18 +607,18 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.$imageState.set(imageState);
// Destroy any existing masked image and create a new one
if (this.maskedImage) {
this.maskedImage.destroy();
if (this.imageModule) {
this.imageModule.destroy();
}
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
this.konva.maskTween = null;
}
this.maskedImage = new CanvasObjectImage(imageState, this);
this.imageModule = new CanvasObjectImage(imageState, this);
// Force update the masked image - after awaiting, the image will be rendered (in memory)
await this.maskedImage.update(imageState, true);
await this.imageModule.update(imageState, true);
// Update the compositing rect to match the image size
this.konva.compositingRect.setAttrs({
@@ -606,7 +629,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
// Now we can add the masked image to the mask group. It will be rendered above the compositing rect, but should be
// under it, so we will move the compositing rect to the top
this.konva.maskGroup.add(this.maskedImage.konva.group);
this.konva.maskGroup.add(this.imageModule.konva.group);
this.konva.compositingRect.moveToTop();
// Cache the group to ensure the mask is rendered correctly w/ opacity
@@ -643,7 +666,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
process = debounce(this.processImmediate, this.config.PROCESS_DEBOUNCE_MS);
/**
* Applies the segmented image to the entity.
* Applies the segmented image to the entity, replacing the entity's objects with the masked image.
*/
apply = () => {
const imageState = this.$imageState.get();
@@ -653,10 +676,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
}
this.log.trace('Applying');
// Commit the buffer, which will move the buffer from the layer's buffer renderer to its main renderer
this.parent.bufferRenderer.commitBuffer();
// Rasterize the entity, this time replacing the objects with the masked image
// Rasterize the entity, replacing the objects with the masked image
const rect = this.parent.transformer.getRelativeRect();
this.manager.stateApi.rasterizeEntity({
entityIdentifier: this.parent.entityIdentifier,
@@ -674,7 +694,8 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
};
/**
* Applies the segmented image to the entity.
* Saves the segmented image as a new entity of the given type.
* @param type The type of entity to save the segmented image as.
*/
saveAs = (type: Exclude<CanvasEntityType, 'reference_image'>) => {
const imageState = this.$imageState.get();
@@ -684,8 +705,11 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
}
this.log.trace(`Saving as ${type}`);
// Clear the buffer - we are creating a new entity, so we don't want to keep the old one
this.parent.bufferRenderer.clearBuffer();
// Have the parent adopt the image module - this prevents a flash of the original layer content before the
// segmented image is rendered
if (this.imageModule) {
this.parent.renderer.adoptObjectRenderer(this.imageModule);
}
// Create the new entity with the masked image as its only object
const rect = this.parent.transformer.getRelativeRect();
@@ -778,8 +802,12 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
for (const point of this.$points.get()) {
point.konva.circle.destroy();
}
if (this.maskedImage) {
this.maskedImage.destroy();
// If the image module exists, and is a child of the group, destroy it. It might not be a child of the group if
// the user has applied the segmented image and the image has been adopted by the parent entity.
if (this.imageModule && this.imageModule.konva.group.parent === this.konva.group) {
this.imageModule.destroy();
this.imageModule = null;
}
if (this.konva.maskTween) {
this.konva.maskTween.destroy();
@@ -790,22 +818,23 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
this.$points.set([]);
this.$imageState.set(null);
this.$pointType.set(1);
this.$invert.set(false);
this.$lastProcessedHash.set('');
this.$isProcessing.set(false);
// Reset non-ephemeral konva nodes
this.konva.compositingRect.visible(false);
this.konva.maskGroup.clearCache();
// The parent module's buffer should be reset & forcibly sync the cache
this.parent.bufferRenderer.clearBuffer();
this.parent.renderer.syncKonvaCache(true);
};
/**
* Builds a graph for segmenting an image with the given image DTO.
*/
buildGraph = ({ image_name }: ImageDTO, points: SAMPointWithId[]): { graph: Graph; outputNodeId: string } => {
static buildGraph = (
{ image_name }: ImageDTO,
points: SAMPointWithId[],
invert: boolean
): { graph: Graph; outputNodeId: string } => {
const graph = new Graph(getPrefixedId('canvas_segment_anything'));
// TODO(psyche): When SAM2 is available in transformers, use it here
@@ -824,6 +853,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
id: getPrefixedId('apply_tensor_mask_to_image'),
type: 'apply_tensor_mask_to_image',
image: { image_name },
invert,
});
graph.addEdge(segmentAnything, 'mask', applyMask, 'mask');
@@ -859,7 +889,7 @@ export class CanvasSegmentAnythingModule extends CanvasModuleBase {
circle: getKonvaNodeDebugAttrs(konva.circle),
})),
imageState: deepClone(this.$imageState.get()),
maskedImage: this.maskedImage?.repr(),
imageModule: this.imageModule?.repr(),
config: deepClone(this.config),
$isSegmenting: this.$isSegmenting.get(),
$lastProcessedHash: this.$lastProcessedHash.get(),
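The "only destroy the image module if it is still a child of our group" guard above relies on Konva reparenting a node when another container adds it. A small illustrative sketch of that ownership check, using bare Konva groups as stand-ins for the module's and parent entity's nodes:

import Konva from 'konva';

const moduleGroup = new Konva.Group({ name: 'select_object:group' });
const parentLayerGroup = new Konva.Group({ name: 'entity:object_group' });

const maskedImageGroup = new Konva.Group({ name: 'image:group' });
moduleGroup.add(maskedImageGroup);

// On apply, the parent adopts the node - Konva moves it to the new container.
parentLayerGroup.add(maskedImageGroup);

// During teardown, destroy the node only if this module still owns it; otherwise the
// parent entity now owns it, and destroying it would blank the applied result.
if (maskedImageGroup.parent === moduleGroup) {
  maskedImageGroup.destroy();
}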

View File

@@ -51,10 +51,16 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {
/**
* Sync the $isStaging flag with the redux state. $isStaging is used by the manager to determine the global busy
* state of the canvas.
*
* We also set the $shouldShowStagedImage flag when we enter staging mode, so that the staged images are shown,
* even if the user disabled this in the last staging session.
*/
this.subscriptions.add(
this.manager.stateApi.createStoreSubscription(selectIsStaging, (isStaging) => {
this.manager.stateApi.createStoreSubscription(selectIsStaging, (isStaging, oldIsStaging) => {
this.$isStaging.set(isStaging);
if (isStaging && !oldIsStaging) {
this.$shouldShowStagedImage.set(true);
}
})
);
}
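The subscription above only re-enables the staged-image preview on the transition into staging, so a user's "hide staged images" choice holds for the current session but resets for the next one. A minimal rising-edge sketch using nanostores atoms as stand-ins for the redux selector subscription:

import { atom } from 'nanostores';

const $shouldShowStagedImage = atom<boolean>(true);
const $isStaging = atom<boolean>(false);

// Rising-edge detection: act only when staging flips from false to true.
let prevIsStaging = $isStaging.get();
$isStaging.listen((isStaging) => {
  if (isStaging && !prevIsStaging) {
    $shouldShowStagedImage.set(true);
  }
  prevIsStaging = isStaging;
});

$isStaging.set(true); // preview re-enabled for the new staging session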

View File

@@ -293,6 +293,8 @@ export class CanvasStateApiModule extends CanvasModuleBase {
},
};
let didSuceed = false;
/**
* If a timeout is provided, we will cancel the graph if it takes too long - but we need a way to clear the timeout
* if the graph completes or errors before the timeout.
@@ -344,6 +346,8 @@ export class CanvasStateApiModule extends CanvasModuleBase {
return;
}
didSuceed = true;
// Ok!
resolve(getImageDTOResult.value);
};
@@ -434,6 +438,10 @@ export class CanvasStateApiModule extends CanvasModuleBase {
if (timeout) {
timeoutId = window.setTimeout(() => {
if (didSuceed) {
// If we already succeeded, we don't need to do anything
return;
}
this.log.trace('Graph canceled by timeout');
clearListeners();
cancelGraph();
@@ -443,6 +451,10 @@ export class CanvasStateApiModule extends CanvasModuleBase {
if (signal) {
signal.addEventListener('abort', () => {
if (didSuceed) {
// If we already succeeded, we don't need to do anything
return;
}
this.log.trace('Graph canceled by signal');
_clearTimeout();
clearListeners();
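A simplified, self-contained sketch of the guard introduced above: once the graph has succeeded, later timeout or abort callbacks return early instead of issuing a redundant cancel request. The runGraph/cancelGraph parameters are stand-ins for the real queue interactions.

const runWithCancellation = (
  runGraph: () => Promise<string>,
  cancelGraph: () => void,
  options: { timeout?: number; signal?: AbortSignal } = {}
): Promise<string> => {
  return new Promise<string>((resolve, reject) => {
    let didSucceed = false;
    let timeoutId: ReturnType<typeof setTimeout> | undefined;

    if (options.timeout) {
      timeoutId = setTimeout(() => {
        if (didSucceed) {
          // Already finished - do not cancel a completed graph.
          return;
        }
        cancelGraph();
        reject(new Error('Graph canceled by timeout'));
      }, options.timeout);
    }

    options.signal?.addEventListener('abort', () => {
      if (didSucceed) {
        // Already finished - nothing to clean up or cancel.
        return;
      }
      clearTimeout(timeoutId);
      cancelGraph();
      reject(new Error('Graph canceled by signal'));
    });

    runGraph().then((imageName) => {
      didSucceed = true;
      clearTimeout(timeoutId);
      resolve(imageName);
    }, reject);
  });
};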

View File

@@ -216,12 +216,14 @@ export class CanvasEraserToolModule extends CanvasModuleBase {
*/
onStagePointerDown = async (e: KonvaEventObject<PointerEvent>) => {
const cursorPos = this.parent.$cursorPos.get();
const isPrimaryPointerDown = this.parent.$isPrimaryPointerDown.get();
const selectedEntity = this.manager.stateApi.getSelectedEntityAdapter();
if (!cursorPos || !selectedEntity) {
if (!cursorPos || !selectedEntity || !isPrimaryPointerDown) {
/**
* Can't do anything without:
* - A cursor position: the cursor is not on the stage
* - The primary pointer being down: the user is not drawing
* - A selected entity: there is no entity to draw on
*/
return;
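A compact sketch of the pointer guards added in this and the SAM module: ignore non-primary buttons outright (PointerEvent.button === 0 is the primary button) and bail out when the cursor or primary pointer state says there is nothing to draw. The state accessors declared below are assumptions standing in for the tool module's real getters.

import type { KonvaEventObject } from 'konva/lib/Node';

// Stand-ins for the tool module's state accessors.
declare const getCursorPos: () => { x: number; y: number } | null;
declare const isPrimaryPointerDown: () => boolean;

const onStagePointerDown = (e: KonvaEventObject<PointerEvent>) => {
  // Ignore right/middle clicks entirely.
  if (e.evt.button !== 0) {
    return;
  }
  const cursorPos = getCursorPos();
  if (!cursorPos || !isPrimaryPointerDown()) {
    // No cursor on the stage, or the primary pointer is not down - nothing to draw.
    return;
  }
  // ...begin the eraser line at cursorPos...
};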

View File

@@ -160,11 +160,16 @@ export class CanvasToolModule extends CanvasModuleBase {
const stage = this.manager.stage;
const tool = this.$tool.get();
const segmentingAdapter = this.manager.stateApi.$segmentingAdapter.get();
const transformingAdapter = this.manager.stateApi.$transformingAdapter.get();
if ((this.manager.stage.getIsDragging() || tool === 'view') && !segmentingAdapter) {
if (this.manager.stage.getIsDragging()) {
this.tools.view.syncCursorStyle();
} else if (tool === 'view') {
this.tools.view.syncCursorStyle();
} else if (segmentingAdapter) {
segmentingAdapter.segmentAnything.syncCursorStyle();
} else if (transformingAdapter) {
// The transformer handles cursor style via events
} else if (this.manager.stateApi.$isFiltering.get()) {
stage.setCursor('not-allowed');
} else if (this.manager.stagingArea.$isStaging.get()) {

View File

@@ -349,6 +349,27 @@ export const buildSelectIsSelected = (entityIdentifier: CanvasEntityIdentifier)
);
};
/**
* Builds a selector that selects if the entity is empty.
*
* Reference images are considered empty if the IP adapter is empty.
*
* Other entities are considered empty if they have no objects.
*/
export const buildSelectHasObjects = (entityIdentifier: CanvasEntityIdentifier) => {
return createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntity(canvas, entityIdentifier);
if (!entity) {
return false;
}
if (entity.type === 'reference_image') {
return entity.ipAdapter.image !== null;
}
return entity.objects.length > 0;
});
};
export const selectWidth = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.rect.width);
export const selectHeight = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.rect.height);
export const selectAspectRatioID = createSelector(selectCanvasSlice, (canvas) => canvas.bbox.aspectRatio.id);

View File

@@ -42,6 +42,14 @@ export type AddControlLayerFromImageDropData = BaseDropData & {
actionType: 'ADD_CONTROL_LAYER_FROM_IMAGE';
};
type AddInpaintMaskFromImageDropData = BaseDropData & {
actionType: 'ADD_INPAINT_MASK_FROM_IMAGE';
};
type AddRegionalGuidanceFromImageDropData = BaseDropData & {
actionType: 'ADD_REGIONAL_GUIDANCE_FROM_IMAGE';
};
export type AddRegionalReferenceImageFromImageDropData = BaseDropData & {
actionType: 'ADD_REGIONAL_REFERENCE_IMAGE_FROM_IMAGE';
};
@@ -53,7 +61,7 @@ export type AddGlobalReferenceImageFromImageDropData = BaseDropData & {
export type ReplaceLayerImageDropData = BaseDropData & {
actionType: 'REPLACE_LAYER_WITH_IMAGE';
context: {
entityIdentifier: CanvasEntityIdentifier<'control_layer' | 'raster_layer'>;
entityIdentifier: CanvasEntityIdentifier<'control_layer' | 'raster_layer' | 'inpaint_mask' | 'regional_guidance'>;
};
};
@@ -98,7 +106,9 @@ export type TypesafeDroppableData =
| AddControlLayerFromImageDropData
| ReplaceLayerImageDropData
| AddRegionalReferenceImageFromImageDropData
| AddGlobalReferenceImageFromImageDropData;
| AddGlobalReferenceImageFromImageDropData
| AddInpaintMaskFromImageDropData
| AddRegionalGuidanceFromImageDropData;
type BaseDragData = {
id: string;

View File

@@ -17,6 +17,8 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?
case 'SET_RG_IP_ADAPTER_IMAGE':
case 'ADD_RASTER_LAYER_FROM_IMAGE':
case 'ADD_CONTROL_LAYER_FROM_IMAGE':
case 'ADD_INPAINT_MASK_FROM_IMAGE':
case 'ADD_REGIONAL_GUIDANCE_FROM_IMAGE':
case 'SET_UPSCALE_INITIAL_IMAGE':
case 'SET_NODES_IMAGE':
case 'SELECT_FOR_COMPARE':

View File

@@ -1,39 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useNewCanvasFromImage } from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { toast } from 'features/toast/toast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFileBold } from 'react-icons/pi';
export const ImageMenuItemNewCanvasFromImage = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
const imageViewer = useImageViewer();
const newCanvasFromImage = useNewCanvasFromImage();
const isBusy = useCanvasIsBusy();
const onClick = useCallback(() => {
newCanvasFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newCanvasFromImage, t]);
return (
<MenuItem icon={<PiFileBold />} onClickCapture={onClick} isDisabled={isBusy}>
{t('controlLayers.newCanvasFromImage')}
</MenuItem>
);
});
ImageMenuItemNewCanvasFromImage.displayName = 'ImageMenuItemNewCanvasFromImage';

View File

@@ -0,0 +1,140 @@
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
import {
useNewCanvasFromImage,
useNewControlLayerFromImage,
useNewInpaintMaskFromImage,
useNewRasterLayerFromImage,
useNewRegionalGuidanceFromImage,
} from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { toast } from 'features/toast/toast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFileBold, PiPlusBold } from 'react-icons/pi';
export const ImageMenuItemNewFromImageSubMenu = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
const imageViewer = useImageViewer();
const isBusy = useCanvasIsBusy();
const newRasterLayerFromImage = useNewRasterLayerFromImage();
const newControlLayerFromImage = useNewControlLayerFromImage();
const newInpaintMaskFromImage = useNewInpaintMaskFromImage();
const newRegionalGuidanceFromImage = useNewRegionalGuidanceFromImage();
const newCanvasFromImage = useNewCanvasFromImage();
const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
newCanvasFromImage(imageDTO, 'raster_layer');
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newCanvasFromImage, t]);
const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
newCanvasFromImage(imageDTO, 'control_layer');
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newCanvasFromImage, t]);
const onClickNewRasterLayerFromImage = useCallback(() => {
dispatch(sentImageToCanvas());
newRasterLayerFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newRasterLayerFromImage, t]);
const onClickNewControlLayerFromImage = useCallback(() => {
dispatch(sentImageToCanvas());
newControlLayerFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newControlLayerFromImage, t]);
const onClickNewInpaintMaskFromImage = useCallback(() => {
dispatch(sentImageToCanvas());
newInpaintMaskFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newInpaintMaskFromImage, t]);
const onClickNewRegionalGuidanceFromImage = useCallback(() => {
dispatch(sentImageToCanvas());
newRegionalGuidanceFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newRegionalGuidanceFromImage, t]);
return (
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiPlusBold />}>
<Menu {...subMenu.menuProps}>
<MenuButton {...subMenu.menuButtonProps}>
<SubMenuButtonContent label="New from Image" />
</MenuButton>
<MenuList {...subMenu.menuListProps}>
<MenuItem icon={<PiFileBold />} onClickCapture={onClickNewCanvasWithRasterLayerFromImage} isDisabled={isBusy}>
{t('controlLayers.canvasAsRasterLayer')}
</MenuItem>
<MenuItem
icon={<PiFileBold />}
onClickCapture={onClickNewCanvasWithControlLayerFromImage}
isDisabled={isBusy}
>
{t('controlLayers.canvasAsControlLayer')}
</MenuItem>
<MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewInpaintMaskFromImage} isDisabled={isBusy}>
{t('controlLayers.inpaintMask')}
</MenuItem>
<MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewRegionalGuidanceFromImage} isDisabled={isBusy}>
{t('controlLayers.regionalGuidance')}
</MenuItem>
<MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewControlLayerFromImage} isDisabled={isBusy}>
{t('controlLayers.controlLayer')}
</MenuItem>
<MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewRasterLayerFromImage} isDisabled={isBusy}>
{t('controlLayers.rasterLayer')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});
ImageMenuItemNewFromImageSubMenu.displayName = 'ImageMenuItemNewFromImageSubMenu';

View File

@@ -1,41 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
import { useNewRasterLayerFromImage } from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { toast } from 'features/toast/toast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const ImageMenuItemNewLayerFromImage = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
const imageViewer = useImageViewer();
const newRasterLayerFromImage = useNewRasterLayerFromImage();
const isBusy = useCanvasIsBusy();
const onClick = useCallback(() => {
dispatch(sentImageToCanvas());
newRasterLayerFromImage(imageDTO);
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
id: 'SENT_TO_CANVAS',
title: t('toast.sentToCanvas'),
status: 'success',
});
}, [dispatch, imageDTO, imageViewer, newRasterLayerFromImage, t]);
return (
<MenuItem icon={<NewLayerIcon />} onClickCapture={onClick} isDisabled={isBusy}>
{t('controlLayers.newLayerFromImage')}
</MenuItem>
);
});
ImageMenuItemNewLayerFromImage.displayName = 'ImageMenuItemNewLayerFromImage';

View File

@@ -7,8 +7,7 @@ import { ImageMenuItemDelete } from 'features/gallery/components/ImageContextMen
import { ImageMenuItemDownload } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDownload';
import { ImageMenuItemLoadWorkflow } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLoadWorkflow';
import { ImageMenuItemMetadataRecallActions } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActions';
import { ImageMenuItemNewCanvasFromImage } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewCanvasFromImage';
import { ImageMenuItemNewLayerFromImage } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewLayerFromImage';
import { ImageMenuItemNewFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewFromImageSubMenu';
import { ImageMenuItemOpenInNewTab } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInNewTab';
import { ImageMenuItemOpenInViewer } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInViewer';
import { ImageMenuItemSelectForCompare } from 'features/gallery/components/ImageContextMenu/ImageMenuItemSelectForCompare';
@@ -39,8 +38,7 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
<MenuDivider />
<ImageMenuItemSendToUpscale />
<CanvasManagerProviderGate>
<ImageMenuItemNewLayerFromImage />
<ImageMenuItemNewCanvasFromImage />
<ImageMenuItemNewFromImageSubMenu />
</CanvasManagerProviderGate>
<MenuDivider />
<ImageMenuItemChangeBoard />

View File

@@ -35,6 +35,8 @@ const ImageMetadataActions = (props: Props) => {
<MetadataItem metadata={metadata} handlers={handlers.cfgRescaleMultiplier} />
<MetadataItem metadata={metadata} handlers={handlers.guidance} />
{activeTabName !== 'canvas' && <MetadataItem metadata={metadata} handlers={handlers.strength} />}
<MetadataItem metadata={metadata} handlers={handlers.seamlessX} />
<MetadataItem metadata={metadata} handlers={handlers.seamlessY} />
<MetadataItem metadata={metadata} handlers={handlers.hrfEnabled} />
<MetadataItem metadata={metadata} handlers={handlers.hrfMethod} />
<MetadataItem metadata={metadata} handlers={handlers.hrfStrength} />

View File

@@ -3,6 +3,7 @@ import { useStore } from '@nanostores/react';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import { CanvasAlertsSendingToCanvas } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
import type { TypesafeDraggableData } from 'features/dnd/types';
import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer';
import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons';
@@ -73,6 +74,9 @@ const CurrentImagePreview = () => {
dataTestId="image-preview"
/>
)}
<Box position="absolute" top={0} insetInlineStart={0}>
<CanvasAlertsSendingToCanvas />
</Box>
{shouldShowImageDetails && imageDTO && (
<Box position="absolute" opacity={0.8} top={0} width="full" height="full" borderRadius="base">
<ImageMetadataViewer image={imageDTO} />

View File

@@ -2,7 +2,6 @@ import { Box, Flex, IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { CanvasAlertsSendingToCanvas } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
import { CompareToolbar } from 'features/gallery/components/ImageViewer/CompareToolbar';
import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview';
import { ImageComparison } from 'features/gallery/components/ImageViewer/ImageComparison';
@@ -46,7 +45,7 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
right={0}
bottom={0}
left={0}
rowGap={4}
rowGap={2}
alignItems="center"
justifyContent="center"
>
@@ -57,9 +56,6 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
{hasImageToCompare && <ImageComparison containerDims={containerDims} />}
</Box>
<ImageComparisonDroppable />
<Box position="absolute" top={14} insetInlineEnd={2}>
<CanvasAlertsSendingToCanvas />
</Box>
</Flex>
);
});

View File

@@ -199,6 +199,16 @@ export const handlers = {
parser: parsers.sdxlPositiveStylePrompt,
recaller: recallers.sdxlPositiveStylePrompt,
}),
seamlessX: buildHandlers({
getLabel: () => t('metadata.seamlessXAxis'),
parser: parsers.seamlessX,
recaller: recallers.seamlessX,
}),
seamlessY: buildHandlers({
getLabel: () => t('metadata.seamlessYAxis'),
parser: parsers.seamlessY,
recaller: recallers.seamlessY,
}),
seed: buildHandlers({ getLabel: () => t('metadata.seed'), parser: parsers.seed, recaller: recallers.seed }),
steps: buildHandlers({ getLabel: () => t('metadata.steps'), parser: parsers.steps, recaller: recallers.steps }),
strength: buildHandlers({

View File

@@ -41,6 +41,8 @@ import type {
ParameterSDXLRefinerNegativeAestheticScore,
ParameterSDXLRefinerPositiveAestheticScore,
ParameterSDXLRefinerStart,
ParameterSeamlessX,
ParameterSeamlessY,
ParameterSeed,
ParameterSteps,
ParameterStrength,
@@ -63,6 +65,8 @@ import {
isParameterSDXLRefinerNegativeAestheticScore,
isParameterSDXLRefinerPositiveAestheticScore,
isParameterSDXLRefinerStart,
isParameterSeamlessX,
isParameterSeamlessY,
isParameterSeed,
isParameterSteps,
isParameterStrength,
@@ -160,6 +164,12 @@ const parseSteps: MetadataParseFunc<ParameterSteps> = (metadata) => getProperty(
const parseStrength: MetadataParseFunc<ParameterStrength> = (metadata) =>
getProperty(metadata, 'strength', isParameterStrength);
const parseSeamlessX: MetadataParseFunc<ParameterSeamlessX> = (metadata) =>
getProperty(metadata, 'seamless_x', isParameterSeamlessX);
const parseSeamlessY: MetadataParseFunc<ParameterSeamlessY> = (metadata) =>
getProperty(metadata, 'seamless_y', isParameterSeamlessY);
const parseHRFEnabled: MetadataParseFunc<ParameterHRFEnabled> = async (metadata) => {
try {
return await getProperty(metadata, 'hrf_enabled', isParameterHRFEnabled);
@@ -647,6 +657,8 @@ export const parsers = {
height: parseHeight,
steps: parseSteps,
strength: parseStrength,
seamlessX: parseSeamlessX,
seamlessY: parseSeamlessY,
hrfEnabled: parseHRFEnabled,
hrfStrength: parseHRFStrength,
hrfMethod: parseHRFMethod,

View File

@@ -18,6 +18,8 @@ import {
setRefinerStart,
setRefinerSteps,
setScheduler,
setSeamlessXAxis,
setSeamlessYAxis,
setSeed,
setSteps,
t5EncoderModelSelected,
@@ -44,6 +46,8 @@ import type {
ParameterSDXLRefinerNegativeAestheticScore,
ParameterSDXLRefinerPositiveAestheticScore,
ParameterSDXLRefinerStart,
ParameterSeamlessX,
ParameterSeamlessY,
ParameterSeed,
ParameterSteps,
ParameterStrength,
@@ -106,6 +110,14 @@ const recallStrength: MetadataRecallFunc<ParameterStrength> = (strength) => {
getStore().dispatch(setImg2imgStrength(strength));
};
const recallSeamlessX: MetadataRecallFunc<ParameterSeamlessX> = (enabled) => {
getStore().dispatch(setSeamlessXAxis(enabled));
};
const recallSeamlessY: MetadataRecallFunc<ParameterSeamlessY> = (enabled) => {
getStore().dispatch(setSeamlessYAxis(enabled));
};
const recallHRFEnabled: MetadataRecallFunc<ParameterHRFEnabled> = (hrfEnabled) => {
getStore().dispatch(setHrfEnabled(hrfEnabled));
};
@@ -211,6 +223,8 @@ export const recallers = {
height: recallHeight,
steps: recallSteps,
strength: recallStrength,
seamlessX: recallSeamlessX,
seamlessY: recallSeamlessY,
hrfEnabled: recallHRFEnabled,
hrfStrength: recallHRFStrength,
hrfMethod: recallHRFMethod,

View File

@@ -6,18 +6,31 @@ import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addImageToImage = async (
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
bbox: CanvasState['bbox'],
denoising_start: number,
fp32: boolean
): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'>> => {
type AddImageToImageArg = {
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
bbox: CanvasState['bbox'];
denoising_start: number;
fp32: boolean;
};
export const addImageToImage = async ({
g,
manager,
l2i,
denoise,
vaeSource,
originalSize,
scaledSize,
bbox,
denoising_start,
fp32,
}: AddImageToImageArg): Promise<Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'>> => {
denoise.denoising_start = denoising_start;
const { image_name } = await manager.compositor.getCompositeRasterLayerImageDTO(bbox.rect);

View File

@@ -10,19 +10,33 @@ import { addImageToLatents } from 'features/nodes/util/graph/graphBuilderUtils';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addInpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddInpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addInpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddInpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -55,16 +69,6 @@ export const addInpaint = async (
type: 'img_resize',
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const createGradientMask = g.addNode({
id: getPrefixedId('create_gradient_mask'),
type: 'create_gradient_mask',
@@ -78,6 +82,11 @@ export const addInpaint = async (
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into the gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
@@ -94,21 +103,20 @@ export const addInpaint = async (
g.addEdge(createGradientMask, 'denoise_mask', denoise, 'denoise_mask');
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
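// Net result: l2i -> canvas_v2_mask_and_crop -> img_resize, i.e. a single resize after the paste-back
// instead of resizing the generated image and the mask separately before it.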
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return resizeOutput;
} else {
// No scale before processing, much simpler
const i2l = addImageToLatents(g, modelLoader.type === 'flux_model_loader', fp32, initialImage.image_name);

View File

@@ -10,19 +10,33 @@ import { addImageToLatents, getInfill } from 'features/nodes/util/graph/graphBui
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addOutpaint = async (
state: RootState,
g: Graph,
manager: CanvasManager,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
denoise: Invocation<'denoise_latents' | 'flux_denoise'>,
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>,
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>,
originalSize: Dimensions,
scaledSize: Dimensions,
denoising_start: number,
fp32: boolean
): Promise<Invocation<'canvas_v2_mask_and_crop'>> => {
type AddOutpaintArg = {
state: RootState;
g: Graph;
manager: CanvasManager;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
denoise: Invocation<'denoise_latents' | 'flux_denoise'>;
vaeSource: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader' | 'seamless' | 'vae_loader'>;
modelLoader: Invocation<'main_model_loader' | 'sdxl_model_loader' | 'flux_model_loader'>;
originalSize: Dimensions;
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
};
export const addOutpaint = async ({
state,
g,
manager,
l2i,
denoise,
vaeSource,
modelLoader,
originalSize,
scaledSize,
denoising_start,
fp32,
}: AddOutpaintArg): Promise<Invocation<'canvas_v2_mask_and_crop' | 'img_resize'>> => {
denoise.denoising_start = denoising_start;
const params = selectParamsSlice(state);
@@ -98,40 +112,33 @@ export const addOutpaint = async (
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');
// Resize the output image back to the original size
const resizeOutputImageToOriginalSize = g.addNode({
id: getPrefixedId('resize_image_to_original_size'),
type: 'img_resize',
...originalSize,
});
const resizeOutputMaskToOriginalSize = g.addNode({
id: getPrefixedId('resize_mask_to_original_size'),
type: 'img_resize',
...originalSize,
});
const canvasPasteBack = g.addNode({
id: getPrefixedId('canvas_v2_mask_and_crop'),
type: 'canvas_v2_mask_and_crop',
mask_blur: params.maskBlur,
});
const resizeOutput = g.addNode({
id: getPrefixedId('resize_output'),
type: 'img_resize',
...originalSize,
});
// Resize initial image and mask to scaled size, feed into the gradient mask
// After denoising, resize the image and mask back to original size
g.addEdge(l2i, 'image', resizeOutputImageToOriginalSize, 'image');
g.addEdge(createGradientMask, 'expanded_mask_area', resizeOutputMaskToOriginalSize, 'image');
// Paste the generated masked image back onto the original image
g.addEdge(l2i, 'image', canvasPasteBack, 'generated_image');
g.addEdge(createGradientMask, 'expanded_mask_area', canvasPasteBack, 'mask');
// Finally, paste the generated masked image back onto the original image
g.addEdge(resizeOutputImageToOriginalSize, 'image', canvasPasteBack, 'generated_image');
g.addEdge(resizeOutputMaskToOriginalSize, 'image', canvasPasteBack, 'mask');
// Finally, resize the output back to the original size
g.addEdge(canvasPasteBack, 'image', resizeOutput, 'image');
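// As with inpainting, the paste-back now happens at the scaled size and a single img_resize
// restores the original size afterwards.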
// Do the paste back if we are sending to gallery (in which case we want to see the full image), or if we are sending
// to canvas but not outputting only masked regions
if (!canvasSettings.sendToCanvas || !canvasSettings.outputOnlyMaskedRegions) {
canvasPasteBack.source_image = { image_name: initialImage.image_name };
g.addEdge(resizeInputImageToScaledSize, 'image', canvasPasteBack, 'source_image');
}
return canvasPasteBack;
return resizeOutput;
} else {
infill.image = { image_name: initialImage.image_name };
// No scale before processing, much simpler

View File

@@ -23,6 +23,12 @@ export const addSeamless = (
): Invocation<'seamless'> | null => {
const { seamlessXAxis: seamless_x, seamlessYAxis: seamless_y } = state.params;
// Always write seamless metadata to ensure recalling all parameters will reset the seamless settings
g.upsertMetadata({
seamless_x,
seamless_y,
});
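// e.g. a run with both axes disabled still records { seamless_x: false, seamless_y: false },
// so recalling all parameters from such an image switches seamless back off.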
if (!seamless_x && !seamless_y) {
return null;
}
@@ -34,11 +40,6 @@ export const addSeamless = (
seamless_y,
});
g.upsertMetadata({
seamless_x: seamless_x || undefined,
seamless_y: seamless_y || undefined,
});
// Seamless slots into the graph between the model loader and the denoise node
g.deleteEdgesFrom(modelLoader, ['unet']);
g.deleteEdgesFrom(modelLoader, ['vae']);

View File

@@ -4,12 +4,19 @@ import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
export const addTextToImage = (
g: Graph,
l2i: Invocation<'l2i' | 'flux_vae_decode'>,
originalSize: Dimensions,
scaledSize: Dimensions
): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'> => {
type AddTextToImageArg = {
g: Graph;
l2i: Invocation<'l2i' | 'flux_vae_decode'>;
originalSize: Dimensions;
scaledSize: Dimensions;
};
export const addTextToImage = ({
g,
l2i,
originalSize,
scaledSize,
}: AddTextToImageArg): Invocation<'img_resize' | 'l2i' | 'flux_vae_decode'> => {
if (!isEqual(scaledSize, originalSize)) {
// We need to resize the output image back to the original size
const resizeImageToOriginalSize = g.addNode({

View File

@@ -14,9 +14,15 @@ import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addControlNets } from './addControlAdapters';
@@ -74,7 +80,7 @@ export const buildFLUXGraph = async (
prompt: positivePrompt,
});
const noise = g.addNode({
const denoise = g.addNode({
type: 'flux_denoise',
id: getPrefixedId('flux_denoise'),
guidance,
@@ -91,23 +97,19 @@ export const buildFLUXGraph = async (
id: getPrefixedId('flux_vae_decode'),
});
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'transformer', noise, 'transformer');
g.addEdge(modelLoader, 'vae', noise, 'controlnet_vae');
g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
g.addEdge(modelLoader, 'vae', denoise, 'controlnet_vae');
g.addEdge(modelLoader, 'vae', l2i, 'vae');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', posCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', posCond, 't5_max_seq_len');
addFLUXLoRAs(state, g, noise, modelLoader, posCond);
addFLUXLoRAs(state, g, denoise, modelLoader, posCond);
g.addEdge(posCond, 'conditioning', noise, 'positive_text_conditioning');
g.addEdge(posCond, 'conditioning', denoise, 'positive_text_conditioning');
g.addEdge(noise, 'latents', l2i, 'latents');
g.addEdge(denoise, 'latents', l2i, 'latents');
const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
assert(modelConfig.base === 'flux');
@@ -126,59 +128,65 @@ export const buildFLUXGraph = async (
clip_embed_model: clipEmbedModel,
});
let denoisingStart: number;
let denoising_start: number;
if (optimizedDenoisingEnabled) {
// We rescale the img2imgStrength (with exponent 0.2) to effectively use the entire range [0, 1] and make the scale
// more user-friendly for FLUX. Without this, most of the 'change' is concentrated in the high denoise strength
// range (>0.9).
denoisingStart = 1 - img2imgStrength ** 0.2;
denoising_start = 1 - img2imgStrength ** 0.2;
} else {
denoisingStart = 1 - img2imgStrength;
denoising_start = 1 - img2imgStrength;
}
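// e.g. img2imgStrength = 0.5 gives denoising_start = 1 - 0.5 ** 0.2 ≈ 0.13 with the rescale,
// versus 0.5 without it.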
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
originalSize,
scaledSize,
bbox,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
l2i,
noise,
modelLoader,
denoise,
vaeSource: modelLoader,
modelLoader,
originalSize,
scaledSize,
denoisingStart,
false
);
denoising_start,
fp32: false,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -194,7 +202,7 @@ export const buildFLUXGraph = async (
modelConfig.base
);
if (controlNetResult.addedControlNets > 0) {
g.addEdge(controlNetCollector, 'collection', noise, 'control');
g.addEdge(controlNetCollector, 'collection', denoise, 'control');
} else {
g.deleteNode(controlNetCollector.id);
}
@@ -221,14 +229,14 @@ export const buildFLUXGraph = async (
g.addEdge(modelLoader, 'clip', negCond, 'clip');
g.addEdge(modelLoader, 't5_encoder', negCond, 't5_encoder');
g.addEdge(modelLoader, 'max_seq_len', negCond, 't5_max_seq_len');
g.addEdge(negCond, 'conditioning', noise, 'negative_text_conditioning');
g.addEdge(negCond, 'conditioning', denoise, 'negative_text_conditioning');
g.updateNode(noise, {
g.updateNode(denoise, {
cfg_scale: 3,
cfg_scale_start_step,
cfg_scale_end_step,
});
g.addEdge(ipAdapterCollector, 'collection', noise, 'ip_adapter');
g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
} else {
g.deleteNode(ipAdapterCollector.id);
}
@@ -250,12 +258,12 @@ export const buildFLUXGraph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,
});
g.setMetadataReceivingNode(canvasOutput);
return { g, noise, posCond };
return { g, noise: denoise, posCond };
};

View File

@@ -18,9 +18,15 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -120,10 +126,6 @@ export const buildSD1Graph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', clipSkip, 'clip');
g.addEdge(clipSkip, 'clip', posCond, 'clip');
@@ -165,10 +167,16 @@ export const buildSD1Graph = async (
> = seamless ?? vaeLoader ?? modelLoader;
g.addEdge(vaeSource, 'vae', l2i, 'vae');
const denoising_start = 1 - params.img2imgStrength;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
@@ -177,11 +185,11 @@ export const buildSD1Graph = async (
originalSize,
scaledSize,
bbox,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
@@ -191,11 +199,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
vaePrecision === 'fp32'
);
denoising_start,
fp32: vaePrecision === 'fp32',
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
@@ -205,9 +213,11 @@ export const buildSD1Graph = async (
modelLoader,
originalSize,
scaledSize,
1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -291,7 +301,7 @@ export const buildSD1Graph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,

View File

@@ -18,9 +18,15 @@ import { addSeamless } from 'features/nodes/util/graph/generation/addSeamless';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import { getBoardField, getPresetModifiedPrompts, getSizes } from 'features/nodes/util/graph/graphBuilderUtils';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
getPresetModifiedPrompts,
getSizes,
} from 'features/nodes/util/graph/graphBuilderUtils';
import type { Invocation } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { addRegions } from './addRegions';
@@ -118,10 +124,6 @@ export const buildSDXLGraph = async (
})
: null;
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
g.addEdge(modelLoader, 'unet', denoise, 'unet');
g.addEdge(modelLoader, 'clip', posCond, 'clip');
g.addEdge(modelLoader, 'clip', negCond, 'clip');
@@ -168,10 +170,18 @@ export const buildSDXLGraph = async (
await addSDXLRefiner(state, g, denoise, seamless, posCond, negCond, l2i);
}
const denoising_start = refinerModel
? Math.min(refinerStart, 1 - params.img2imgStrength)
: 1 - params.img2imgStrength;
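// e.g. refinerStart = 0.8 and img2imgStrength = 0.3 give denoising_start = Math.min(0.8, 0.7) = 0.7.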
let canvasOutput: Invocation<
'l2i' | 'img_nsfw' | 'img_watermark' | 'img_resize' | 'canvas_v2_mask_and_crop' | 'flux_vae_decode'
> = l2i;
if (generationMode === 'txt2img') {
canvasOutput = addTextToImage(g, l2i, originalSize, scaledSize);
canvasOutput = addTextToImage({ g, l2i, originalSize, scaledSize });
} else if (generationMode === 'img2img') {
canvasOutput = await addImageToImage(
canvasOutput = await addImageToImage({
g,
manager,
l2i,
@@ -180,11 +190,11 @@ export const buildSDXLGraph = async (
originalSize,
scaledSize,
bbox,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'inpaint') {
canvasOutput = await addInpaint(
canvasOutput = await addInpaint({
state,
g,
manager,
@@ -194,11 +204,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else if (generationMode === 'outpaint') {
canvasOutput = await addOutpaint(
canvasOutput = await addOutpaint({
state,
g,
manager,
@@ -208,9 +218,11 @@ export const buildSDXLGraph = async (
modelLoader,
originalSize,
scaledSize,
refinerModel ? Math.min(refinerStart, 1 - params.img2imgStrength) : 1 - params.img2imgStrength,
fp32
);
denoising_start,
fp32,
});
} else {
assert<Equals<typeof generationMode, never>>(false);
}
const controlNetCollector = g.addNode({
@@ -294,7 +306,7 @@ export const buildSDXLGraph = async (
}
g.updateNode(canvasOutput, {
id: getPrefixedId('canvas_output'),
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
is_intermediate,
use_cache: false,
board,

View File

@@ -129,3 +129,5 @@ export const addImageToLatents = (g: Graph, isFlux: boolean, fp32: boolean, imag
return g.addNode({ id: 'i2l', type: 'i2l', fp32, image: image_name ? { image_name } : undefined });
}
};
export const CANVAS_OUTPUT_PREFIX = 'canvas_output';

View File

@@ -1,15 +1,160 @@
import { IconButton } from '@invoke-ai/ui-library';
import { Flex, IconButton, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { EMPTY_OBJECT } from 'app/store/constants';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import {
selectCFGRescaleMultiplier,
selectCFGScale,
selectGuidance,
selectModel,
selectScheduler,
selectSteps,
selectVAE,
selectVAEPrecision,
} from 'features/controlLayers/store/paramsSlice';
import { selectHeight, selectWidth } from 'features/controlLayers/store/selectors';
import { setDefaultSettings } from 'features/parameters/store/actions';
import { useCallback } from 'react';
import { isNil } from 'lodash-es';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiSparkleFill } from 'react-icons/pi';
import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/api/endpoints/models';
import type { S } from 'services/api/types';
import { isNonRefinerMainModelConfig } from 'services/api/types';
export const UseDefaultSettingsButton = () => {
const model = useAppSelector(selectModel);
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { data: modelConfigs } = useGetModelConfigsQuery();
const scheduler = useAppSelector(selectScheduler);
const steps = useAppSelector(selectSteps);
const vae = useAppSelector(selectVAE);
const vaePrecision = useAppSelector(selectVAEPrecision);
const width = useAppSelector(selectWidth);
const height = useAppSelector(selectHeight);
const guidance = useAppSelector(selectGuidance);
const cfg = useAppSelector(selectCFGScale);
const cfgRescale = useAppSelector(selectCFGRescaleMultiplier);
const modelConfig = useMemo(() => {
if (!modelConfigs) {
return null;
}
if (model === null) {
return null;
}
return modelConfigsAdapterSelectors.selectById(modelConfigs, model.key);
}, [modelConfigs, model]);
const hasDefaultSettings = useMemo(() => {
const settings = modelConfig && isNonRefinerMainModelConfig(modelConfig) && modelConfig.default_settings;
return settings && Object.values(settings).some((setting) => !!setting);
}, [modelConfig]);
const defaultSettings = useMemo<S['MainModelDefaultSettings']>(() => {
return modelConfig && isNonRefinerMainModelConfig(modelConfig) && modelConfig.default_settings
? modelConfig.default_settings
: EMPTY_OBJECT;
}, [modelConfig]);
const outOfSyncSettings = useMemo(() => {
const settings = [];
if (hasDefaultSettings) {
const {
vae: defaultVAE,
vae_precision: defaultVAEPrecision,
cfg_scale: defaultCfg,
cfg_rescale_multiplier: defaultCfgRescale,
steps: defaultSteps,
scheduler: defaultScheduler,
width: defaultWidth,
height: defaultHeight,
guidance: defaultGuidance,
} = defaultSettings;
if (!isNil(defaultVAE) && vae && defaultVAE !== vae.key) {
settings.push(t('modelManager.vae'));
}
if (!isNil(defaultVAE) && !vae && defaultVAE !== 'default') {
settings.push(t('modelManager.vae'));
}
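// The two checks above cover an explicitly selected VAE whose key differs from the default,
// and the case where no VAE is selected while the default names a specific VAE
// ('default' being the sentinel for the model's bundled VAE).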
if (!isNil(defaultVAEPrecision) && defaultVAEPrecision !== vaePrecision) {
settings.push(t('modelManager.vaePrecision'));
}
if (!isNil(defaultCfg) && defaultCfg !== cfg) {
settings.push(t('parameters.cfgScale'));
}
if (!isNil(defaultCfgRescale) && defaultCfgRescale !== cfgRescale) {
settings.push(t('parameters.cfgRescaleMultiplier'));
}
if (!isNil(defaultSteps) && defaultSteps !== steps) {
settings.push(t('parameters.steps'));
}
if (!isNil(defaultScheduler) && defaultScheduler !== scheduler) {
settings.push(t('parameters.scheduler'));
}
if (!isNil(defaultWidth) && defaultWidth !== width) {
settings.push(t('parameters.width'));
}
if (!isNil(defaultHeight) && defaultHeight !== height) {
settings.push(t('parameters.height'));
}
if (!isNil(defaultGuidance) && defaultGuidance !== guidance) {
settings.push(t('parameters.guidance'));
}
}
return settings;
}, [
hasDefaultSettings,
vae,
vaePrecision,
cfg,
cfgRescale,
steps,
scheduler,
width,
height,
guidance,
t,
defaultSettings,
]);
const tooltip = useMemo(() => {
if (!model) {
return t('modelManager.noModelSelected');
}
if (!hasDefaultSettings) {
return t('modelManager.noDefaultSettings');
}
if (outOfSyncSettings.length === 0) {
return t('modelManager.usingDefaultSettings');
}
return (
<Flex direction="column" gap={2}>
<Text>{t('modelManager.defaultSettingsOutOfSync')}</Text>
<UnorderedList>
{outOfSyncSettings.map((setting) => (
<ListItem key={setting}>{setting}</ListItem>
))}
</UnorderedList>
<Text>{t('modelManager.restoreDefaultSettings')}</Text>
</Flex>
);
}, [model, hasDefaultSettings, outOfSyncSettings, t]);
const handleClickDefaultSettings = useCallback(() => {
dispatch(setDefaultSettings());
@@ -18,12 +163,13 @@ export const UseDefaultSettingsButton = () => {
return (
<IconButton
icon={<PiSparkleFill />}
tooltip={t('modelManager.useDefaultSettings')}
tooltip={tooltip}
aria-label={t('modelManager.useDefaultSettings')}
isDisabled={!model}
isDisabled={!model || !hasDefaultSettings || outOfSyncSettings.length === 0}
onClick={handleClickDefaultSettings}
size="sm"
variant="ghost"
colorScheme="warning"
/>
);
};

View File

@@ -141,6 +141,20 @@ export const isParameterStrength = (val: unknown): val is ParameterStrength =>
zParameterStrength.safeParse(val).success;
// #endregion
// #region SeamlessX
const zParameterSeamlessX = z.boolean();
export type ParameterSeamlessX = z.infer<typeof zParameterSeamlessX>;
export const isParameterSeamlessX = (val: unknown): val is ParameterSeamlessX =>
zParameterSeamlessX.safeParse(val).success;
// #endregion
// #region SeamlessY
const zParameterSeamlessY = z.boolean();
export type ParameterSeamlessY = z.infer<typeof zParameterSeamlessY>;
export const isParameterSeamlessY = (val: unknown): val is ParameterSeamlessY =>
zParameterSeamlessY.safeParse(val).success;
// #endregion
// #region Precision
const zParameterPrecision = z.enum(['fp16', 'fp32']);
export type ParameterPrecision = z.infer<typeof zParameterPrecision>;

View File

@@ -31,10 +31,12 @@ import {
selectSystemShouldConfirmOnDelete,
selectSystemShouldConfirmOnNewSession,
selectSystemShouldEnableInformationalPopovers,
selectSystemShouldEnableModelDescriptions,
selectSystemShouldUseNSFWChecker,
selectSystemShouldUseWatermarker,
setShouldConfirmOnDelete,
setShouldEnableInformationalPopovers,
setShouldEnableModelDescriptions,
shouldAntialiasProgressImageChanged,
shouldConfirmOnNewSessionToggled,
shouldUseNSFWCheckerChanged,
@@ -99,6 +101,7 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
const shouldUseNSFWChecker = useAppSelector(selectSystemShouldUseNSFWChecker);
const shouldUseWatermarker = useAppSelector(selectSystemShouldUseWatermarker);
const shouldEnableInformationalPopovers = useAppSelector(selectSystemShouldEnableInformationalPopovers);
const shouldEnableModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
const shouldConfirmOnNewSession = useAppSelector(selectSystemShouldConfirmOnNewSession);
const onToggleConfirmOnNewSession = useCallback(() => {
dispatch(shouldConfirmOnNewSessionToggled());
@@ -154,6 +157,12 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
},
[dispatch]
);
const handleChangeShouldEnableModelDescriptions = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
dispatch(setShouldEnableModelDescriptions(e.target.checked));
},
[dispatch]
);
const handleChangeShouldUseCpuNoise = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
dispatch(shouldUseCpuNoiseChanged(e.target.checked));
@@ -226,6 +235,13 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
onChange={handleChangeShouldEnableInformationalPopovers}
/>
</FormControl>
<FormControl>
<FormLabel>{t('settings.enableModelDescriptions')}</FormLabel>
<Switch
isChecked={shouldEnableModelDescriptions}
onChange={handleChangeShouldEnableModelDescriptions}
/>
</FormControl>
</StickyScrollable>
{Boolean(config?.shouldShowDeveloperSettings) && (

View File

@@ -17,6 +17,7 @@ const initialSystemState: SystemState = {
shouldUseNSFWChecker: false,
shouldUseWatermarker: false,
shouldEnableInformationalPopovers: true,
shouldEnableModelDescriptions: true,
logIsEnabled: true,
logLevel: 'debug',
logNamespaces: [...zLogNamespace.options],
@@ -57,6 +58,9 @@ export const systemSlice = createSlice({
setShouldEnableInformationalPopovers(state, action: PayloadAction<boolean>) {
state.shouldEnableInformationalPopovers = action.payload;
},
setShouldEnableModelDescriptions(state, action: PayloadAction<boolean>) {
state.shouldEnableModelDescriptions = action.payload;
},
shouldConfirmOnNewSessionToggled(state) {
state.shouldConfirmOnNewSession = !state.shouldConfirmOnNewSession;
},
@@ -73,6 +77,7 @@ export const {
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
setShouldEnableInformationalPopovers,
setShouldEnableModelDescriptions,
shouldConfirmOnNewSessionToggled,
} = systemSlice.actions;
@@ -108,4 +113,7 @@ export const selectSystemShouldAntialiasProgressImage = createSystemSelector(
export const selectSystemShouldEnableInformationalPopovers = createSystemSelector(
(system) => system.shouldEnableInformationalPopovers
);
export const selectSystemShouldEnableModelDescriptions = createSystemSelector(
(system) => system.shouldEnableModelDescriptions
);
export const selectSystemShouldConfirmOnNewSession = createSystemSelector((system) => system.shouldConfirmOnNewSession);

View File

@@ -37,6 +37,7 @@ export interface SystemState {
shouldUseNSFWChecker: boolean;
shouldUseWatermarker: boolean;
shouldEnableInformationalPopovers: boolean;
shouldEnableModelDescriptions: boolean;
logIsEnabled: boolean;
logLevel: LogLevel;
logNamespaces: LogNamespace[];

View File

@@ -21,14 +21,14 @@ import { useTranslation } from 'react-i18next';
import { PiLightbulbFilamentBold } from 'react-icons/pi';
import { useGetAppVersionQuery } from 'services/api/endpoints/appInfo';
import { CanvasV2Announcement } from './CanvasV2Announcement';
import { WhatsNew } from './WhatsNew';
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
export const Notifications = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const shouldShowNotification = useAppSelector((s) => s.ui.shouldShowNotification);
const shouldShowNotification = useAppSelector((s) => s.ui.shouldShowNotificationV2);
const resetIndicator = useCallback(() => {
dispatch(shouldShowNotificationChanged(false));
}, [dispatch]);
@@ -58,11 +58,16 @@ export const Notifications = () => {
<Flex alignItems="center" gap={3}>
<Image src={InvokeSymbol} boxSize={6} />
{t('whatsNew.whatsNewInInvoke')}
{isLocal && <Text variant="subtext">{`v${data.version}`}</Text>}
{!!data.version.length &&
(isLocal ? (
<Text variant="subtext">{`v${data.version}`}</Text>
) : (
<Text variant="subtext">{data.version}</Text>
))}
</Flex>
</PopoverHeader>
<PopoverBody p={2}>
<CanvasV2Announcement />
<PopoverBody p={2} maxW={300}>
<WhatsNew />
</PopoverBody>
</PopoverContent>
</Popover>

View File

@@ -1,27 +1,34 @@
import { ExternalLink, Flex, ListItem, UnorderedList } from '@invoke-ai/ui-library';
import { ExternalLink, Flex, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { useTranslation } from 'react-i18next';
import { Trans, useTranslation } from 'react-i18next';
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
export const CanvasV2Announcement = () => {
export const WhatsNew = () => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
return (
<Flex gap={4} flexDir="column">
<UnorderedList fontSize="sm">
<ListItem>{t('whatsNew.canvasV2Announcement.newCanvas')}</ListItem>
<ListItem>{t('whatsNew.canvasV2Announcement.newLayerTypes')}</ListItem>
<ListItem>{t('whatsNew.canvasV2Announcement.fluxSupport')}</ListItem>
<ListItem>
<Trans
i18nKey="whatsNew.line1"
components={{
ItalicComponent: <Text as="span" color="white" fontSize="sm" fontStyle="italic" />,
}}
/>
</ListItem>
<ListItem>{t('whatsNew.line2')}</ListItem>
<ListItem>{t('whatsNew.line3')}</ListItem>
</UnorderedList>
<Flex flexDir="column" gap={1}>
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.readReleaseNotes')}
label={t('whatsNew.readReleaseNotes')}
href={
isLocal
? 'https://github.com/invoke-ai/InvokeAI/releases/tag/v5.0.0'
@@ -31,14 +38,8 @@ export const CanvasV2Announcement = () => {
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.watchReleaseVideo')}
href="https://www.youtube.com/watch?v=y80W3PjR0Gc"
/>
<ExternalLink
fontSize="sm"
fontWeight="semibold"
label={t('whatsNew.canvasV2Announcement.watchUiUpdatesOverview')}
href="https://www.youtube.com/watch?v=Tl-69JvwJ2s"
label={t('whatsNew.watchRecentReleaseVideos')}
href="https://www.youtube.com/@invokeai/videos"
/>
</Flex>
</Flex>

View File

@@ -15,7 +15,7 @@ const initialUIState: UIState = {
shouldShowProgressInViewer: true,
accordions: {},
expanders: {},
shouldShowNotification: true,
shouldShowNotificationV2: true,
};
export const uiSlice = createSlice({
@@ -43,7 +43,7 @@ export const uiSlice = createSlice({
state.expanders[id] = isOpen;
},
shouldShowNotificationChanged: (state, action: PayloadAction<boolean>) => {
state.shouldShowNotification = action.payload;
state.shouldShowNotificationV2 = action.payload;
},
},
extraReducers(builder) {

View File

@@ -31,7 +31,7 @@ export interface UIState {
*/
expanders: Record<string, boolean>;
/**
* Whether or not to show the user the open notification.
* Whether or not to show the user the open notification. Bump version to reset users who may have closed previous version.
*/
shouldShowNotification: boolean;
shouldShowNotificationV2: boolean;
}

View File

@@ -1714,6 +1714,12 @@ export type components = {
* @default null
*/
image?: components["schemas"]["ImageField"];
/**
* Invert
* @description Whether to invert the mask.
* @default false
*/
invert?: boolean;
/**
* type
* @default apply_tensor_mask_to_image

View File

@@ -6,6 +6,7 @@ import { stagingAreaImageStaged } from 'features/controlLayers/store/canvasStagi
import { boardIdSelected, galleryViewChanged, imageSelected, offsetChanged } from 'features/gallery/store/gallerySlice';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
import { zNodeStatus } from 'features/nodes/types/invocation';
import { CANVAS_OUTPUT_PREFIX } from 'features/nodes/util/graph/graphBuilderUtils';
import { boardsApi } from 'services/api/endpoints/boards';
import { getImageDTOSafe, imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO, S } from 'services/api/types';
@@ -15,7 +16,7 @@ import { $lastProgressEvent } from 'services/events/stores';
const log = logger('events');
const isCanvasOutputNode = (data: S['InvocationCompleteEvent']) => {
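// Prefixed source ids are compared on the segment before the first ':' (e.g. 'canvas_output:<id>').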
return data.invocation_source_id.split(':')[0] === 'canvas_output';
return data.invocation_source_id.split(':')[0] === CANVAS_OUTPUT_PREFIX;
};
const nodeTypeDenylist = ['load_image', 'image'];

View File

@@ -1 +1 @@
__version__ = "5.3.0rc1"
__version__ = "5.4.0a1"

View File

@@ -33,12 +33,12 @@ classifiers = [
]
dependencies = [
# Core generation dependencies, pinned for reproducible builds.
"accelerate==0.30.1",
"accelerate==1.0.1",
"bitsandbytes==0.43.3; sys_platform!='darwin'",
"clip_anytorch==2.6.0", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"compel==2.0.2",
"controlnet-aux==0.0.7",
"diffusers[torch]==0.27.2",
"diffusers[torch]==0.31.0",
"gguf==0.10.0",
"invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids
"mediapipe>=0.10.7", # needed for "mediapipeface" controlnet model
@@ -61,7 +61,7 @@ dependencies = [
# Core application dependencies, pinned for reproducible builds.
"fastapi-events==0.11.1",
"fastapi==0.111.0",
"huggingface-hub==0.23.1",
"huggingface-hub==0.26.1",
"pydantic-settings==2.2.1",
"pydantic==2.7.2",
"python-socketio==5.11.1",