Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-22 12:08:05 -05:00)

Compare commits (86 total):

731d317a42, e81579f752, 9a10e98c0b, 27fdc139b7, 0a00805afc, 7b38143fbd, 4c5ad1b7d7, d80cc962ad,
7ccabfa200, 936d59cc52, fc16fb6099, c848cbc2e3, 66fd0f0d8a, c266f39f06, 98a44fa4d7, c1d230f961,
68108435ae, e121bf1f62, 4835c344b3, a589dec122, bc67d5c841, f3d5691c04, b98abc2457, 7e527ccfb7,
0f0c911845, e4818b967b, ce3eede26f, d98725c5e9, 31a96d2945, 845a321a43, 87a44a28ef, d5b9c3ee5a,
91db136cd1, f351ad4b66, fb6fb9abbd, 675c990486, 6ee5cde4bb, c8077f9430, 6aabe9959e, 0b58d172d2,
d7c6e293d7, c600bc867d, f4140dd772, a2d8261d40, bce88a8873, b37e1a3ad6, 35a088e0a6, b936cab039,
34e4093408, d7f93c3cc0, d4c4926caa, 558c7db055, 2ece59b51b, 7dbe39957c, 6fa46d35a5, b2a2b38ea8,
12934da390, 231bc18188, 530cd180c5, 2a92e7b920, 019e057e29, 9aa26f883e, 3f727e24b1, 9e90bf1b20,
db3964797f, 881efbda1b, e9ce2ed5f2, 53ac9eafbf, 9e095006a5, 21b24c3ba6, 139ecc10ce, 78ea143b46,
174249ec15, 2510ad7431, ba5e855a60, 23627cf18d, 5e20c9a1ca, 933cf5f276, 41316de659, 041ccfd68e,
ad24c203a4, 3fd28ce600, 32df3bdf6e, ba69e89e8c, a8e0c48ddc, 66f6571086
.github/CODEOWNERS (vendored, 24 changed lines)

@@ -1,5 +1,5 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious

 # documentation
 /docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
@@ -9,13 +9,13 @@
 /invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku

 # installation and configuration
-/pyproject.toml @lstein @blessedcoolant @hipsterusername
-/docker/ @lstein @blessedcoolant @hipsterusername @ebr
-/scripts/ @ebr @lstein @hipsterusername
-/installer/ @lstein @ebr @hipsterusername
-/invokeai/assets @lstein @ebr @hipsterusername
-/invokeai/configs @lstein @hipsterusername
-/invokeai/version @lstein @blessedcoolant @hipsterusername
+/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
+/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
+/scripts/ @ebr @lstein @psychedelicious @hipsterusername
+/installer/ @lstein @ebr @psychedelicious @hipsterusername
+/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
+/invokeai/configs @lstein @psychedelicious @hipsterusername
+/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername

 # web ui
 /invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
@@ -24,8 +24,8 @@
 /invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp

 # front ends
-/invokeai/frontend/CLI @lstein @hipsterusername
-/invokeai/frontend/install @lstein @ebr @hipsterusername
-/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
-/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
+/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
+/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
+/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
+/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
 /invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
.github/workflows/python-checks.yml (vendored, 4 changed lines)

@@ -67,6 +67,10 @@ jobs:
           version: '0.6.10'
           enable-cache: true

+      - name: check pypi classifiers
+        if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
+        run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
+
       - name: ruff check
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
         run: uv tool run ruff@0.11.2 check --output-format=github .
@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
 7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.

-    === "Invoke v5.10.0 and later"
+    === "Invoke v5.12 and later"
+
+        - If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
+        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
+        - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
+        - **In all other cases, do not use an index.**
+
+    === "Invoke v5.10.0 to v5.11.0"

         - If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
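For reference, the v5.12+ decision table above condenses to a few lines of logic. The sketch below is illustrative only: the function name and the platform/GPU inputs are assumptions, while the URLs come from the docs diff above.

    def torch_index_url(os_name: str, gpu: str | None) -> str | None:
        """Hypothetical helper mirroring the v5.12+ table; not part of Invoke."""
        if os_name in ("Windows", "Linux") and gpu == "nvidia":
            return "https://download.pytorch.org/whl/cu128"
        if os_name == "Linux" and gpu is None:
            return "https://download.pytorch.org/whl/cpu"
        if os_name == "Linux" and gpu == "amd":
            return "https://download.pytorch.org/whl/rocm6.2.4"
        return None  # in all other cases, do not use an index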
@@ -13,6 +13,7 @@ If you'd prefer, you can also just download the whole node folder from the linke
 To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.

 - Community Nodes
+    + [Anamorphic Tools](#anamorphic-tools)
     + [Adapters-Linked](#adapters-linked-nodes)
     + [Autostereogram](#autostereogram-nodes)
     + [Average Images](#average-images)
@@ -20,9 +21,12 @@ To use a community workflow, download the `.json` node graph file and load it in
     + [Close Color Mask](#close-color-mask)
     + [Clothing Mask](#clothing-mask)
     + [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
+    + [Curves](#curves)
     + [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
     + [Enhance Detail](#enhance-detail)
     + [Film Grain](#film-grain)
+    + [Flip Pose](#flip-pose)
+    + [Flux Ideal Size](#flux-ideal-size)
     + [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
     + [GPT2RandomPromptMaker](#gpt2randompromptmaker)
     + [Grid to Gif](#grid-to-gif)
@@ -61,6 +65,13 @@ To use a community workflow, download the `.json` node graph file and load it in
 - [Help](#help)


+--------------------------------
+### Anamorphic Tools
+
+**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.
+
+**Node Link:** https://github.com/JPPhoto/anamorphic-tools
+
 --------------------------------
 ### Adapters Linked Nodes
@@ -132,6 +143,13 @@ Node Link: https://github.com/VeyDlin/clahe-node
 View:
 </br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />

+--------------------------------
+### Curves
+
+**Description:** Adjust an image's curve based on a user-defined string.
+
+**Node Link:** https://github.com/JPPhoto/curves-node
+
 --------------------------------
 ### Depth Map from Wavefront OBJ
@@ -162,6 +180,20 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha

 **Node Link:** https://github.com/JPPhoto/film-grain-node

+--------------------------------
+### Flip Pose
+
+**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.
+
+**Node Link:** https://github.com/JPPhoto/flip-pose-node
+
+--------------------------------
+### Flux Ideal Size
+
+**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.
+
+**Node Link:** https://github.com/JPPhoto/flux-ideal-size
+
 --------------------------------
 ### Generative Grammar-Based Prompt Nodes
@@ -146,7 +146,7 @@ async def list_boards(
     response_model=list[str],
 )
 async def list_all_board_image_names(
-    board_id: str = Path(description="The id of the board"),
+    board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
     categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
     is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
 ) -> list[str]:
@@ -1,12 +1,13 @@
 import io
+import json
 import traceback
-from typing import Optional
+from typing import ClassVar, Optional

 from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
 from fastapi.responses import FileResponse
 from fastapi.routing import APIRouter
 from PIL import Image
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, model_validator

 from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
@@ -19,6 +20,8 @@ from invokeai.app.services.image_records.image_records_common import (
 from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
+from invokeai.app.util.controlnet_utils import heuristic_resize_fast
+from invokeai.backend.image_util.util import np_to_pil, pil_to_np

 images_router = APIRouter(prefix="/v1/images", tags=["images"])
@@ -27,6 +30,19 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
 IMAGE_MAX_AGE = 31536000


+class ResizeToDimensions(BaseModel):
+    width: int = Field(..., gt=0)
+    height: int = Field(..., gt=0)
+
+    MAX_SIZE: ClassVar[int] = 4096 * 4096
+
+    @model_validator(mode="after")
+    def validate_total_output_size(self):
+        if self.width * self.height > self.MAX_SIZE:
+            raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
+        return self
+
+
 @images_router.post(
     "/upload",
     operation_id="upload_image",
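A quick illustration of the validator, using only the model shown above (pydantic surfaces the ValueError as a ValidationError at construction time):

    ResizeToDimensions(width=1024, height=1024)  # ok: 1,048,576 px is well under 4096 * 4096
    ResizeToDimensions(width=4097, height=4096)  # rejected: validate_total_output_size raises ValueError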
@@ -46,6 +62,11 @@ async def upload_image(
     board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
     session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
     crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
+    resize_to: Optional[str] = Body(
+        default=None,
+        description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
+        example='"[1024,1024]"',
+    ),
     metadata: Optional[str] = Body(
         default=None,
         description="The metadata to associate with the image, must be a stringified JSON dict",
@@ -59,13 +80,31 @@ async def upload_image(
     contents = await file.read()
     try:
         pil_image = Image.open(io.BytesIO(contents))
-        if crop_visible:
-            bbox = pil_image.getbbox()
-            pil_image = pil_image.crop(bbox)
     except Exception:
         ApiDependencies.invoker.services.logger.error(traceback.format_exc())
         raise HTTPException(status_code=415, detail="Failed to read image")

+    if crop_visible:
+        try:
+            bbox = pil_image.getbbox()
+            pil_image = pil_image.crop(bbox)
+        except Exception:
+            raise HTTPException(status_code=500, detail="Failed to crop image")
+
+    if resize_to:
+        try:
+            dims = json.loads(resize_to)
+            resize_dims = ResizeToDimensions(**dims)
+        except Exception:
+            raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
+
+        try:
+            np_image = pil_to_np(pil_image)
+            np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
+            pil_image = np_to_pil(np_image)
+        except Exception:
+            raise HTTPException(status_code=500, detail="Failed to resize image")
+
     extracted_metadata = extract_metadata_from_image(
         pil_image=pil_image,
         invokeai_metadata_override=metadata,
@@ -356,6 +395,29 @@ async def delete_images_from_list(
         raise HTTPException(status_code=500, detail="Failed to delete images")


+@images_router.delete(
+    "/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
+)
+async def delete_uncategorized_images() -> DeleteImagesFromListResult:
+    """Deletes all images that are uncategorized"""
+
+    image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
+        board_id="none", categories=None, is_intermediate=None
+    )
+
+    try:
+        deleted_images: list[str] = []
+        for image_name in image_names:
+            try:
+                ApiDependencies.invoker.services.images.delete(image_name)
+                deleted_images.append(image_name)
+            except Exception:
+                pass
+        return DeleteImagesFromListResult(deleted_images=deleted_images)
+    except Exception:
+        raise HTTPException(status_code=500, detail="Failed to delete images")
+
+
 class ImagesUpdatedFromListResult(BaseModel):
     updated_image_names: list[str] = Field(description="The image names that were updated")
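A hedged sketch of exercising the new endpoint from a client. The host, port, and `/api` prefix are assumptions based on a default local install; only the route and response shape come from the diff above.

    import requests

    resp = requests.delete("http://127.0.0.1:9090/api/v1/images/uncategorized")
    resp.raise_for_status()
    print(resp.json()["deleted_images"])  # names of the uncategorized images that were removed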
@@ -643,6 +643,16 @@ def invocation(
     fields["type"] = (invocation_type_annotation, invocation_type_field_info)

+    # Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
+    # output is registered _after_ this invocation is registered. It depends on module import ordering.
+    #
+    # We can only confirm the output for an invocation is registered after all modules are imported. There's
+    # only really one good time to do that - during application startup, in `run_app.py`, after loading all
+    # custom nodes.
+    #
+    # We can still do some basic validation here - ensure the invoke method is defined and returns an instance
+    # of BaseInvocationOutput.
+
     # Validate the `invoke()` method is implemented
     if "invoke" in cls.__abstractmethods__:
         raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
@@ -1,7 +1,7 @@
 from typing import Iterator, List, Optional, Tuple, Union, cast

 import torch
-from compel import Compel, ReturnedEmbeddingsType
+from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
 from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

@@ -104,6 +104,7 @@ class CompelInvocation(BaseInvocation):
             dtype_for_device_getter=TorchDevice.choose_torch_dtype,
             truncate_long_prompts=False,
             device=TorchDevice.choose_torch_device(),
+            split_long_text_mode=SplitLongTextMode.SENTENCES,
         )

         conjunction = Compel.parse_prompt_string(self.prompt)
@@ -205,6 +206,7 @@ class SDXLPromptInvocationBase:
             returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,  # TODO: clip skip
             requires_pooled=get_pooled,
             device=TorchDevice.choose_torch_device(),
+            split_long_text_mode=SplitLongTextMode.SENTENCES,
         )

         conjunction = Compel.parse_prompt_string(prompt)
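A minimal standalone sketch of the new flag. With `truncate_long_prompts=False`, compel chunks overlong prompts; `SplitLongTextMode.SENTENCES` (per its name) makes the split land on sentence boundaries rather than mid-sentence. The checkpoint name is an assumption; any SD1.5-family CLIP text encoder should work.

    from compel import Compel, SplitLongTextMode
    from transformers import CLIPTextModel, CLIPTokenizer

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

    compel = Compel(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        truncate_long_prompts=False,
        split_long_text_mode=SplitLongTextMode.SENTENCES,
    )
    conditioning = compel.build_conditioning_tensor("First sentence. Second sentence. Third sentence.")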
@@ -22,7 +22,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
+from invokeai.app.util.controlnet_utils import (
+    CONTROLNET_MODE_VALUES,
+    CONTROLNET_RESIZE_VALUES,
+    heuristic_resize_fast,
+)
 from invokeai.backend.image_util.util import np_to_pil, pil_to_np

@@ -109,7 +113,7 @@ class ControlNetInvocation(BaseInvocation):
     title="Heuristic Resize",
     tags=["image, controlnet"],
     category="image",
-    version="1.0.1",
+    version="1.1.1",
     classification=Classification.Prototype,
 )
 class HeuristicResizeInvocation(BaseInvocation):
@@ -122,7 +126,7 @@ class HeuristicResizeInvocation(BaseInvocation):
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.images.get_pil(self.image.image_name, "RGB")
         np_img = pil_to_np(image)
-        np_resized = heuristic_resize(np_img, (self.width, self.height))
+        np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
         resized = np_to_pil(np_resized)
         image_dto = context.images.save(image=resized)
         return ImageOutput.build(image_dto)
@@ -1,12 +1,14 @@
 from typing import Literal, Optional

+import cv2
+import numpy as np
 import torch
 import torchvision.transforms as T
-from PIL import Image, ImageFilter
+from PIL import Image
 from torchvision.transforms.functional import resize as tv_resize

 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
@@ -42,15 +44,13 @@ class GradientMaskOutput(BaseInvocationOutput):
     title="Create Gradient Mask",
     tags=["mask", "denoise"],
     category="latents",
-    version="1.2.1",
+    version="1.3.0",
 )
 class CreateGradientMaskInvocation(BaseInvocation):
-    """Creates mask for denoising model run."""
+    """Creates mask for denoising."""

     mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
-    edge_radius: int = InputField(
-        default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
-    )
+    edge_radius: int = InputField(default=16, ge=0, description="How far to expand the edges of the mask", ui_order=2)
     coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
     minimum_denoise: float = InputField(
         default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
@@ -81,45 +81,110 @@ class CreateGradientMaskInvocation(BaseInvocation):
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> GradientMaskOutput:
         mask_image = context.images.get_pil(self.mask.image_name, mode="L")

+        # Resize the mask_image. Makes the filter 64x faster and doesn't hurt quality in latent scale anyway
+        mask_image = mask_image.resize(
+            (
+                mask_image.width // LATENT_SCALE_FACTOR,
+                mask_image.height // LATENT_SCALE_FACTOR,
+            ),
+            resample=Image.Resampling.BILINEAR,
+        )
+
+        mask_np_orig = np.array(mask_image, dtype=np.float32)
+
+        self.edge_radius = self.edge_radius // LATENT_SCALE_FACTOR  # scale the edge radius to match the mask size
+
         if self.edge_radius > 0:
+            mask_np = 255 - mask_np_orig  # invert so 0 is unmasked (higher values = higher denoise strength)
+            dilated_mask = mask_np.copy()
+
+            # Create kernel based on coherence mode
             if self.coherence_mode == "Box Blur":
-                blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
-            else:  # Gaussian Blur OR Staged
-                # Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
-                blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
+                # Create a circular distance kernel that fades from center outward
+                kernel_size = self.edge_radius * 2 + 1
+                center = self.edge_radius
+                kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
+                for i in range(kernel_size):
+                    for j in range(kernel_size):
+                        dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
+                        if dist <= self.edge_radius:
+                            kernel[i, j] = 1.0 - (dist / self.edge_radius)
+            else:  # Gaussian Blur or Staged
+                # Create a Gaussian kernel
+                kernel_size = self.edge_radius * 2 + 1
+                kernel = cv2.getGaussianKernel(
+                    kernel_size, self.edge_radius / 2.5
+                )  # 2.5 is a magic number (standard deviation capturing)
+                kernel = kernel * kernel.T  # Make 2D gaussian kernel
+                kernel = kernel / np.max(kernel)  # Normalize center to 1.0

-            blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
+                # Ensure values outside radius are 0
+                center = self.edge_radius
+                for i in range(kernel_size):
+                    for j in range(kernel_size):
+                        dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
+                        if dist > self.edge_radius:
+                            kernel[i, j] = 0

-            # redistribute blur so that the original edges are 0 and blur outwards to 1
-            blur_tensor = (blur_tensor - 0.5) * 2
-            blur_tensor[blur_tensor < 0] = 0.0
+            # 2D max filter
+            mask_tensor = torch.tensor(mask_np)
+            kernel_tensor = torch.tensor(kernel)
+            dilated_mask = 255 - self.max_filter2D_torch(mask_tensor, kernel_tensor).cpu()
+            dilated_mask = dilated_mask.numpy()

-            threshold = 1 - self.minimum_denoise
+            threshold = (1 - self.minimum_denoise) * 255

             if self.coherence_mode == "Staged":
-                # wherever the blur_tensor is less than fully masked, convert it to threshold
-                blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
-            else:
-                # wherever the blur_tensor is above threshold but less than 1, drop it to threshold
-                blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
+                # wherever expanded mask is darker than the original mask but original was above threshhold, set it to the threshold
+                # makes any expansion areas drop to threshhold. Raising minimum across the image happen outside of this if
+                threshold_mask = (dilated_mask < mask_np_orig) & (mask_np_orig > threshold)
+                dilated_mask = np.where(threshold_mask, threshold, mask_np_orig)
+
+            # wherever expanded mask is less than 255 but greater than threshold, drop it to threshold (minimum denoise)
+            threshold_mask = (dilated_mask > threshold) & (dilated_mask < 255)
+            dilated_mask = np.where(threshold_mask, threshold, dilated_mask)

         else:
-            blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
+            dilated_mask = mask_np_orig.copy()

-        mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
+        # convert to tensor
+        dilated_mask = np.clip(dilated_mask, 0, 255).astype(np.uint8)
+        mask_tensor = torch.tensor(dilated_mask, device=torch.device("cpu"))

-        # compute a [0, 1] mask from the blur_tensor
-        expanded_mask = torch.where((blur_tensor < 1), 0, 1)
-        expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
+        # binary mask for compositing
+        expanded_mask = np.where((dilated_mask < 255), 0, 255)
+        expanded_mask_image = Image.fromarray(expanded_mask.astype(np.uint8), mode="L")
+        expanded_mask_image = expanded_mask_image.resize(
+            (
+                mask_image.width * LATENT_SCALE_FACTOR,
+                mask_image.height * LATENT_SCALE_FACTOR,
+            ),
+            resample=Image.Resampling.NEAREST,
+        )
         expanded_image_dto = context.images.save(expanded_mask_image)

+        # restore the original mask size
+        dilated_mask = Image.fromarray(dilated_mask.astype(np.uint8))
+        dilated_mask = dilated_mask.resize(
+            (
+                mask_image.width * LATENT_SCALE_FACTOR,
+                mask_image.height * LATENT_SCALE_FACTOR,
+            ),
+            resample=Image.Resampling.NEAREST,
+        )
+
+        # stack the mask as a tensor, repeating 4 times on dimmension 1
+        dilated_mask_tensor = image_resized_to_grid_as_tensor(dilated_mask, normalize=False)
+        mask_name = context.tensors.save(tensor=dilated_mask_tensor.unsqueeze(0))
+
         masked_latents_name = None
         if self.unet is not None and self.vae is not None and self.image is not None:
             # all three fields must be present at the same time
             main_model_config = context.models.get_config(self.unet.unet.key)
             assert isinstance(main_model_config, MainConfigBase)
             if main_model_config.variant is ModelVariantType.Inpaint:
-                mask = blur_tensor
+                mask = mask_tensor
                 vae_info: LoadedModel = context.models.load(self.vae.vae)
                 image = context.images.get_pil(self.image.image_name)
                 image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
@@ -137,3 +202,29 @@ class CreateGradientMaskInvocation(BaseInvocation):
             denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
             expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
         )
+
+    def max_filter2D_torch(self, image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
+        """
+        This morphological operation is much faster in torch than numpy or opencv.
+        For reasonable kernel sizes, the overhead of copying the data to the GPU is not worth it.
+        """
+        h, w = kernel.shape
+        pad_h, pad_w = h // 2, w // 2
+
+        padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
+        result = torch.zeros_like(image)
+
+        # This looks like it's inside out, but it does the same thing and is more efficient
+        for i in range(h):
+            for j in range(w):
+                weight = kernel[i, j]
+                if weight <= 0:
+                    continue
+
+                # Extract the region from padded tensor
+                region = padded[i : i + image.shape[0], j : j + image.shape[1]]
+
+                # Apply weight and update max
+                result = torch.maximum(result, region * weight)
+
+        return result
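To see what the weighted max filter does, here is a self-contained rerun of the same logic outside the class (the logic mirrors the method above; the test values are arbitrary):

    import torch
    import torch.nn.functional as F

    def max_filter2d(image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
        # result[y, x] = max over kernel taps of image[y+di, x+dj] * kernel[di, dj], zero-padded
        h, w = kernel.shape
        padded = F.pad(image, (w // 2, w // 2, h // 2, h // 2), value=0)
        result = torch.zeros_like(image)
        for i in range(h):
            for j in range(w):
                if kernel[i, j] <= 0:
                    continue
                region = padded[i : i + image.shape[0], j : j + image.shape[1]]
                result = torch.maximum(result, region * kernel[i, j])
        return result

    img = torch.zeros(5, 5)
    img[2, 2] = 255.0  # single masked pixel
    k = torch.tensor([[0.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 0.0]])
    print(max_filter2d(img, k))  # 255 at the centre, 127.5 at the 4-neighbours: a faded dilation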
@@ -1218,12 +1218,15 @@ class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     title="Add Image Noise",
     tags=["image", "noise"],
     category="image",
-    version="1.0.1",
+    version="1.1.0",
 )
 class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Add noise to an image"""

     image: ImageField = InputField(description="The image to add noise to")
+    mask: Optional[ImageField] = InputField(
+        default=None, description="Optional mask determining where to apply noise (black=noise, white=no noise)"
+    )
     seed: int = InputField(
         default=0,
         ge=0,
@@ -1267,12 +1270,27 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
             (image.width, image.height), Image.Resampling.NEAREST
         )

         # Create a noisy version of the input image
         noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")

-        # Paste back the alpha channel
-        noisy_image.putalpha(alpha)
+        # Apply mask if provided
+        if self.mask is not None:
+            mask_image = context.images.get_pil(self.mask.image_name, mode="L")

-        image_dto = context.images.save(image=noisy_image)
+            if mask_image.size != image.size:
+                mask_image = mask_image.resize(image.size, Image.Resampling.LANCZOS)
+
+            result_image = image.copy()
+            mask_image = ImageOps.invert(mask_image)
+            result_image.paste(noisy_image, (0, 0), mask=mask_image)
+        else:
+            result_image = noisy_image
+
+        # Paste back the alpha channel from the original image
+        result_image.putalpha(alpha)
+
+        image_dto = context.images.save(image=result_image)

         return ImageOutput.build(image_dto)
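The mask convention (black = add noise, white = leave untouched) works because the L-mode mask is inverted before use as a paste mask, and PIL's paste treats white as "take from the pasted image". A standalone illustration, unrelated to Invoke's context object:

    from PIL import Image, ImageOps

    base = Image.new("RGBA", (64, 64), (255, 0, 0, 255))   # original image
    noisy = Image.new("RGBA", (64, 64), (0, 255, 0, 255))  # stand-in for the noisy blend
    mask = Image.new("L", (64, 64), 0)                     # all black: noise everywhere

    out = base.copy()
    out.paste(noisy, (0, 0), mask=ImageOps.invert(mask))   # inverted -> all white -> fully "noisy"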
@@ -42,7 +42,9 @@ class IPAdapterMetadataField(BaseModel):
     image: ImageField = Field(description="The IP-Adapter image prompt.")
     ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
     clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
-    method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
+    method: Literal["full", "style", "composition", "style_strong", "style_precise"] = Field(
+        description="Method to apply IP Weights with"
+    )
     weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
     begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
     end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")
@@ -1,12 +1,3 @@
-import uvicorn
-
-from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
-from invokeai.app.services.config.config_default import get_config
-from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
-from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.frontend.cli.arg_parser import InvokeAIArgs
-
-
 def get_app():
     """Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
     importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
@@ -18,9 +9,18 @@ def get_app():

 def run_app() -> None:
     """The main entrypoint for the app."""
-    # Parse the CLI arguments.
+    from invokeai.frontend.cli.arg_parser import InvokeAIArgs
+
+    # Parse the CLI arguments before doing anything else, which ensures CLI args correctly override settings from other
+    # sources like `invokeai.yaml` or env vars.
     InvokeAIArgs.parse_args()

+    import uvicorn
+
+    from invokeai.app.services.config.config_default import get_config
+    from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
+    from invokeai.backend.util.logging import InvokeAILogger
+
     # Load config.
     app_config = get_config()
@@ -32,6 +32,8 @@ def run_app() -> None:
         configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)

     # This import must happen after configure_torch_cuda_allocator() is called, because the module imports torch.
+    from invokeai.app.invocations.baseinvocation import InvocationRegistry
+    from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
     from invokeai.backend.util.devices import TorchDevice

     torch_device_name = TorchDevice.get_torch_device_name()
@@ -66,6 +68,15 @@ def run_app() -> None:
     # core nodes have been imported so that we can catch when a custom node clobbers a core node.
     load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)

+    # Check all invocations and ensure their outputs are registered.
+    for invocation in InvocationRegistry.get_invocation_classes():
+        invocation_type = invocation.get_type()
+        output_annotation = invocation.get_output_annotation()
+        if output_annotation not in InvocationRegistry.get_output_classes():
+            logger.warning(
+                f'Invocation "{invocation_type}" has unregistered output class "{output_annotation.__name__}"'
+            )
+
     if app_config.dev_reload:
         # load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
         # imported.
@@ -98,9 +98,18 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
             FROM images
             LEFT JOIN board_images ON board_images.image_name = images.image_name
             WHERE 1=1
             """

+        # Handle board_id filter
+        if board_id == "none":
+            stmt += """--sql
+                AND board_images.board_id IS NULL
+            """
+        else:
+            stmt += """--sql
+                AND board_images.board_id = ?
+            """
-        params.append(board_id)
+            params.append(board_id)

         # Add the category filter
         if categories is not None:
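Condensed, the branching above produces one of two WHERE clauses. The SELECT list in this sketch is an assumption; the filter logic comes from the diff:

    stmt = "SELECT images.image_name FROM images LEFT JOIN board_images ON board_images.image_name = images.image_name WHERE 1=1"
    params: list[str] = []
    board_id = "none"  # the sentinel for uncategorized images
    if board_id == "none":
        stmt += " AND board_images.board_id IS NULL"  # images with no board row at all
    else:
        stmt += " AND board_images.board_id = ?"      # bound as a query parameter
        params.append(board_id)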
@@ -230,6 +230,86 @@ def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.
     return resized


+# precompute common kernels
+_KERNEL3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
+# directional masks for NMS
+_DIRS = [
+    np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], np.uint8),
+    np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], np.uint8),
+    np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.uint8),
+    np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], np.uint8),
+]
+
+
+def heuristic_resize_fast(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
+    h, w = np_img.shape[:2]
+    # early exit
+    if (w, h) == size:
+        return np_img
+
+    # separate alpha channel
+    img = np_img
+    alpha = None
+    if img.ndim == 3 and img.shape[2] == 4:
+        alpha, img = img[:, :, 3], img[:, :, :3]
+
+    # build small sample for unique-color & binary detection
+    flat = img.reshape(-1, img.shape[-1])
+    N = flat.shape[0]
+    # include four corners to avoid missing extreme values
+    corners = np.vstack([img[0, 0], img[0, w - 1], img[h - 1, 0], img[h - 1, w - 1]])
+    cnt = min(N, 100_000)
+    samp = np.vstack([corners, flat[np.random.choice(N, cnt, replace=False)]])
+    uc = np.unique(samp, axis=0).shape[0]
+    vmin, vmax = samp.min(), samp.max()
+
+    # detect binary edge map & one-pixel-edge case
+    is_binary = uc == 2 and vmin < 16 and vmax > 240
+    one_pixel_edge = False
+    if is_binary:
+        # single gray conversion
+        gray0 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        grad = cv2.morphologyEx(gray0, cv2.MORPH_GRADIENT, _KERNEL3)
+        cnt_edge = cv2.countNonZero(grad)
+        cnt_all = cv2.countNonZero((gray0 > 127).astype(np.uint8))
+        one_pixel_edge = (2 * cnt_edge) > cnt_all
+
+    # choose interp for color/seg/grayscale
+    area_new, area_old = size[0] * size[1], w * h
+    if 2 < uc < 200:  # segmentation map
+        interp = cv2.INTER_NEAREST
+    elif area_new < area_old:
+        interp = cv2.INTER_AREA
+    else:
+        interp = cv2.INTER_CUBIC
+
+    # single resize pass on RGB
+    resized = cv2.resize(img, size, interpolation=interp)
+
+    if is_binary:
+        # convert to gray & apply NMS via C++ dilate
+        gray_r = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
+        nms = np.zeros_like(gray_r)
+        for K in _DIRS:
+            d = cv2.dilate(gray_r, K)
+            mask = d == gray_r
+            nms[mask] = gray_r[mask]
+
+        # threshold + thinning if needed
+        _, bw = cv2.threshold(nms, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+        out_bin = cv2.ximgproc.thinning(bw) if one_pixel_edge else bw
+        # restore 3 channels
+        resized = np.stack([out_bin] * 3, axis=2)
+
+    # restore alpha with same interp as RGB for consistency
+    if alpha is not None:
+        am = cv2.resize(alpha, size, interpolation=interp)
+        am = (am > 127).astype(np.uint8) * 255
+        resized = np.dstack((resized, am))
+
+    return resized
+
+
 ###########################################################################
 # Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
 # modified for InvokeAI
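A minimal usage sketch of the new function (the import path is the one shown in the images-router diff above; the expected input layout is an HxWxC uint8 array, and size is (width, height) as with cv2.resize):

    import numpy as np
    from invokeai.app.util.controlnet_utils import heuristic_resize_fast

    rgb = np.zeros((512, 512, 3), dtype=np.uint8)   # plain color image: takes the interpolation path
    out = heuristic_resize_fast(rgb, (1024, 1024))  # upscale, so INTER_CUBIC is selected
    assert out.shape == (1024, 1024, 3)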
@@ -244,7 +324,7 @@ def np_img_resize(
     np_img = normalize_image_channel_count(np_img)

     if resize_mode == "just_resize":  # RESIZE
-        np_img = heuristic_resize(np_img, (w, h))
+        np_img = heuristic_resize_fast(np_img, (w, h))
         np_img = clone_contiguous(np_img)
         return np_img_to_torch(np_img, device), np_img

@@ -265,7 +345,7 @@ def np_img_resize(
         # Inpaint hijack
         high_quality_border_color[3] = 255
         high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
-        np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
+        np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
         new_h, new_w, _ = np_img.shape
         pad_h = max(0, (h - new_h) // 2)
         pad_w = max(0, (w - new_w) // 2)
@@ -275,7 +355,7 @@ def np_img_resize(
         return np_img_to_torch(np_img, device), np_img
     else:  # resize_mode == "crop_resize" (INNER_FIT)
         k = max(k0, k1)
-        np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
+        np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
         new_h, new_w, _ = np_img.shape
         pad_h = max(0, (new_h - h) // 2)
         pad_w = max(0, (new_w - w) // 2)
@@ -12,6 +12,9 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.services.events.events_common import EventBase
 from invokeai.app.services.session_processor.session_processor_common import ProgressImage
+from invokeai.backend.util.logging import InvokeAILogger
+
+logger = InvokeAILogger.get_logger()


 def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:
@@ -62,11 +62,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
         # If this too fails, raise exception.

         model_info = None

+        # Handling for our special syntax - we only want the base HF `org/repo` here.
+        repo_id = id.split("::")[0] or id
         while not model_info:
             try:
-                model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant)
+                model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
             except RepositoryNotFoundError as excp:
-                raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
+                raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
             except RevisionNotFoundError:
                 if variant is None:
                     raise
@@ -75,14 +78,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):

         files: list[RemoteModelFile] = []

-        _, name = id.split("/")
+        _, name = repo_id.split("/")

         for s in model_info.siblings or []:
             assert s.rfilename is not None
             assert s.size is not None
             files.append(
                 RemoteModelFile(
-                    url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
+                    url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
                     path=Path(name, s.rfilename),
                     size=s.size,
                     sha256=s.lfs.get("sha256") if s.lfs else None,
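The `::` handling is a plain prefix split; a quick illustration (the id value is hypothetical, the expression is taken from the diff above):

    id = "org/repo::some-suffix"       # hypothetical id using the special syntax
    repo_id = id.split("::")[0] or id  # -> "org/repo"; falls back to id when the prefix is empty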
@@ -1,3 +1,4 @@
+import re
 from contextlib import contextmanager
 from typing import Dict, Iterable, Optional, Tuple

@@ -7,6 +8,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
 from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
+from invokeai.backend.util import InvokeAILogger
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage

@@ -23,6 +25,7 @@ class LayerPatcher:
         cached_weights: Optional[Dict[str, torch.Tensor]] = None,
         force_direct_patching: bool = False,
         force_sidecar_patching: bool = False,
+        suppress_warning_layers: Optional[re.Pattern] = None,
     ):
         """Apply 'smart' model patching that chooses whether to use direct patching or a sidecar wrapper for each
         module.
@@ -44,6 +47,7 @@ class LayerPatcher:
                 dtype=dtype,
                 force_direct_patching=force_direct_patching,
                 force_sidecar_patching=force_sidecar_patching,
+                suppress_warning_layers=suppress_warning_layers,
             )

         yield
@@ -70,6 +74,7 @@ class LayerPatcher:
         dtype: torch.dtype,
         force_direct_patching: bool,
         force_sidecar_patching: bool,
+        suppress_warning_layers: Optional[re.Pattern] = None,
     ):
         """Apply a single LoRA patch to a model using the 'smart' patching strategy that chooses whether to use direct
         patching or a sidecar wrapper for each module.
@@ -89,9 +94,17 @@ class LayerPatcher:
             if not layer_key.startswith(prefix):
                 continue

-            module_key, module = LayerPatcher._get_submodule(
-                model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
-            )
+            try:
+                module_key, module = LayerPatcher._get_submodule(
+                    model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
+                )
+            except AttributeError:
+                if suppress_warning_layers and suppress_warning_layers.search(layer_key):
+                    pass
+                else:
+                    logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
+                    logger.warning("Failed to find module for LoRA layer key: %s", layer_key)
+                continue

             # Decide whether to use direct patching or a sidecar patch.
             # Direct patching is preferred, because it results in better runtime speed.
@@ -30,18 +30,13 @@ class RectifiedFlowInpaintExtension:
     def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
         """Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
         # As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
-        # 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
-        # number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
-        # threshold worked well.
+        # 1.0. This helps to produce more coherent seams around the inpainted region.

         # We use a small epsilon to avoid any potential issues with floating point precision.
         eps = 1e-4
-        mask_gradient_t_cutoff = 0.5
-        if t_prev > mask_gradient_t_cutoff:
-            # Early in the denoising process, use the inpaint mask as-is.
-            return self._inpaint_mask
-        else:
-            # After the cut-off, promote all non-zero mask values to 1.0.
-            mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
+        mask = torch.where(self._inpaint_mask >= t_prev + eps, 1.0, 0.0).to(
+            dtype=self._inpaint_mask.dtype, device=self._inpaint_mask.device
+        )

         return mask
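A small numeric illustration of the new rule (values chosen arbitrarily): a mask entry is promoted to 1.0 once it is at least `t_prev + eps`, so lower-weight gradient regions switch on later in denoising instead of all at a fixed cutoff.

    import torch

    inpaint_mask = torch.tensor([0.0, 0.3, 0.7, 1.0])
    eps = 1e-4
    for t_prev in (0.9, 0.5, 0.1):
        mask = torch.where(inpaint_mask >= t_prev + eps, 1.0, 0.0)
        print(t_prev, mask.tolist())
    # 0.9 -> [0., 0., 0., 1.]; 0.5 -> [0., 0., 1., 1.]; 0.1 -> [0., 1., 1., 1.]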
@@ -14,6 +14,8 @@ const config: KnipConfig = {
     'src/features/controlLayers/konva/util.ts',
     // TODO(psyche): restore HRF functionality?
     'src/features/hrf/**',
+    // This feature is (temprarily?) disabled
+    'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
   ],
   ignoreBinaries: ['only-allow'],
   paths: {
@@ -24,15 +24,18 @@
     "autoAddBoard": "Auto-Add Board",
     "boards": "Boards",
     "selectedForAutoAdd": "Selected for Auto-Add",
-    "bottomMessage": "Deleting this board and its images will reset any features currently using them.",
+    "bottomMessage": "Deleting images will reset any features currently using them.",
     "cancel": "Cancel",
     "changeBoard": "Change Board",
     "clearSearch": "Clear Search",
     "deleteBoard": "Delete Board",
     "deleteBoardAndImages": "Delete Board and Images",
     "deleteBoardOnly": "Delete Board Only",
-    "deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
-    "deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
+    "deletedBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
+    "deletedPrivateBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
+    "uncategorizedImages": "Uncategorized Images",
+    "deleteAllUncategorizedImages": "Delete All Uncategorized Images",
+    "deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
     "hideBoards": "Hide Boards",
     "loading": "Loading...",
     "menuItemAutoAdd": "Auto-add to this Board",
@@ -46,7 +49,7 @@
     "searchBoard": "Search Boards...",
     "selectBoard": "Select a Board",
     "shared": "Shared Boards",
-    "topMessage": "This board contains images used in the following features:",
+    "topMessage": "This selection contains images used in the following features:",
     "unarchiveBoard": "Unarchive Board",
     "uncategorized": "Uncategorized",
     "viewBoards": "View Boards",
@@ -1907,11 +1910,13 @@
     "addPositivePrompt": "Add $t(controlLayers.prompt)",
     "addNegativePrompt": "Add $t(controlLayers.negativePrompt)",
     "addReferenceImage": "Add $t(controlLayers.referenceImage)",
+    "addImageNoise": "Add $t(controlLayers.imageNoise)",
     "addRasterLayer": "Add $t(controlLayers.rasterLayer)",
     "addControlLayer": "Add $t(controlLayers.controlLayer)",
     "addInpaintMask": "Add $t(controlLayers.inpaintMask)",
     "addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
     "addGlobalReferenceImage": "Add $t(controlLayers.globalReferenceImage)",
+    "addDenoiseLimit": "Add $t(controlLayers.denoiseLimit)",
     "rasterLayer": "Raster Layer",
     "controlLayer": "Control Layer",
     "inpaintMask": "Inpaint Mask",
@@ -2009,8 +2014,10 @@
     "resetCanvasLayers": "Reset Canvas Layers",
     "resetGenerationSettings": "Reset Generation Settings",
     "replaceCurrent": "Replace Current",
-    "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
+    "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
     "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
+    "imageNoise": "Image Noise",
+    "denoiseLimit": "Denoise Limit",
     "warnings": {
       "problemsFound": "Problems found",
       "unsupportedModel": "layer not supported for selected base model",
@@ -2419,9 +2426,8 @@
     "whatsNew": {
       "whatsNewInInvoke": "What's New in Invoke",
       "items": [
-        "Nvidia 50xx GPUs: Invoke uses PyTorch 2.7.0, which is required for these GPUs.",
-        "Model Relationships: Link LoRAs to main models, and the LoRAs will show up first in the list.",
-        "IP Adapter: New Style (Strong) and Style (Precise) methods for SDXL and SD1.5 models."
+        "Inpainting: Per-mask noise levels and denoise limits.",
+        "Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
       ],
       "readReleaseNotes": "Read Release Notes",
       "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -883,7 +883,8 @@
     "problemUnpublishingWorkflow": "Problema durante l'annullamento della pubblicazione del flusso di lavoro",
     "problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
     "workflowUnpublished": "Flusso di lavoro non pubblicato",
-    "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting."
+    "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
+    "imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
   },
   "accessibility": {
     "invokeProgressBar": "Barra di avanzamento generazione",
@@ -1085,11 +1086,11 @@
     "menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
     "cancel": "Annulla",
     "addBoard": "Aggiungi Bacheca",
-    "bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
+    "bottomMessage": "L'eliminazione delle immagini reimposterà tutte le funzionalità che le stanno utilizzando.",
     "changeBoard": "Cambia Bacheca",
     "loading": "Caricamento in corso ...",
     "clearSearch": "Cancella Ricerca",
-    "topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:",
+    "topMessage": "Questa selezione contiene immagini utilizzate nelle seguenti funzionalità:",
     "move": "Sposta",
     "myBoard": "Bacheca",
     "searchBoard": "Cerca bacheche ...",
@@ -1100,7 +1101,7 @@
     "deleteBoardOnly": "solo la Bacheca",
     "deleteBoard": "Elimina Bacheca",
     "deleteBoardAndImages": "Bacheca e Immagini",
-    "deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate nella bacheca \"Non categorizzato\".",
+    "deletedBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate in uno stato non categorizzato.",
     "movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
     "movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
     "movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:",
@@ -1122,8 +1123,11 @@
     "noBoards": "Nessuna bacheca {{boardType}}",
     "hideBoards": "Nascondi bacheche",
     "viewBoards": "Visualizza bacheche",
-    "deletedPrivateBoardsCannotbeRestored": "Le bacheche cancellate non possono essere ripristinate. Selezionando 'Cancella solo bacheca', le immagini verranno spostate nella bacheca \"Non categorizzato\" privata dell'autore dell'immagine.",
-    "updateBoardError": "Errore durante l'aggiornamento della bacheca"
+    "deletedPrivateBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\", le immagini verranno spostate in uno stato privato e non categorizzato per l'autore dell'immagine.",
+    "updateBoardError": "Errore durante l'aggiornamento della bacheca",
+    "uncategorizedImages": "Immagini non categorizzate",
+    "deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
+    "deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
   },
   "queue": {
     "queueFront": "Aggiungi all'inizio della coda",
@@ -2295,7 +2299,7 @@
     "replaceCurrent": "Sostituisci corrente",
     "mergeDown": "Unire in basso",
     "mergingLayers": "Unione dei livelli",
-    "controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
+    "controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
     "useImage": "Usa immagine",
     "resetGenerationSettings": "Ripristina impostazioni di generazione",
     "referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
@@ -2344,7 +2348,11 @@
         "lowest": "Il più basso",
         "medium": "Medio",
         "highest": "La più alta"
-      }
+      },
+      "denoiseLimit": "Limite di riduzione del rumore",
+      "addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
+      "addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
+      "imageNoise": "Rumore dell'immagine"
     },
     "ui": {
       "tabs": {
@@ -2445,7 +2453,8 @@
       "watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
       "items": [
         "GPU Nvidia 50xx: Invoke utilizza PyTorch 2.7.0, necessario per queste GPU.",
-        "Relazioni tra modelli: collega i LoRA ai modelli principali e i LoRA verranno visualizzati per primi nell'elenco."
+        "Relazioni tra modelli: collega i LoRA ai modelli principali e i LoRA verranno visualizzati per primi nell'elenco.",
+        "Adattatore IP: nuovi metodi Style (Strong) e Style (Precise) per i modelli SDXL e SD1.5."
       ]
     },
     "system": {
@@ -652,7 +652,9 @@
     "filterModels": "フィルターモデル",
     "modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
     "manageModels": "モデル管理",
-    "hfTokenReset": "ハギングフェイストークンリセット"
+    "hfTokenReset": "ハギングフェイストークンリセット",
+    "relatedModels": "関連のあるモデル",
+    "showOnlyRelatedModels": "関連している"
   },
   "parameters": {
     "images": "画像",
@@ -872,7 +874,8 @@
     "problemDeletingWorkflow": "ワークフローが削除された問題",
     "imageNotLoadedDesc": "画像を見つけられません",
     "parameterNotSetDesc": "{{parameter}}を呼び出せません",
-    "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください."
+    "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
+    "imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
   },
   "accessibility": {
     "invokeProgressBar": "進捗バー",
@@ -1246,7 +1249,8 @@
       "paramRatio": {
         "heading": "縦横比",
         "paragraphs": [
-          "生成された画像の縦横比。"
+          "生成された画像の縦横比。",
+          "SD1.5 モデルの場合は 512x512 に相当する画像サイズ (ピクセル数) が推奨され, SDXL モデルの場合は 1024x1024 に相当するサイズが推奨されます."
         ]
       },
       "regionalGuidanceAndReferenceImage": {
@@ -1321,7 +1325,11 @@
         "heading": "高さ"
       },
       "paramSteps": {
-        "heading": "ステップ"
+        "heading": "ステップ",
+        "paragraphs": [
+          "各生成で実行されるステップの数.",
+          "通常, ステップ数が多いほど, より高品質な画像が作成されますが生成時間も長くなります."
+        ]
       },
       "ipAdapterMethod": {
         "heading": "モード",
@@ -1330,7 +1338,11 @@
         ]
       },
       "paramSeed": {
-        "heading": "シード"
+        "heading": "シード",
+        "paragraphs": [
+          "生成に使用する始動ノイズを制御します.",
+          "同じ生成設定で同一の結果を生成するには, 「ランダム」オプションを無効にします."
+        ]
       },
       "paramIterations": {
         "heading": "生成回数"
@@ -1345,10 +1357,16 @@
         "heading": "幅"
       },
       "lora": {
-        "heading": "LoRA"
+        "heading": "LoRA",
+        "paragraphs": [
+          "ベースモデルと組み合わせて使用する軽量モデル."
+        ]
       },
       "loraWeight": {
-        "heading": "重み"
+        "heading": "重み",
+        "paragraphs": [
+          "LoRA の重み. 重みを大きくすると, 最終的な画像への影響が大きくなります."
+        ]
       },
       "patchmatchDownScaleSize": {
         "heading": "Downscale"
@@ -1437,7 +1455,8 @@
         "heading": "ダイナミックプロンプト",
         "paragraphs": [
           "ダイナミック プロンプトは,単一のプロンプトを複数のプロンプトに解析します.",
-          "基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます."
+          "基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます.",
+          "1 つのプロンプト内で構文を何度でも使用できますが, 生成されるプロンプトの数を Max Prompts 設定で制限するようにしてください."
         ]
       },
       "controlNetResizeMode": {
@@ -1457,6 +1476,41 @@
         "paragraphs": [
           "プロンプトまたは コントロールネットのいずれかを重視します."
         ]
-      }
+      },
+      "noiseUseCPU": {
+        "paragraphs": [
+          "CPU または GPU でノイズを生成するかどうかを制御します.",
+          "CPU ノイズを有効にすると, 特定のシードによってどのマシンでも同じ画像が生成されます.",
+          "CPU ノイズを有効にしてもパフォーマンスに影響はありません."
+        ],
+        "heading": "CPUノイズを使用する"
+      },
+      "dynamicPromptsMaxPrompts": {
+        "heading": "最大プロンプト",
+        "paragraphs": [
+          "ダイナミック プロンプトによって生成できるプロンプトの数を制限します."
+        ]
+      },
+      "dynamicPromptsSeedBehaviour": {
+        "paragraphs": [
+          "プロンプトを生成するときにシードがどのように使用されるかを制御します.",
+          "反復ごとに固有のシードを使用します. 単一のシードでプロンプトのバリエーションを試す場合に使用します.",
+          "たとえば, プロンプトが 5 つある場合, 各画像は同じシードを使用します.",
+          "「画像ごと」では, 画像ごとに固有のシード値が使用されます. これにより、より多くのバリエーションが得られます."
+        ],
+        "heading": "シード行動"
+      },
+      "imageFit": {
+        "paragraphs": [
+          "初期画像の幅と高さを出力画像に合わせてサイズ変更します. 有効にすることをお勧めします."
+        ],
+        "heading": "初期画像を出力サイズに合わせる"
+      },
+      "infillMethod": {
+        "heading": "充填方法",
+        "paragraphs": [
+          "アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
+        ]
+      }
     },
     "accordions": {
@@ -30,7 +30,7 @@
"boards": "Bảng",
"selectedForAutoAdd": "Đã Chọn Để Tự động thêm",
"myBoard": "Bảng Của Tôi",
"deletedPrivateBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
"deletedPrivateBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
"changeBoard": "Thay Đổi Bảng",
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
"updateBoardError": "Lỗi khi cập nhật Bảng",
@@ -41,18 +41,21 @@
"deleteBoard": "Xoá Bảng",
"deleteBoardAndImages": "Xoá Bảng Lẫn Hình ảnh",
"deleteBoardOnly": "Chỉ Xoá Bảng",
"deletedBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
"bottomMessage": "Xoá bảng này lẫn ảnh của nó sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
"deletedBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
"bottomMessage": "Việc xóa ảnh sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
"move": "Di Chuyển",
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
"topMessage": "Lựa chọn này chứa ảnh được dùng với những tính năng sau:",
"uncategorized": "Chưa Sắp Xếp",
"archived": "Được Lưu Trữ",
"loading": "Đang Tải...",
"selectBoard": "Chọn Bảng",
"archiveBoard": "Lưu trữ Bảng",
"unarchiveBoard": "Ngừng Lưu Trữ Bảng",
"assetsWithCount_other": "{{count}} tài nguyên"
"assetsWithCount_other": "{{count}} tài nguyên",
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
},
"gallery": {
"swapImages": "Đổi Hình Ảnh",
@@ -2059,7 +2062,7 @@
"colorPicker": "Chọn Màu"
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
@@ -2108,7 +2111,11 @@
"imageInfluence": "Ảnh Chi Phối",
"medium": "Vừa",
"highest": "Cao Nhất"
}
},
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
"imageNoise": "Độ Nhiễu Hình Ảnh",
"denoiseLimit": "Giới Hạn Khử Nhiễu",
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2249,7 +2256,8 @@
"problemUnpublishingWorkflowDescription": "Có vấn đề khi ngừng đăng tải workflow. Vui lòng thử lại sau.",
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint."
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
},
"ui": {
"tabs": {
@@ -2432,7 +2440,8 @@
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách.",
"IP Adapter: Thủ thuật Phong Cách (Mạnh Mẽ) và Phong Cách (Chính Xác) mới cho model SDXL và SD1.5."
]
},
"upsell": {

@@ -0,0 +1,108 @@
import { useAppStore } from 'app/store/nanostores/store';
import type { Dimensions } from 'features/controlLayers/store/types';
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
import { debounce } from 'lodash-es';
import { type RefObject, useCallback, useEffect, useMemo } from 'react';

type Options = {
  trackWidth: boolean;
  trackHeight: boolean;
  initialWidth?: number;
  initialHeight?: number;
};

/**
 * Persists the width and/or height of a text area to redux.
 * @param id The unique id of this textarea, used as the key for storage
 * @param ref A ref to the textarea element
 * @param options.trackWidth Whether to track width
 * @param options.trackHeight Whether to track height
 * @param options.initialWidth An optional initial width in pixels
 * @param options.initialHeight An optional initial height in pixels
 */
export const usePersistedTextAreaSize = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
  const { dispatch, getState } = useAppStore();

  const onResize = useCallback(
    (size: Partial<Dimensions>) => {
      dispatch(textAreaSizesStateChanged({ id, size }));
    },
    [dispatch, id]
  );

  const debouncedOnResize = useMemo(() => debounce(onResize, 300), [onResize]);

  useEffect(() => {
    const el = ref.current;
    if (!el) {
      return;
    }

    // Nothing to do here if we are not tracking anything.
    if (!options.trackHeight && !options.trackWidth) {
      return;
    }

    // Before registering the observer, grab the stored size from state - we may need to restore the size.
    const storedSize = selectUiSlice(getState()).textAreaSizes[id];

    // Prefer to restore the stored size, falling back to initial size if it exists
    if (storedSize?.width !== undefined) {
      el.style.width = `${storedSize.width}px`;
    } else if (options.initialWidth !== undefined) {
      el.style.width = `${options.initialWidth}px`;
    }

    if (storedSize?.height !== undefined) {
      el.style.height = `${storedSize.height}px`;
    } else if (options.initialHeight !== undefined) {
      el.style.height = `${options.initialHeight}px`;
    }

    let currentHeight = el.offsetHeight;
    let currentWidth = el.offsetWidth;

    const resizeObserver = new ResizeObserver(() => {
      // We only want to push the changes if a tracked dimension changes
      let didChange = false;
      const newSize: Partial<Dimensions> = {};

      if (options.trackHeight) {
        if (el.offsetHeight !== currentHeight) {
          didChange = true;
          currentHeight = el.offsetHeight;
        }
        newSize.height = currentHeight;
      }

      if (options.trackWidth) {
        if (el.offsetWidth !== currentWidth) {
          didChange = true;
          currentWidth = el.offsetWidth;
        }
        newSize.width = currentWidth;
      }

      if (didChange) {
        debouncedOnResize(newSize);
      }
    });

    resizeObserver.observe(el);

    return () => {
      debouncedOnResize.cancel();
      resizeObserver.disconnect();
    };
  }, [
    debouncedOnResize,
    dispatch,
    getState,
    id,
    options.initialHeight,
    options.initialWidth,
    options.trackHeight,
    options.trackWidth,
    ref,
  ]);
};
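For orientation, the hook above would be consumed roughly like this. This is a minimal sketch, not part of the diff; the component name, the 'negative-prompt' id, and the initialHeight value are illustrative assumptions:

import { useRef } from 'react';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextAreaSize';

const NegativePromptTextarea = () => {
  const ref = useRef<HTMLTextAreaElement>(null);
  // Persist only the height; restore 120px on first mount if nothing is stored yet.
  usePersistedTextAreaSize('negative-prompt', ref, { trackHeight: true, trackWidth: false, initialHeight: 120 });
  return <textarea ref={ref} />;
};

Because the observer callback is debounced by 300 ms and cancelled on unmount, a rapid drag-resize produces a single redux update rather than one per frame.
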
@@ -2,10 +2,11 @@ import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppStore } from 'app/store/nanostores/store';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoLayer } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { replaceCanvasEntityObjectsWithImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { Trans } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';

@@ -23,27 +24,27 @@ export const ControlLayerSettingsEmptyState = memo(() => {
  const onClickGalleryButton = useCallback(() => {
    dispatch(activeTabCanvasRightPanelChanged('gallery'));
  }, [dispatch]);
  const pullBboxIntoLayer = usePullBboxIntoLayer(entityIdentifier);

  const components = useMemo(
    () => ({
      UploadButton: (
        <Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
      ),
      GalleryButton: (
        <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
      ),
      PullBboxButton: (
        <Button onClick={pullBboxIntoLayer} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
      ),
    }),
    [isBusy, onClickGalleryButton, pullBboxIntoLayer, uploadApi]
  );

  return (
    <Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
      <Text textAlign="center" color="base.300">
        <Trans
          i18nKey="controlLayers.controlLayerEmptyState"
          components={{
            UploadButton: (
              <Button
                isDisabled={isBusy}
                size="sm"
                variant="link"
                color="base.300"
                {...uploadApi.getUploadButtonProps()}
              />
            ),
            GalleryButton: (
              <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
            ),
          }}
        />
        <Trans i18nKey="controlLayers.controlLayerEmptyState" components={components} />
      </Text>
      <input {...uploadApi.getUploadInputProps()} />
    </Flex>

@@ -4,6 +4,7 @@ import { CanvasEntityHeader from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { InpaintMaskSettings } from 'features/controlLayers/components/InpaintMask/InpaintMaskSettings';
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -28,6 +29,7 @@ export const InpaintMask = memo(({ id }: Props) => {
          <Spacer />
          <CanvasEntityHeaderCommonActions />
        </CanvasEntityHeader>
        <InpaintMaskSettings />
      </CanvasEntityContainer>
    </CanvasEntityStateGate>
  </InpaintMaskAdapterGate>

@@ -0,0 +1,27 @@
// import { Button, Flex } from '@invoke-ai/ui-library';
// import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
// import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
// import { useTranslation } from 'react-i18next';
// import { PiPlusBold } from 'react-icons/pi';

// Removed buttons because the denoise limit is not helpful for many architectures.
// Users can access with right click menu instead.
// If buttons for noise or new features are deemed important in the future, add them back here.
export const InpaintMaskAddButtons = () => {
  // Buttons are temporarily hidden. To restore, uncomment the code below.
  return null;
  // const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  // const { t } = useTranslation();
  // const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
  // const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
  // return (
  //   <Flex w="full" p={2} justifyContent="center">
  //     <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskDenoiseLimit}>
  //       {t('controlLayers.denoiseLimit')}
  //     </Button>
  //     <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskNoise}>
  //       {t('controlLayers.imageNoise')}
  //     </Button>
  //   </Flex>
  // );
};

@@ -0,0 +1,29 @@
import type { IconButtonProps } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';

type Props = Omit<IconButtonProps, 'aria-label'> & {
  onDelete: () => void;
};

export const InpaintMaskDeleteModifierButton = memo(({ onDelete, ...rest }: Props) => {
  const { t } = useTranslation();
  return (
    <IconButton
      tooltip={t('common.delete')}
      variant="link"
      aria-label={t('common.delete')}
      icon={<PiXBold />}
      onClick={onDelete}
      flexGrow={0}
      size="sm"
      p={0}
      colorScheme="error"
      {...rest}
    />
  );
});

InpaintMaskDeleteModifierButton.displayName = 'InpaintMaskDeleteModifierButton';

@@ -0,0 +1,70 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
  inpaintMaskDenoiseLimitChanged,
  inpaintMaskDenoiseLimitDeleted,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskDenoiseLimitSlider = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  const { t } = useTranslation();
  const dispatch = useAppDispatch();

  const selectDenoiseLimit = useMemo(
    () =>
      createSelector(
        selectCanvasSlice,
        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskDenoiseLimitSlider').denoiseLimit
      ),
    [entityIdentifier]
  );
  const denoiseLimit = useAppSelector(selectDenoiseLimit);

  const handleDenoiseLimitChange = useCallback(
    (value: number) => {
      dispatch(inpaintMaskDenoiseLimitChanged({ entityIdentifier, denoiseLimit: value }));
    },
    [dispatch, entityIdentifier]
  );

  const onDeleteDenoiseLimit = useCallback(() => {
    dispatch(inpaintMaskDenoiseLimitDeleted({ entityIdentifier }));
  }, [dispatch, entityIdentifier]);

  if (denoiseLimit === undefined) {
    return null;
  }

  return (
    <Flex direction="column" gap={1} w="full" px={2} pb={2}>
      <Flex justifyContent="space-between" w="full" alignItems="center">
        <Text fontSize="sm">{t('controlLayers.denoiseLimit')}</Text>
        <Flex alignItems="center" gap={1}>
          <Text fontSize="sm">{denoiseLimit.toFixed(2)}</Text>
          <InpaintMaskDeleteModifierButton onDelete={onDeleteDenoiseLimit} />
        </Flex>
      </Flex>
      <Slider
        aria-label={t('controlLayers.denoiseLimit')}
        value={denoiseLimit}
        min={0}
        max={1}
        step={0.01}
        onChange={handleDenoiseLimitChange}
      >
        <SliderTrack>
          <SliderFilledTrack />
        </SliderTrack>
        <SliderThumb />
      </Slider>
    </Flex>
  );
});

InpaintMaskDenoiseLimitSlider.displayName = 'InpaintMaskDenoiseLimitSlider';

@@ -7,6 +7,7 @@ import { CanvasEntityMenuItemsDuplicate from 'features/controlLayers/component
import { CanvasEntityMenuItemsMergeDown } from 'features/controlLayers/components/common/CanvasEntityMenuItemsMergeDown';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { InpaintMaskMenuItemsAddModifiers } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsAddModifiers';
import { InpaintMaskMenuItemsConvertToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsConvertToSubMenu';
import { InpaintMaskMenuItemsCopyToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsCopyToSubMenu';
import { memo } from 'react';
@@ -20,6 +21,8 @@ export const InpaintMaskMenuItems = memo(() => {
        <CanvasEntityMenuItemsDelete asIcon />
      </IconMenuItemGroup>
      <MenuDivider />
      <InpaintMaskMenuItemsAddModifiers />
      <MenuDivider />
      <CanvasEntityMenuItemsTransform />
      <MenuDivider />
      <CanvasEntityMenuItemsMergeDown />

@@ -0,0 +1,27 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskMenuItemsAddModifiers = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  const { t } = useTranslation();
  const isBusy = useCanvasIsBusy();
  const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
  const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);

  return (
    <>
      <MenuItem onClick={addInpaintMaskNoise} isDisabled={isBusy}>
        {t('controlLayers.addImageNoise')}
      </MenuItem>
      <MenuItem onClick={addInpaintMaskDenoiseLimit} isDisabled={isBusy}>
        {t('controlLayers.addDenoiseLimit')}
      </MenuItem>
    </>
  );
});

InpaintMaskMenuItemsAddModifiers.displayName = 'InpaintMaskMenuItemsAddModifiers';

@@ -0,0 +1,67 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { inpaintMaskNoiseChanged, inpaintMaskNoiseDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskNoiseSlider = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  const { t } = useTranslation();
  const dispatch = useAppDispatch();

  const selectNoiseLevel = useMemo(
    () =>
      createSelector(
        selectCanvasSlice,
        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskNoiseSlider').noiseLevel
      ),
    [entityIdentifier]
  );
  const noiseLevel = useAppSelector(selectNoiseLevel);

  const handleNoiseChange = useCallback(
    (value: number) => {
      dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: value }));
    },
    [dispatch, entityIdentifier]
  );

  const onDeleteNoise = useCallback(() => {
    dispatch(inpaintMaskNoiseDeleted({ entityIdentifier }));
  }, [dispatch, entityIdentifier]);

  if (noiseLevel === undefined) {
    return null;
  }

  return (
    <Flex direction="column" gap={1} w="full" px={2} pb={2}>
      <Flex justifyContent="space-between" w="full" alignItems="center">
        <Text fontSize="sm">{t('controlLayers.imageNoise')}</Text>
        <Flex alignItems="center" gap={1}>
          <Text fontSize="sm">{Math.round(noiseLevel * 100)}%</Text>
          <InpaintMaskDeleteModifierButton onDelete={onDeleteNoise} />
        </Flex>
      </Flex>
      <Slider
        aria-label={t('controlLayers.imageNoise')}
        value={noiseLevel}
        min={0}
        max={1}
        step={0.01}
        onChange={handleNoiseChange}
      >
        <SliderTrack>
          <SliderFilledTrack />
        </SliderTrack>
        <SliderThumb />
      </Slider>
    </Flex>
  );
});

InpaintMaskNoiseSlider.displayName = 'InpaintMaskNoiseSlider';

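Both sliders follow the same selector pattern: each component instance memoizes its own createSelector instance, keyed on its entityIdentifier, so the selector cache is never shared (and invalidated) across two sliders pointing at different entities. A hypothetical generalization of that pattern, purely illustrative and not part of the diff (the hook name and the pick callback are assumptions; pick must be referentially stable, e.g. defined at module scope):

import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { useMemo } from 'react';

// One memoized selector per component instance, scoped to its entity.
export const useInpaintMaskField = <T>(
  entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>,
  pick: (entity: { noiseLevel?: number; denoiseLimit?: number }) => T
): T => {
  const selector = useMemo(
    () =>
      createSelector(selectCanvasSlice, (canvas) =>
        pick(selectEntityOrThrow(canvas, entityIdentifier, 'useInpaintMaskField'))
      ),
    [entityIdentifier, pick]
  );
  return useAppSelector(selector);
};
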
@@ -0,0 +1,47 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { InpaintMaskDenoiseLimitSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskDenoiseLimitSlider';
import { InpaintMaskNoiseSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskNoiseSlider';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';

const buildSelectHasDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
    return entity.denoiseLimit !== undefined;
  });

const buildSelectHasNoiseLevel = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
    return entity.noiseLevel !== undefined;
  });

export const InpaintMaskSettings = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  const selectHasDenoiseLimit = useMemo(() => buildSelectHasDenoiseLimit(entityIdentifier), [entityIdentifier]);
  const selectHasNoiseLevel = useMemo(() => buildSelectHasNoiseLevel(entityIdentifier), [entityIdentifier]);

  const hasDenoiseLimit = useAppSelector(selectHasDenoiseLimit);
  const hasNoiseLevel = useAppSelector(selectHasNoiseLevel);

  if (!hasNoiseLevel && !hasDenoiseLimit) {
    // If we show the <InpaintMaskAddButtons /> below, we can remove this check.
    // Until then, if there are no sliders to show for the mask settings, return null. This prevents rendering an
    // empty settings wrapper div, which adds unnecessary space in the UI.
    return null;
  }

  return (
    <CanvasEntitySettingsWrapper>
      {/* {!hasNoiseLevel && !hasDenoiseLimit && <InpaintMaskAddButtons />} */}
      {hasNoiseLevel && <InpaintMaskNoiseSlider />}
      {hasDenoiseLimit && <InpaintMaskDenoiseLimitSlider />}
    </CanvasEntitySettingsWrapper>
  );
});

InpaintMaskSettings.displayName = 'InpaintMaskSettings';

@@ -1,3 +1,4 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import {
  $shift,
  CompositeSlider,
@@ -16,7 +17,6 @@ import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { snapToNearest } from 'features/controlLayers/konva/util';
import { round } from 'lodash-es';
import { computed } from 'nanostores';
import type { KeyboardEvent } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
import { PiCaretDownBold, PiMagnifyingGlassMinusBold, PiMagnifyingGlassPlusBold } from 'react-icons/pi';
@@ -68,9 +68,16 @@ const sliderDefaultValue = mapRawValueToSliderValue(100);

const snapCandidates = marks.slice(1, marks.length - 1);

const inputFieldSx = {
  paddingInlineEnd: 7,
  _focusVisible: {
    zIndex: 0,
  },
} satisfies SystemStyleObject;

export const CanvasToolbarScale = memo(() => {
  const canvasManager = useCanvasManager();
  const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
  const scale = useStore(canvasManager.stage.$scale);
  const [localScale, setLocalScale] = useState(scale * 100);

  const onChangeSlider = useCallback(
@@ -115,7 +122,7 @@ export const CanvasToolbarScale = memo(() => {
  return (
    <Flex alignItems="center">
      <ZoomOutButton />
      <Popover>
      <Popover isLazy lazyBehavior="unmount">
        <PopoverAnchor>
          <NumberInput
            variant="outline"
@@ -132,7 +139,7 @@ export const CanvasToolbarScale = memo(() => {
            onKeyDown={onKeyDown}
            clampValueOnBlur={false}
          >
            <NumberInputField paddingInlineEnd={7} title="" _focusVisible={{ zIndex: 0 }} />
            <NumberInputField title="" sx={inputFieldSx} />
            <PopoverTrigger>
              <IconButton
                aria-label="open-slider"
@@ -171,16 +178,17 @@ CanvasToolbarScale.displayName = 'CanvasToolbarScale';

const SCALE_SNAPS = [0.1, 0.15, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 5, 7.5, 10, 15, 20];

const ZoomOutButton = () => {
const ZoomOutButton = memo(() => {
  const canvasManager = useCanvasManager();
  const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
  const scale = useStore(canvasManager.stage.$scale);
  const onClick = useCallback(() => {
    const scale = canvasManager.stage.$scale.get();
    const nextScale =
      SCALE_SNAPS.slice()
        .reverse()
        .find((snap) => snap < scale) ?? canvasManager.stage.config.MIN_SCALE;
    canvasManager.stage.setScale(Math.max(nextScale, canvasManager.stage.config.MIN_SCALE));
  }, [canvasManager.stage, scale]);
  }, [canvasManager.stage]);

  return (
    <IconButton
@@ -192,15 +200,17 @@ const ZoomOutButton = () => {
      isDisabled={scale <= canvasManager.stage.config.MIN_SCALE}
    />
  );
};
});
ZoomOutButton.displayName = 'ZoomOutButton';

const ZoomInButton = () => {
const ZoomInButton = memo(() => {
  const canvasManager = useCanvasManager();
  const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
  const scale = useStore(canvasManager.stage.$scale);
  const onClick = useCallback(() => {
    const scale = canvasManager.stage.$scale.get();
    const nextScale = SCALE_SNAPS.find((snap) => snap > scale) ?? canvasManager.stage.config.MAX_SCALE;
    canvasManager.stage.setScale(Math.min(nextScale, canvasManager.stage.config.MAX_SCALE));
  }, [canvasManager.stage, scale]);
  }, [canvasManager.stage]);

  return (
    <IconButton
@@ -212,4 +222,5 @@ const ZoomInButton = () => {
      isDisabled={scale >= canvasManager.stage.config.MAX_SCALE}
    />
  );
};
});
ZoomInButton.displayName = 'ZoomInButton';

@@ -6,6 +6,8 @@ import { getPrefixedId } from 'features/controlLayers/konva/util';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  inpaintMaskDenoiseLimitAdded,
  inpaintMaskNoiseAdded,
  rasterLayerAdded,
  referenceImageAdded,
  rgAdded,
@@ -222,6 +224,24 @@ export const useAddRegionalGuidanceNegativePrompt = (entityIdentifier: CanvasEnt
  return func;
};

export const useAddInpaintMaskNoise = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
  const dispatch = useAppDispatch();
  const func = useCallback(() => {
    dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
  }, [dispatch, entityIdentifier]);

  return func;
};

export const useAddInpaintMaskDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
  const dispatch = useAppDispatch();
  const func = useCallback(() => {
    dispatch(inpaintMaskDenoiseLimitAdded({ entityIdentifier }));
  }, [dispatch, entityIdentifier]);

  return func;
};

export const buildSelectValidRegionalGuidanceActions = (
  entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>
) => {

@@ -1,5 +1,6 @@
import { withResult, withResultAsync } from 'common/util/result';
import { CanvasCacheModule } from 'features/controlLayers/konva/CanvasCacheModule';
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
import type { CanvasEntityAdapter, CanvasEntityAdapterFromType } from 'features/controlLayers/konva/CanvasEntity/types';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
@@ -426,6 +427,145 @@ export class CanvasCompositorModule extends CanvasModuleBase {
    return this.mergeByEntityIdentifiers(entityIdentifiers, false);
  };

  /**
   * Creates and uploads a grayscale representation of the inpaint mask's image noise or denoise limit values.
   * This produces an image with a white background where the mask is represented by dark values.
   *
   * @param adapters The adapters for the canvas entities to composite
   * @param rect The region to include in the rasterized image
   * @param attribute The attribute to use for grayscale values (defaults to 'noiseLevel')
   * @param invertMask If true, the mask is inverted so that unmasked (transparent) areas receive the dark values
   * @param uploadOptions Options for uploading the image
   * @param forceUpload If true, the image is always re-uploaded, returning a new image DTO
   * @returns A promise that resolves to the image DTO
   */
  getGrayscaleMaskCompositeImageDTO = async (
    adapters: CanvasEntityAdapterInpaintMask[],
    rect: Rect,
    attribute: 'noiseLevel' | 'denoiseLimit' = 'noiseLevel',
    invertMask: boolean = false,
    uploadOptions: SetOptional<Omit<UploadImageArg, 'file'>, 'image_category'> = { is_intermediate: true },
    forceUpload?: boolean
  ): Promise<ImageDTO> => {
    assert(rect.width > 0 && rect.height > 0, 'Unable to rasterize empty rect');
    // Use a unique hash that includes the attribute name for caching
    const hash = this.getCompositeHash(adapters, { rect, attribute, invertMask, grayscale: true });
    const cachedImageName = forceUpload ? undefined : this.manager.cache.imageNameCache.get(hash);

    let imageDTO: ImageDTO | null = null;

    if (cachedImageName) {
      imageDTO = await getImageDTOSafe(cachedImageName);
      if (imageDTO) {
        this.log.debug({ rect, imageName: cachedImageName, imageDTO }, 'Using cached grayscale composite image');
        return imageDTO;
      }
      this.log.warn({ rect, imageName: cachedImageName }, 'Cached grayscale image name not found, recompositing');
    }

    // Create a white background canvas
    const canvas = document.createElement('canvas');
    canvas.width = rect.width;
    canvas.height = rect.height;

    const ctx = canvas.getContext('2d');
    assert(ctx !== null, 'Canvas 2D context is null');

    // Fill with white first (creates white background)
    ctx.fillStyle = 'white';
    ctx.fillRect(0, 0, rect.width, rect.height);

    // Apply special compositing mode
    ctx.globalCompositeOperation = 'darken';

    // Draw each adapter's content
    for (const adapter of adapters) {
      this.log.debug({ entityIdentifier: adapter.entityIdentifier }, 'Drawing entity to grayscale composite canvas');

      // Get the canvas from the adapter
      const adapterCanvas = adapter.getCanvas(rect);

      // Create a temporary canvas for grayscale conversion
      const tempCanvas = document.createElement('canvas');
      tempCanvas.width = adapterCanvas.width;
      tempCanvas.height = adapterCanvas.height;

      const tempCtx = tempCanvas.getContext('2d');
      assert(tempCtx !== null, 'Temp canvas 2D context is null');

      // Draw the original adapter canvas to the temp canvas
      tempCtx.drawImage(adapterCanvas, 0, 0);

      // Get the image data for processing
      const imageData = tempCtx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
      const data = imageData.data;

      const attributeValue = typeof adapter.state[attribute] === 'number' ? (adapter.state[attribute] as number) : 1.0; // Default to full strength if attribute is undefined

      // Process all pixels in the image data
      for (let i = 0; i < data.length; i += 4) {
        // Make sure we're accessing valid array indices
        if (i + 3 < data.length) {
          // input has transparency
          // Calculate grayscale value: white (255) for no mask, darker for stronger mask
          let grayValue = 255; // Default to white for unmasked areas
          if (invertMask ? (data[i + 3] ?? 0) < 128 : (data[i + 3] ?? 0) > 127) {
            grayValue = Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
          }

          data[i] = grayValue; // R
          data[i + 1] = grayValue; // G
          data[i + 2] = grayValue; // B
          data[i + 3] = 255; // A (output is fully opaque)
        }
      }

      imageData.data.set(data); // Update the image data with the processed values

      // Put the processed image data back to the temp canvas
      tempCtx.putImageData(imageData, 0, 0);

      // Draw the temp canvas to the main canvas
      ctx.drawImage(tempCanvas, 0, 0);
    }

    // Convert to blob and upload
    this.$isProcessing.set(true);
    const blobResult = await withResultAsync(() => canvasToBlob(canvas));
    this.$isProcessing.set(false);

    if (blobResult.isErr()) {
      this.log.error(
        { error: serializeError(blobResult.error) },
        'Failed to convert grayscale composite canvas to blob'
      );
      throw blobResult.error;
    }

    const blob = blobResult.value;

    if (this.manager._isDebugging) {
      previewBlob(blob, 'Grayscale Composite');
    }

    this.$isUploading.set(true);
    const uploadResult = await withResultAsync(() =>
      uploadImage({
        file: new File([blob], 'canvas-grayscale-composite.png', { type: 'image/png' }),
        image_category: 'general',
        ...uploadOptions,
      })
    );
    this.$isUploading.set(false);

    if (uploadResult.isErr()) {
      throw uploadResult.error;
    }

    imageDTO = uploadResult.value;
    this.manager.cache.imageNameCache.set(hash, imageDTO.image_name);
    return imageDTO;
  };

  /**
   * Calculates the transparency of the composite of the given adapters.
   * @param adapters The adapters to composite

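The per-pixel rule in the grayscale compositor above reduces to a small pure function. A sketch of the mapping, under the same assumptions the loop makes ("masked" means alpha > 127, or alpha < 128 when inverted):

// Gray value for one pixel of the composite mask image.
// attributeValue is the entity's noiseLevel or denoiseLimit in [0, 1].
const grayForPixel = (alpha: number, attributeValue: number, invertMask: boolean): number => {
  const masked = invertMask ? alpha < 128 : alpha > 127;
  if (!masked) {
    return 255; // unmasked areas stay white
  }
  // Stronger attribute values produce darker pixels: 1.0 -> 0 (black), 0.0 -> 255 (white).
  return Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
};
// e.g. grayForPixel(255, 1.0, false) === 0; grayForPixel(255, 0.25, false) === 191; grayForPixel(0, 1.0, false) === 255

The 'darken' composite operation then ensures that where several masks overlap, the darkest (strongest) value wins against the white background.
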
@@ -6,7 +6,7 @@ import type { Coordinate, Dimensions, Rect, StageAttrs } from 'features/controlL
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import { clamp } from 'lodash-es';
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';

type CanvasStageModuleConfig = {
@@ -26,6 +26,14 @@ type CanvasStageModuleConfig = {
   * The padding in pixels to use when fitting the layers to the stage.
   */
  FIT_LAYERS_TO_STAGE_PADDING_PX: number;
  /**
   * The snap points for the scale of the canvas.
   */
  SCALE_SNAP_POINTS: number[];
  /**
   * The tolerance for snapping the scale of the canvas, as a fraction of the scale.
   */
  SCALE_SNAP_TOLERANCE: number;
};

const DEFAULT_CONFIG: CanvasStageModuleConfig = {
@@ -33,6 +41,8 @@ const DEFAULT_CONFIG: CanvasStageModuleConfig = {
  MAX_SCALE: 20,
  SCALE_FACTOR: 0.999,
  FIT_LAYERS_TO_STAGE_PADDING_PX: 48,
  SCALE_SNAP_POINTS: [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5],
  SCALE_SNAP_TOLERANCE: 0.05,
};

export class CanvasStageModule extends CanvasModuleBase {
@@ -43,6 +53,11 @@ export class CanvasStageModule extends CanvasModuleBase {
  readonly manager: CanvasManager;
  readonly log: Logger;

  // State for scale snapping logic
  private _intendedScale: number = 1;
  private _activeSnapPoint: number | null = null;
  private _snapTimeout: number | null = null;

  container: HTMLDivElement;
  konva: { stage: Konva.Stage };

@@ -55,6 +70,7 @@ export class CanvasStageModule extends CanvasModuleBase {
    height: 0,
    scale: 0,
  });
  $scale = computed(this.$stageAttrs, (attrs) => attrs.scale);

  subscriptions = new Set<() => void>();
  resizeObserver: ResizeObserver | null = null;
@@ -76,6 +92,9 @@ export class CanvasStageModule extends CanvasModuleBase {
        container,
      }),
    };

    // Initialize intended scale to the default stage scale
    this._intendedScale = this.konva.stage.scaleX();
  }

  setContainer = (container: HTMLDivElement) => {
@@ -195,6 +214,10 @@ export class CanvasStageModule extends CanvasModuleBase {
      -rect.y * scale + this.config.FIT_LAYERS_TO_STAGE_PADDING_PX + (availableHeight - rect.height * scale) / 2
    );

    // When fitting the stage, we update the intended scale and reset any active snap.
    this._intendedScale = scale;
    this._activeSnapPoint = null;

    this.konva.stage.setAttrs({
      x,
      y,
@@ -230,26 +253,41 @@ export class CanvasStageModule extends CanvasModuleBase {
   * Constrains a scale to be within the valid range
   */
  constrainScale = (scale: number): number => {
    return clamp(Math.round(scale * 100) / 100, this.config.MIN_SCALE, this.config.MAX_SCALE);
    return clamp(scale, this.config.MIN_SCALE, this.config.MAX_SCALE);
  };

  /**
   * Sets the scale of the stage. If center is provided, the stage will zoom in/out on that point.
   * @param scale The new scale to set
   * @param center The center of the stage to zoom in/out on
   * Programmatically sets the scale of the stage, overriding any active snapping.
   * If a center point is provided, the stage will zoom on that point.
   * @param scale The new scale to set.
   * @param center The center point for the zoom.
   */
  setScale = (scale: number, center: Coordinate = this.getCenter(true)): void => {
    this.log.trace('Setting scale');
  setScale = (scale: number, center?: Coordinate): void => {
    this.log.trace({ scale }, 'Programmatically setting scale');
    const newScale = this.constrainScale(scale);

    const { x, y } = this.getPosition();
    // When scale is set programmatically, update the intended scale and reset any active snap.
    this._intendedScale = newScale;
    this._activeSnapPoint = null;

    this._applyScale(newScale, center);
  };

  /**
   * Applies a scale to the stage, adjusting the position to keep the given center point stationary.
   * This internal method does NOT modify snapping state.
   */
  private _applyScale = (newScale: number, center?: Coordinate): void => {
    const oldScale = this.getScale();

    const deltaX = (center.x - x) / oldScale;
    const deltaY = (center.y - y) / oldScale;
    const _center = center ?? this.getCenter(true);
    const { x, y } = this.getPosition();

    const newX = Math.floor(center.x - deltaX * newScale);
    const newY = Math.floor(center.y - deltaY * newScale);
    const deltaX = (_center.x - x) / oldScale;
    const deltaY = (_center.y - y) / oldScale;

    const newX = _center.x - deltaX * newScale;
    const newY = _center.y - deltaY * newScale;

    this.konva.stage.setAttrs({
      x: newX,
@@ -263,6 +301,7 @@ export class CanvasStageModule extends CanvasModuleBase {

  onStageMouseWheel = (e: KonvaEventObject<WheelEvent>) => {
    e.evt.preventDefault();
    this._snapTimeout && window.clearTimeout(this._snapTimeout);

    if (e.evt.ctrlKey || e.evt.metaKey) {
      return;
@@ -271,12 +310,59 @@ export class CanvasStageModule extends CanvasModuleBase {
    // We need the absolute cursor position - not the scaled position
    const cursorPos = this.konva.stage.getPointerPosition();

    if (cursorPos) {
      // When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
      const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
      const scale = this.manager.stage.getScale() * this.config.SCALE_FACTOR ** delta;
      this.manager.stage.setScale(scale, cursorPos);
    if (!cursorPos) {
      return;
    }

    // When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
    const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;

    // Update the intended scale based on the last intended scale, creating a continuous zoom feel
    const newIntendedScale = this._intendedScale * this.config.SCALE_FACTOR ** delta;
    this._intendedScale = this.constrainScale(newIntendedScale);

    // Pass control to the snapping logic
    this._updateScaleWithSnapping(cursorPos);

    this._snapTimeout = window.setTimeout(() => {
      // After a short delay, we can reset the intended scale to the current scale
      // This allows for continuous zooming without snapping back to the last snapped scale
      this._intendedScale = this.getScale();
    }, 100);
  };

  /**
   * Implements "sticky" snap logic.
   * - If not snapped, checks if the intended scale is close enough to a snap point to engage the snap.
   * - If snapped, checks if the intended scale has moved far enough away to break the snap.
   * - Applies the resulting scale to the stage.
   */
  private _updateScaleWithSnapping = (center: Coordinate) => {
    // If we are currently snapped, check if we should break out
    if (this._activeSnapPoint !== null) {
      const threshold = this._activeSnapPoint * this.config.SCALE_SNAP_TOLERANCE;
      if (Math.abs(this._intendedScale - this._activeSnapPoint) > threshold) {
        // User has scrolled far enough to break the snap
        this._activeSnapPoint = null;
        this._applyScale(this._intendedScale, center);
      }
      // Else, do nothing - we remain snapped at the current scale, creating a "dead zone"
      return;
    }

    // If we are not snapped, check if we should snap to a point
    for (const snapPoint of this.config.SCALE_SNAP_POINTS) {
      const threshold = snapPoint * this.config.SCALE_SNAP_TOLERANCE;
      if (Math.abs(this._intendedScale - snapPoint) < threshold) {
        // Engage the snap
        this._activeSnapPoint = snapPoint;
        this._applyScale(snapPoint, center);
        return;
      }
    }

    // If we are not snapping and not breaking a snap, just update to the intended scale
    this._applyScale(this._intendedScale, center);
  };

  onStagePointerDown = (e: KonvaEventObject<PointerEvent>) => {

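The sticky-snap behavior above can be condensed into a pure function of the intended scale and the currently engaged snap point. A sketch, using the SCALE_SNAP_POINTS and SCALE_SNAP_TOLERANCE values from the diff (the function name is illustrative):

const SNAP_POINTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5];
const SNAP_TOLERANCE = 0.05; // fraction of the snap point

// Returns the scale to display and the snap point now engaged (null if free).
const resolveSnap = (intended: number, active: number | null): { scale: number; active: number | null } => {
  if (active !== null) {
    // Already snapped: stay until the intended scale leaves the dead zone.
    return Math.abs(intended - active) > active * SNAP_TOLERANCE
      ? { scale: intended, active: null }
      : { scale: active, active };
  }
  // Not snapped: engage the first snap point within tolerance.
  const hit = SNAP_POINTS.find((p) => Math.abs(intended - p) < p * SNAP_TOLERANCE);
  return hit !== undefined ? { scale: hit, active: hit } : { scale: intended, active: null };
};
// e.g. resolveSnap(0.98, null) -> { scale: 1, active: 1 }; resolveSnap(1.06, 1) -> { scale: 1.06, active: null }

Tracking the intended scale separately from the displayed scale is what makes the snap "sticky": while snapped, wheel input keeps moving the intended scale through the dead zone without visibly changing the stage.
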
@@ -1096,6 +1096,30 @@
      state.inpaintMasks.entities = [data];
      state.selectedEntityIdentifier = { type: 'inpaint_mask', id: data.id };
    },
    inpaintMaskNoiseAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
      const { entityIdentifier } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.noiseLevel = 0.15; // Default noise level
      }
    },
    inpaintMaskNoiseChanged: (
      state,
      action: PayloadAction<EntityIdentifierPayload<{ noiseLevel: number }, 'inpaint_mask'>>
    ) => {
      const { entityIdentifier, noiseLevel } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.noiseLevel = noiseLevel;
      }
    },
    inpaintMaskNoiseDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
      const { entityIdentifier } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.noiseLevel = undefined;
      }
    },
    inpaintMaskConvertedToRegionalGuidance: {
      reducer: (
        state,
@@ -1134,6 +1158,30 @@
        payload: { ...payload, newId: getPrefixedId('regional_guidance') },
      }),
    },
    inpaintMaskDenoiseLimitAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
      const { entityIdentifier } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.denoiseLimit = 1.0; // Default denoise limit
      }
    },
    inpaintMaskDenoiseLimitChanged: (
      state,
      action: PayloadAction<EntityIdentifierPayload<{ denoiseLimit: number }, 'inpaint_mask'>>
    ) => {
      const { entityIdentifier, denoiseLimit } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.denoiseLimit = denoiseLimit;
      }
    },
    inpaintMaskDenoiseLimitDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
      const { entityIdentifier } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (entity && entity.type === 'inpaint_mask') {
        entity.denoiseLimit = undefined;
      }
    },
    //#region BBox
    bboxScaledWidthChanged: (state, action: PayloadAction<number>) => {
      const gridSize = getGridSize(state.bbox.modelBase);
@@ -1869,6 +1917,12 @@
  // Inpaint mask
  inpaintMaskAdded,
  inpaintMaskConvertedToRegionalGuidance,
  inpaintMaskNoiseAdded,
  inpaintMaskNoiseChanged,
  inpaintMaskNoiseDeleted,
  inpaintMaskDenoiseLimitAdded,
  inpaintMaskDenoiseLimitChanged,
  inpaintMaskDenoiseLimitDeleted,
  // inpaintMaskRecalled,
} = canvasSlice.actions;

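The new reducers are plain entity-scoped setters, so driving them outside a component is straightforward. A minimal sketch, not from the diff; the entity id is hypothetical and `store` is assumed to be the app's Redux store instance:

import { inpaintMaskNoiseAdded, inpaintMaskNoiseChanged } from 'features/controlLayers/store/canvasSlice';

const entityIdentifier = { type: 'inpaint_mask', id: 'inpaint_mask_1' } as const; // hypothetical id
// Seed the modifier at its default (0.15), then tune it:
store.dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
store.dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: 0.3 }));

Deleting a modifier sets the field back to undefined, which is also the signal InpaintMaskSettings uses to hide the corresponding slider.
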
@@ -310,6 +310,8 @@ const zCanvasInpaintMaskState = zCanvasEntityBase.extend({
  fill: zFill,
  opacity: zOpacity,
  objects: z.array(zCanvasObjectState),
  noiseLevel: z.number().gte(0).lte(1).optional(),
  denoiseLimit: z.number().gte(0).lte(1).optional(),
});
export type CanvasInpaintMaskState = z.infer<typeof zCanvasInpaintMaskState>;

@@ -199,6 +199,8 @@ export const getInpaintMaskState = (
      style: 'diagonal',
      color: getInpaintMaskFillColor(),
    },
    noiseLevel: undefined,
    denoiseLimit: undefined,
  };
  merge(entityState, overrides);
  return entityState;

@@ -1,7 +1,11 @@
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { MainModelBase } from 'features/nodes/types/common';
import { getGridSize, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import {
  getGridSize,
  getOptimalDimension,
  isInSDXLTrainingDimensions,
} from 'features/parameters/util/optimalDimension';

/**
 * Scales the bounding box dimensions to the optimal dimension. The optimal dimensions should be the trained dimension
@@ -10,6 +14,11 @@ import { getGridSize, getOptimalDimension } from 'features/parameters/util/optim
 * @param modelBase The base model
 */
export const getScaledBoundingBoxDimensions = (dimensions: Dimensions, modelBase: MainModelBase): Dimensions => {
  // Special cases: Return original if SDXL and in training dimensions
  if (modelBase === 'sdxl' && isInSDXLTrainingDimensions(dimensions.width, dimensions.height)) {
    return { ...dimensions };
  }

  const optimalDimension = getOptimalDimension(modelBase);
  const gridSize = getGridSize(modelBase);
  const width = roundToMultiple(dimensions.width, gridSize);

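For context on the rounding step: roundToMultiple snaps a value to the model's grid before scaling toward the optimal dimension. A sketch of the expected behavior, assuming nearest-multiple rounding (the actual helper lives in common/util/roundDownToMultiple and may differ in tie-breaking):

const roundToMultiple = (value: number, multiple: number): number => Math.round(value / multiple) * multiple;
// e.g. roundToMultiple(836, 64) === 832; roundToMultiple(800, 64) === 832

The SDXL early-return above skips this entirely: bounding boxes that already match a known SDXL training dimension are passed through unchanged rather than being re-scaled.
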
@@ -26,19 +26,26 @@ import { atom } from 'nanostores';
 import { memo, useCallback, useMemo, useRef } from 'react';
 import { useTranslation } from 'react-i18next';
 import { useListAllImageNamesForBoardQuery } from 'services/api/endpoints/boards';
-import { useDeleteBoardAndImagesMutation, useDeleteBoardMutation } from 'services/api/endpoints/images';
+import {
+  useDeleteBoardAndImagesMutation,
+  useDeleteBoardMutation,
+  useDeleteUncategorizedImagesMutation,
+} from 'services/api/endpoints/images';
 import type { BoardDTO } from 'services/api/types';
 
-export const $boardToDelete = atom<BoardDTO | null>(null);
+export const $boardToDelete = atom<BoardDTO | 'none' | null>(null);
 
 const DeleteBoardModal = () => {
   useAssertSingleton('DeleteBoardModal');
   const boardToDelete = useStore($boardToDelete);
   const { t } = useTranslation();
 
+  const boardId = useMemo(() => (boardToDelete === 'none' ? 'none' : boardToDelete?.board_id), [boardToDelete]);
+
   const { currentData: boardImageNames, isFetching: isFetchingBoardNames } = useListAllImageNamesForBoardQuery(
-    boardToDelete?.board_id
+    boardId
       ? {
-          board_id: boardToDelete?.board_id,
+          board_id: boardId,
           categories: undefined,
           is_intermediate: undefined,
         }
@@ -71,10 +78,13 @@ const DeleteBoardModal = () => {
 
   const [deleteBoardAndImages, { isLoading: isDeleteBoardAndImagesLoading }] = useDeleteBoardAndImagesMutation();
 
+  const [deleteUncategorizedImages, { isLoading: isDeleteUncategorizedImagesLoading }] =
+    useDeleteUncategorizedImagesMutation();
+
   const imageUsageSummary = useAppSelector(selectImageUsageSummary);
 
   const handleDeleteBoardOnly = useCallback(() => {
-    if (!boardToDelete) {
+    if (!boardToDelete || boardToDelete === 'none') {
       return;
     }
     deleteBoardOnly(boardToDelete.board_id);
@@ -82,13 +92,21 @@ const DeleteBoardModal = () => {
   }, [boardToDelete, deleteBoardOnly]);
 
   const handleDeleteBoardAndImages = useCallback(() => {
-    if (!boardToDelete) {
+    if (!boardToDelete || boardToDelete === 'none') {
       return;
     }
     deleteBoardAndImages(boardToDelete.board_id);
     $boardToDelete.set(null);
   }, [boardToDelete, deleteBoardAndImages]);
 
+  const handleDeleteUncategorizedImages = useCallback(() => {
+    if (!boardToDelete || boardToDelete !== 'none') {
+      return;
+    }
+    deleteUncategorizedImages();
+    $boardToDelete.set(null);
+  }, [boardToDelete, deleteUncategorizedImages]);
+
   const handleClose = useCallback(() => {
     $boardToDelete.set(null);
   }, []);
@@ -96,8 +114,12 @@ const DeleteBoardModal = () => {
   const cancelRef = useRef<HTMLButtonElement>(null);
 
   const isLoading = useMemo(
-    () => isDeleteBoardAndImagesLoading || isDeleteBoardOnlyLoading || isFetchingBoardNames,
-    [isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames]
+    () =>
+      isDeleteBoardAndImagesLoading ||
+      isDeleteBoardOnlyLoading ||
+      isFetchingBoardNames ||
+      isDeleteUncategorizedImagesLoading,
+    [isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames, isDeleteUncategorizedImagesLoading]
   );
 
   if (!boardToDelete) {
@@ -109,7 +131,7 @@ const DeleteBoardModal = () => {
     <AlertDialogOverlay>
       <AlertDialogContent>
         <AlertDialogHeader fontSize="lg" fontWeight="bold">
-          {t('common.delete')} {boardToDelete.board_name}
+          {t('common.delete')} {boardToDelete === 'none' ? t('boards.uncategorizedImages') : boardToDelete.board_name}
         </AlertDialogHeader>
 
         <AlertDialogBody>
@@ -125,11 +147,13 @@ const DeleteBoardModal = () => {
             bottomMessage={t('boards.bottomMessage')}
           />
         )}
-        <Text>
-          {boardToDelete.is_private
-            ? t('boards.deletedPrivateBoardsCannotbeRestored')
-            : t('boards.deletedBoardsCannotbeRestored')}
-        </Text>
+        {boardToDelete !== 'none' && (
+          <Text>
+            {boardToDelete.is_private
+              ? t('boards.deletedPrivateBoardsCannotbeRestored')
+              : t('boards.deletedBoardsCannotbeRestored')}
+          </Text>
+        )}
         <Text>{t('gallery.deleteImagePermanent')}</Text>
       </Flex>
     </AlertDialogBody>
@@ -138,12 +162,21 @@ const DeleteBoardModal = () => {
       <Button ref={cancelRef} onClick={handleClose}>
         {t('boards.cancel')}
       </Button>
-      <Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
-        {t('boards.deleteBoardOnly')}
-      </Button>
-      <Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
-        {t('boards.deleteBoardAndImages')}
-      </Button>
+      {boardToDelete !== 'none' && (
+        <Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
+          {t('boards.deleteBoardOnly')}
+        </Button>
+      )}
+      {boardToDelete !== 'none' && (
+        <Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
+          {t('boards.deleteBoardAndImages')}
+        </Button>
+      )}
+      {boardToDelete === 'none' && (
+        <Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteUncategorizedImages}>
+          {t('boards.deleteAllUncategorizedImages')}
+        </Button>
+      )}
     </Flex>
   </AlertDialogFooter>
 </AlertDialogContent>

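Note the sentinel: consumers of $boardToDelete must now narrow the 'none' case before reading BoardDTO fields. A minimal sketch:

    const boardToDelete = $boardToDelete.get();
    if (boardToDelete === 'none') {
      // uncategorized-images flow; there is no BoardDTO to read
    } else if (boardToDelete) {
      console.log(boardToDelete.board_id); // narrowed to BoardDTO, safe to access
    }
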
@@ -7,9 +7,11 @@ import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
 import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiDownloadBold, PiPlusBold } from 'react-icons/pi';
+import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
 import { useBulkDownloadImagesMutation } from 'services/api/endpoints/images';
 
+import { $boardToDelete } from './DeleteBoardModal';
+
 type Props = {
   children: ContextMenuProps<HTMLDivElement>['children'];
 };
@@ -33,6 +35,10 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
     bulkDownload({ image_names: [], board_id: 'none' });
   }, [bulkDownload]);
 
+  const setUncategorizedImagesAsToBeDeleted = useCallback(() => {
+    $boardToDelete.set('none');
+  }, []);
+
   const renderMenuFunc = useCallback(
     () => (
       <MenuList visibility="visible">
@@ -47,10 +53,26 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
           {t('boards.downloadBoard')}
         </MenuItem>
       )}
+      <MenuItem
+        color="error.300"
+        icon={<PiTrashSimpleBold />}
+        onClick={setUncategorizedImagesAsToBeDeleted}
+        isDestructive
+      >
+        {t('boards.deleteAllUncategorizedImages')}
+      </MenuItem>
     </MenuGroup>
   </MenuList>
     ),
-    [autoAssignBoardOnClick, handleBulkDownload, handleSetAutoAdd, isBulkDownloadEnabled, isSelectedForAutoAdd, t]
+    [
+      autoAssignBoardOnClick,
+      handleBulkDownload,
+      handleSetAutoAdd,
+      isBulkDownloadEnabled,
+      isSelectedForAutoAdd,
+      t,
+      setUncategorizedImagesAsToBeDeleted,
+    ]
   );
 
   return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;

@@ -19,9 +19,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
   const imageViewer = useImageViewer();
   const isBusy = useCanvasIsBusySafe();
 
-  const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
+  const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
     const { dispatch, getState } = store;
-    newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
+    await newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
     dispatch(setActiveTab('canvas'));
     imageViewer.close();
     toast({
@@ -31,9 +31,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
     });
   }, [imageDTO, imageViewer, store, t]);
 
-  const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
+  const onClickNewCanvasWithControlLayerFromImage = useCallback(async () => {
     const { dispatch, getState } = store;
-    newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
+    await newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
     dispatch(setActiveTab('canvas'));
     imageViewer.close();
     toast({
@@ -43,9 +43,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
     });
   }, [imageDTO, imageViewer, store, t]);
 
-  const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(() => {
+  const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(async () => {
     const { dispatch, getState } = store;
-    newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
+    await newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
     dispatch(setActiveTab('canvas'));
     imageViewer.close();
     toast({
@@ -55,9 +55,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
     });
   }, [imageDTO, imageViewer, store, t]);
 
-  const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(() => {
+  const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(async () => {
     const { dispatch, getState } = store;
-    newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
+    await newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
     dispatch(setActiveTab('canvas'));
     imageViewer.close();
     toast({

@@ -1,7 +1,6 @@
 import type { AppDispatch, RootState } from 'app/store/store';
 import { deepClone } from 'common/util/deepClone';
 import { selectDefaultIPAdapter, selectDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
-import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
 import { getPrefixedId } from 'features/controlLayers/konva/util';
 import { canvasReset } from 'features/controlLayers/store/actions';
 import {
@@ -20,6 +19,7 @@ import type {
   CanvasControlLayerState,
   CanvasEntityIdentifier,
   CanvasEntityType,
+  CanvasImageState,
   CanvasInpaintMaskState,
   CanvasRasterLayerState,
   CanvasRegionalGuidanceState,
@@ -34,7 +34,7 @@ import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
 import type { FieldIdentifier } from 'features/nodes/types/field';
 import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
 import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
-import { imagesApi } from 'services/api/endpoints/images';
+import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images';
 import type { ImageDTO } from 'services/api/types';
 import type { Equals } from 'tsafe';
 import { assert } from 'tsafe';
@@ -142,14 +142,14 @@ export const createNewCanvasEntityFromImage = (arg: {
  *
  * Using 'raster_layer' for the type and enabling `withResize` replicates the common img2img flow.
  */
-export const newCanvasFromImage = (arg: {
+export const newCanvasFromImage = async (arg: {
   imageDTO: ImageDTO;
   type: CanvasEntityType | 'regional_guidance_with_reference_image';
-  withResize: boolean;
+  withResize?: boolean;
   dispatch: AppDispatch;
   getState: () => RootState;
 }) => {
-  const { type, imageDTO, withResize, dispatch, getState } = arg;
+  const { type, imageDTO, withResize = false, dispatch, getState } = arg;
   const state = getState();
 
   const base = selectBboxModelBase(state);
@@ -158,22 +158,22 @@ export const newCanvasFromImage = (arg: {
   const optimalDimension = getOptimalDimension(base);
   const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);
 
-  const imageObject = imageDTOToImageObject(imageDTO);
-  const { x, y } = selectBboxRect(state);
+  let imageObject: CanvasImageState;
 
-  const addInitCallback = (id: string) => {
-    CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
-      // Skip the callback if the adapter is not the one we are creating
-      if (adapter.id !== id) {
-        return false;
-      }
-      // Fit the layer to the bbox w/ fill strategy
-      await adapter.transformer.startTransform({ silent: true });
-      adapter.transformer.fitToBboxFill();
-      await adapter.transformer.applyTransform();
-      return true;
+  if (withResize && (width !== imageDTO.width || height !== imageDTO.height)) {
+    const resizedImageDTO = await uploadImage({
+      file: await imageDTOToFile(imageDTO),
+      image_category: 'general',
+      is_intermediate: true,
+      silent: true,
+      resize_to: { width, height },
     });
-  };
+    imageObject = imageDTOToImageObject(resizedImageDTO);
+  } else {
+    imageObject = imageDTOToImageObject(imageDTO);
+  }
+
+  const { x, y } = selectBboxRect(state);
 
   switch (type) {
     case 'raster_layer': {
@@ -182,9 +182,6 @@ export const newCanvasFromImage = (arg: {
         objects: [imageObject],
         position: { x, y },
       } satisfies Partial<CanvasRasterLayerState>;
-      if (withResize) {
-        addInitCallback(overrides.id);
-      }
      dispatch(canvasReset());
      // The `bboxChangedFromCanvas` reducer does no validation! Careful!
      dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -198,9 +195,6 @@ export const newCanvasFromImage = (arg: {
         position: { x, y },
         controlAdapter: deepClone(initialControlNet),
       } satisfies Partial<CanvasControlLayerState>;
-      if (withResize) {
-        addInitCallback(overrides.id);
-      }
      dispatch(canvasReset());
      // The `bboxChangedFromCanvas` reducer does no validation! Careful!
      dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -213,9 +207,6 @@ export const newCanvasFromImage = (arg: {
         objects: [imageObject],
         position: { x, y },
       } satisfies Partial<CanvasInpaintMaskState>;
-      if (withResize) {
-        addInitCallback(overrides.id);
-      }
      dispatch(canvasReset());
      // The `bboxChangedFromCanvas` reducer does no validation! Careful!
      dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -228,9 +219,6 @@ export const newCanvasFromImage = (arg: {
         objects: [imageObject],
         position: { x, y },
       } satisfies Partial<CanvasRegionalGuidanceState>;
-      if (withResize) {
-        addInitCallback(overrides.id);
-      }
      dispatch(canvasReset());
      // The `bboxChangedFromCanvas` reducer does no validation! Careful!
      dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));

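newCanvasFromImage is now async (the resize happens up front via uploadImage with resize_to, replacing the post-init transform callback), so callers await it, as the submenu handlers earlier in this diff do. A minimal sketch of the new call shape:

    // Sketch only; identifiers are those used in this diff. withResize now defaults to false.
    await newCanvasFromImage({ imageDTO, type: 'raster_layer', dispatch, getState });
    dispatch(setActiveTab('canvas'));
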
@@ -14,7 +14,7 @@ import type {
   VaeSourceNodes,
 } from 'features/nodes/util/graph/types';
 import { isEqual } from 'lodash-es';
-import type { Invocation } from 'services/api/types';
+import type { ImageDTO, Invocation } from 'services/api/types';
 
 type AddInpaintArg = {
   state: RootState;
@@ -29,6 +29,7 @@ type AddInpaintArg = {
   scaledSize: Dimensions;
   denoising_start: number;
   fp32: boolean;
+  seed: number;
 };
 
 export const addInpaint = async ({
@@ -44,6 +45,7 @@ export const addInpaint = async ({
   scaledSize,
   denoising_start,
   fp32,
+  seed,
 }: AddInpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
   denoise.denoising_start = denoising_start;
 
@@ -51,19 +53,45 @@ export const addInpaint = async ({
   const canvasSettings = selectCanvasSettingsSlice(state);
   const canvas = selectCanvasSlice(state);
 
-  const { bbox } = canvas;
+  const { rect } = canvas.bbox;
 
   const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
-  const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, bbox.rect, {
+  const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {
     is_intermediate: true,
     silent: true,
   });
 
   const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
-  const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
-    is_intermediate: true,
-    silent: true,
-  });
 
+  // Get inpaint mask adapters that have noise settings
+  const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);
+
+  // Create a composite noise mask if we have any adapters with noise settings
+  let noiseMaskImage: ImageDTO | null = null;
+  if (noiseMaskAdapters.length > 0) {
+    noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
+      noiseMaskAdapters,
+      rect,
+      'noiseLevel',
+      canvasSettings.preserveMask,
+      {
+        is_intermediate: true,
+        silent: true,
+      }
+    );
+  }
+
+  // Create a composite denoise limit mask
+  const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
+    inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
+    rect,
+    'denoiseLimit',
+    canvasSettings.preserveMask,
+    {
+      is_intermediate: true,
+      silent: true,
+    }
+  );
 
   const needsScaleBeforeProcessing = !isEqual(scaledSize, originalSize);
 
@@ -82,15 +110,38 @@ export const addInpaint = async ({
       image: { image_name: initialImage.image_name },
       ...scaledSize,
     });
-    const alphaToMask = g.addNode({
-      id: getPrefixedId('alpha_to_mask'),
-      type: 'tomask',
-      image: { image_name: maskImage.image_name },
-      invert: !canvasSettings.preserveMask,
-    });
 
+    // If we have a noise mask, apply it to the input image before i2l conversion
+    if (noiseMaskImage) {
+      // Resize the noise mask to match the scaled size
+      const resizeNoiseMaskToScaledSize = g.addNode({
+        id: getPrefixedId('resize_noise_mask_to_scaled_size'),
+        type: 'img_resize',
+        image: { image_name: noiseMaskImage.image_name },
+        ...scaledSize,
+      });
+
+      // Add noise to the scaled image using the mask
+      const noiseNode = g.addNode({
+        type: 'img_noise',
+        id: getPrefixedId('add_inpaint_noise'),
+        noise_type: 'gaussian',
+        amount: 1.0, // the mask controls the actual intensity
+        noise_color: true,
+        seed: seed,
+      });
+
+      g.addEdge(resizeImageToScaledSize, 'image', noiseNode, 'image');
+      g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
+      g.addEdge(noiseNode, 'image', i2l, 'image');
+    } else {
+      g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
+    }
+
     const resizeMaskToScaledSize = g.addNode({
       id: getPrefixedId('resize_mask_to_scaled_size'),
       type: 'img_resize',
      image: { image_name: maskImage.image_name },
      ...scaledSize,
    });
    const resizeImageToOriginalSize = g.addNode({
@@ -117,12 +168,8 @@ export const addInpaint = async ({
       fade_size_px: params.maskBlur,
     });
 
-    // Resize initial image and mask to scaled size, feed into to gradient mask
-    g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
-    g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
     g.addEdge(i2l, 'latents', denoise, 'latents');
     g.addEdge(vaeSource, 'vae', i2l, 'vae');
 
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
     if (!isMainModelWithoutUnet(modelLoader)) {
       g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
@@ -169,12 +216,23 @@ export const addInpaint = async ({
       ...(i2lNodeType === 'i2l' ? { fp32 } : {}),
     });
 
-    const alphaToMask = g.addNode({
-      id: getPrefixedId('alpha_to_mask'),
-      type: 'tomask',
-      image: { image_name: maskImage.image_name },
-      invert: !canvasSettings.preserveMask,
-    });
+    // If we have a noise mask, apply it to the input image before i2l conversion
+    if (noiseMaskImage) {
+      // Add noise to the scaled image using the mask
+      const noiseNode = g.addNode({
+        type: 'img_noise',
+        id: getPrefixedId('add_inpaint_noise'),
+        image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
+        noise_type: 'gaussian',
+        amount: 1.0, // the mask controls the actual intensity
+        noise_color: true,
+        seed: seed,
+        mask: { image_name: noiseMaskImage.image_name },
+      });
+
+      g.addEdge(noiseNode, 'image', i2l, 'image');
+    }
 
     const createGradientMask = g.addNode({
       id: getPrefixedId('create_gradient_mask'),
       type: 'create_gradient_mask',
@@ -183,9 +241,9 @@ export const addInpaint = async ({
       edge_radius: params.canvasCoherenceEdgeSize,
       fp32,
       image: { image_name: initialImage.image_name },
+      mask: { image_name: maskImage.image_name },
     });
 
-    g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
     g.addEdge(i2l, 'latents', denoise, 'latents');
     g.addEdge(vaeSource, 'vae', i2l, 'vae');
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');

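The branching above reduces to a simple wiring rule: when any inpaint mask carries a noiseLevel, masked gaussian noise is injected between the source image and the i2l node; otherwise the old direct edge is kept. A schematic sketch (the node names here are placeholders, not the diff's identifiers):

    if (noiseMaskImage) {
      // source image -> img_noise (gaussian, scaled by the grayscale mask) -> i2l
      g.addEdge(sourceImageNode, 'image', noiseNode, 'image');
      g.addEdge(noiseMaskNode, 'image', noiseNode, 'mask');
      g.addEdge(noiseNode, 'image', i2l, 'image');
    } else {
      // source image -> i2l, unchanged behavior
      g.addEdge(sourceImageNode, 'image', i2l, 'image');
    }
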
@@ -15,7 +15,7 @@ import type {
   VaeSourceNodes,
 } from 'features/nodes/util/graph/types';
 import { isEqual } from 'lodash-es';
-import type { Invocation } from 'services/api/types';
+import type { ImageDTO, Invocation } from 'services/api/types';
 
 type AddOutpaintArg = {
   state: RootState;
@@ -30,6 +30,7 @@ type AddOutpaintArg = {
   scaledSize: Dimensions;
   denoising_start: number;
   fp32: boolean;
+  seed: number;
 };
 
 export const addOutpaint = async ({
@@ -45,6 +46,7 @@ export const addOutpaint = async ({
   scaledSize,
   denoising_start,
   fp32,
+  seed,
 }: AddOutpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
   denoise.denoising_start = denoising_start;
 
@@ -61,10 +63,38 @@ export const addOutpaint = async ({
   });
 
   const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
-  const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
-    is_intermediate: true,
-    silent: true,
-  });
 
+  const { rect } = canvas.bbox;
+
+  // Get inpaint mask adapters that have noise settings
+  const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);
+
+  // Create a composite noise mask if we have any adapters with noise settings
+  let noiseMaskImage: ImageDTO | null = null;
+  if (noiseMaskAdapters.length > 0) {
+    noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
+      noiseMaskAdapters,
+      rect,
+      'noiseLevel',
+      canvasSettings.preserveMask,
+      {
+        is_intermediate: true,
+        silent: true,
+      }
+    );
+  }
+
+  // Create a composite denoise limit mask
+  const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
+    inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
+    rect,
+    'denoiseLimit',
+    canvasSettings.preserveMask,
+    {
+      is_intermediate: true,
+      silent: true,
+    }
+  );
 
   const infill = getInfill(g, params);
 
@@ -72,14 +102,6 @@ export const addOutpaint = async ({
 
   if (needsScaleBeforeProcessing) {
     // Scale before processing requires some resizing
 
-    // Combine the inpaint mask and the initial image's alpha channel into a single mask
-    const maskAlphaToMask = g.addNode({
-      id: getPrefixedId('alpha_to_mask'),
-      type: 'tomask',
-      image: { image_name: maskImage.image_name },
-      invert: !canvasSettings.preserveMask,
-    });
     const initialImageAlphaToMask = g.addNode({
       id: getPrefixedId('image_alpha_to_mask'),
       type: 'tomask',
@@ -88,8 +110,8 @@ export const addOutpaint = async ({
     const maskCombine = g.addNode({
       id: getPrefixedId('mask_combine'),
       type: 'mask_combine',
+      mask1: { image_name: maskImage.image_name },
     });
-    g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
 
     // Resize the combined and initial image to the scaled size
@@ -134,7 +156,32 @@ export const addOutpaint = async ({
       ...(i2lNodeType === 'i2l' ? { fp32 } : {}),
     });
 
-    g.addEdge(infill, 'image', i2l, 'image');
+    // If we have a noise mask, apply it to the input image before i2l conversion
+    if (noiseMaskImage) {
+      // Resize the noise mask to match the scaled size
+      const resizeNoiseMaskToScaledSize = g.addNode({
+        id: getPrefixedId('resize_noise_mask_to_scaled_size'),
+        type: 'img_resize',
+        image: { image_name: noiseMaskImage.image_name },
+        ...scaledSize,
+      });
+
+      // Add noise to the scaled image using the mask
+      const noiseNode = g.addNode({
+        type: 'img_noise',
+        id: getPrefixedId('add_inpaint_noise'),
+        noise_type: 'gaussian',
+        amount: 1.0, // the mask controls the actual intensity
+        noise_color: true,
+        seed: seed,
+      });
+
+      g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
+      g.addEdge(infill, 'image', noiseNode, 'image');
+      g.addEdge(noiseNode, 'image', i2l, 'image');
+    } else {
+      g.addEdge(infill, 'image', i2l, 'image');
+    }
     g.addEdge(vaeSource, 'vae', i2l, 'vae');
     g.addEdge(i2l, 'latents', denoise, 'latents');
 
@@ -190,12 +237,6 @@ export const addOutpaint = async ({
       type: i2lNodeType,
       ...(i2lNodeType === 'i2l' ? { fp32 } : {}),
     });
-    const maskAlphaToMask = g.addNode({
-      id: getPrefixedId('mask_alpha_to_mask'),
-      type: 'tomask',
-      image: { image_name: maskImage.image_name },
-      invert: !canvasSettings.preserveMask,
-    });
     const initialImageAlphaToMask = g.addNode({
       id: getPrefixedId('image_alpha_to_mask'),
       type: 'tomask',
@@ -204,6 +245,7 @@ export const addOutpaint = async ({
     const maskCombine = g.addNode({
       id: getPrefixedId('mask_combine'),
       type: 'mask_combine',
+      mask1: { image_name: maskImage.image_name },
     });
     const createGradientMask = g.addNode({
       id: getPrefixedId('create_gradient_mask'),
@@ -214,10 +256,29 @@ export const addOutpaint = async ({
       fp32,
       image: { image_name: initialImage.image_name },
     });
-    g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
     g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
     g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
-    g.addEdge(infill, 'image', i2l, 'image');
+
+    // If we have a noise mask, apply it to the input image before i2l conversion
+    if (noiseMaskImage) {
+      // Add noise to the scaled image using the mask
+      const noiseNode = g.addNode({
+        type: 'img_noise',
+        id: getPrefixedId('add_inpaint_noise'),
+        image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
+        noise_type: 'gaussian',
+        amount: 1.0, // the mask controls the actual intensity
+        noise_color: true,
+        seed: seed,
+        mask: { image_name: noiseMaskImage.image_name },
+      });
+
+      g.addEdge(infill, 'image', noiseNode, 'image');
+      g.addEdge(noiseNode, 'image', i2l, 'image');
+    } else {
+      g.addEdge(infill, 'image', i2l, 'image');
+    }
 
     g.addEdge(i2l, 'latents', denoise, 'latents');
     g.addEdge(vaeSource, 'vae', i2l, 'vae');
     g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');

@@ -137,6 +137,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'cogview4_inpaint' });
   } else if (generationMode === 'outpaint') {
@@ -153,6 +154,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'cogview4_outpaint' });
   } else {

@@ -212,6 +212,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'flux_inpaint' });
   } else if (generationMode === 'outpaint') {
@@ -228,6 +229,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'flux_outpaint' });
   } else {

@@ -198,6 +198,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
       scaledSize,
       denoising_start,
       fp32: vaePrecision === 'fp32',
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'inpaint' });
   } else if (generationMode === 'outpaint') {
@@ -214,6 +215,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
       scaledSize,
       denoising_start,
       fp32,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'outpaint' });
   } else {

@@ -162,6 +162,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'sd3_inpaint' });
   } else if (generationMode === 'outpaint') {
@@ -178,6 +179,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
       scaledSize,
       denoising_start,
       fp32: false,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'sd3_outpaint' });
   } else {

@@ -205,6 +205,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
       scaledSize,
       denoising_start,
       fp32,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'sdxl_inpaint' });
   } else if (generationMode === 'outpaint') {
@@ -221,6 +222,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
       scaledSize,
       denoising_start,
       fp32,
+      seed,
     });
     g.upsertMetadata({ generation_mode: 'sdxl_outpaint' });
   } else {

@@ -1,5 +1,6 @@
 import { Box, Textarea } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
 import { negativePromptChanged, selectNegativePrompt } from 'features/controlLayers/store/paramsSlice';
 import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
 import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -15,12 +16,20 @@ import { memo, useCallback, useRef } from 'react';
 import { useTranslation } from 'react-i18next';
 import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';
 
+const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
+  trackWidth: false,
+  trackHeight: true,
+};
+
 export const ParamNegativePrompt = memo(() => {
   const dispatch = useAppDispatch();
   const prompt = useAppSelector(selectNegativePrompt);
   const viewMode = useAppSelector(selectStylePresetViewMode);
   const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);
 
+  const textareaRef = useRef<HTMLTextAreaElement>(null);
+  usePersistedTextAreaSize('negative_prompt', textareaRef, persistOptions);
+
   const { activeStylePreset } = useListStylePresetsQuery(undefined, {
     selectFromResult: ({ data }) => {
       let activeStylePreset = null;
@@ -31,7 +40,6 @@ export const ParamNegativePrompt = memo(() => {
     },
   });
 
-  const textareaRef = useRef<HTMLTextAreaElement>(null);
   const { t } = useTranslation();
   const _onChange = useCallback(
     (v: string) => {

@@ -1,5 +1,6 @@
 import { Box, Textarea } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
 import { positivePromptChanged, selectBase, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice';
 import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton';
 import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
@@ -19,6 +20,12 @@ import type { HotkeyCallback } from 'react-hotkeys-hook';
 import { useTranslation } from 'react-i18next';
 import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';
 
+const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
+  trackWidth: false,
+  trackHeight: true,
+  initialHeight: 120,
+};
+
 export const ParamPositivePrompt = memo(() => {
   const dispatch = useAppDispatch();
   const prompt = useAppSelector(selectPositivePrompt);
@@ -26,6 +33,9 @@ export const ParamPositivePrompt = memo(() => {
   const viewMode = useAppSelector(selectStylePresetViewMode);
   const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);
 
+  const textareaRef = useRef<HTMLTextAreaElement>(null);
+  usePersistedTextAreaSize('positive_prompt', textareaRef, persistOptions);
+
   const { activeStylePreset } = useListStylePresetsQuery(undefined, {
     selectFromResult: ({ data }) => {
       let activeStylePreset = null;
@@ -36,7 +46,6 @@ export const ParamPositivePrompt = memo(() => {
     },
   });
 
-  const textareaRef = useRef<HTMLTextAreaElement>(null);
   const { t } = useTranslation();
   const handleChange = useCallback(
     (v: string) => {
@@ -75,7 +84,6 @@ export const ParamPositivePrompt = memo(() => {
   ref={textareaRef}
   value={prompt}
   onChange={onChange}
-  minH={40}
   onKeyDown={onKeyDown}
   variant="darkFilled"
   borderTopWidth={24} // This prevents the prompt from being hidden behind the header
@@ -83,6 +91,8 @@ export const ParamPositivePrompt = memo(() => {
   paddingInlineStart={3}
   paddingTop={0}
   paddingBottom={3}
+  resize="vertical"
+  minH={28}
 />
 <PromptOverlayButtonWrapper>
   <AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />

@@ -12,7 +12,8 @@ import {
   Text,
 } from '@invoke-ai/ui-library';
 import { useStore } from '@nanostores/react';
-import { useAppDispatch } from 'app/store/storeHooks';
+import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import type { Group, PickerContextState } from 'common/components/Picker/Picker';
 import { buildGroup, getRegex, Picker, usePickerContext } from 'common/components/Picker/Picker';
 import { useDisclosure } from 'common/hooks/useBoolean';
@@ -22,6 +23,7 @@ import { BASE_COLOR_MAP } from 'features/modelManagerV2/subpanels/ModelManagerPa
 import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
 import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
 import { API_BASE_MODELS, MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
+import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
 import { setActiveTab } from 'features/ui/store/uiSlice';
 import { filesize } from 'filesize';
 import { memo, useCallback, useMemo, useRef } from 'react';
@@ -32,12 +34,23 @@ import type { AnyModelConfig, BaseModelType } from 'services/api/types';
 const getOptionId = (modelConfig: AnyModelConfig) => modelConfig.key;
 
 const ModelManagerLink = memo((props: ButtonProps) => {
+  const onClickGoToModelManager = useStore($onClickGoToModelManager);
   const dispatch = useAppDispatch();
   const onClick = useCallback(() => {
     dispatch(setActiveTab('models'));
     $installModelsTab.set(3);
   }, [dispatch]);
-  return <Button size="sm" flexGrow={0} variant="link" color="base.200" onClick={onClick} {...props} />;
+
+  return (
+    <Button
+      size="sm"
+      flexGrow={0}
+      variant="link"
+      color="base.200"
+      onClick={onClickGoToModelManager ?? onClick}
+      {...props}
+    />
+  );
 });
 ModelManagerLink.displayName = 'ModelManagerLink';
 
@@ -47,12 +60,17 @@ const components = {
 
 const NoOptionsFallback = memo(() => {
   const { t } = useTranslation();
+  const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled);
+  const onClickGoToModelManager = useStore($onClickGoToModelManager);
 
   return (
     <Flex flexDir="column" gap={4} alignItems="center">
       <Text color="base.200">{t('modelManager.modelPickerFallbackNoModelsInstalled')}</Text>
-      <Text color="base.200">
-        <Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
-      </Text>
+      {(!isModelsTabDisabled || onClickGoToModelManager) && (
+        <Text color="base.200">
+          <Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
+        </Text>
+      )}
     </Flex>
   );
 });

@@ -26,6 +26,40 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
   }
 };
 
+const SDXL_TRAINING_DIMENSIONS: [number, number][] = [
+  [512, 2048],
+  [512, 1984],
+  [512, 1920],
+  [512, 1856],
+  [576, 1792],
+  [576, 1728],
+  [576, 1664],
+  [640, 1600],
+  [640, 1536],
+  [704, 1472],
+  [704, 1408],
+  [704, 1344],
+  [768, 1344],
+  [768, 1280],
+  [832, 1216],
+  [832, 1152],
+  [896, 1152],
+  [896, 1088],
+  [960, 1088],
+  [960, 1024],
+  [1024, 1024],
+];
+
+/**
+ * Checks if the given width and height are in the SDXL training dimensions.
+ * @param width The width to check
+ * @param height The height to check
+ * @returns Whether the width and height are in the SDXL training dimensions (order agnostic)
+ */
+export const isInSDXLTrainingDimensions = (width: number, height: number): boolean => {
+  return SDXL_TRAINING_DIMENSIONS.some(([w, h]) => (w === width && h === height) || (w === height && h === width));
+};
+
 /**
  * Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8.
  * - sd-1, sd-2, sdxl: 8

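The check is symmetric in width and height, so portrait and landscape orientations of a training resolution both match. Illustrative calls:

    isInSDXLTrainingDimensions(832, 1216); // true
    isInSDXLTrainingDimensions(1216, 832); // true (order agnostic)
    isInSDXLTrainingDimensions(800, 1200); // false -> caller falls back to scaling
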
@@ -1,5 +1,6 @@
 import { Box, Textarea } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
 import { negativePrompt2Changed, selectNegativePrompt2 } from 'features/controlLayers/store/paramsSlice';
 import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
 import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
 import { memo, useCallback, useRef } from 'react';
 import { useTranslation } from 'react-i18next';
 
+const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
+  trackWidth: false,
+  trackHeight: true,
+};
+
 export const ParamSDXLNegativeStylePrompt = memo(() => {
   const dispatch = useAppDispatch();
   const prompt = useAppSelector(selectNegativePrompt2);
   const textareaRef = useRef<HTMLTextAreaElement>(null);
+  usePersistedTextAreaSize('negative_style_prompt', textareaRef, persistOptions);
 
   const { t } = useTranslation();
   const handleChange = useCallback(
     (v: string) => {

@@ -1,5 +1,6 @@
 import { Box, Textarea } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
 import { positivePrompt2Changed, selectPositivePrompt2 } from 'features/controlLayers/store/paramsSlice';
 import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
 import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
 import { memo, useCallback, useRef } from 'react';
 import { useTranslation } from 'react-i18next';
 
+const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
+  trackWidth: false,
+  trackHeight: true,
+};
+
 export const ParamSDXLPositiveStylePrompt = memo(() => {
   const dispatch = useAppDispatch();
   const prompt = useAppSelector(selectPositivePrompt2);
   const textareaRef = useRef<HTMLTextAreaElement>(null);
+  usePersistedTextAreaSize('positive_style_prompt', textareaRef, persistOptions);
 
   const { t } = useTranslation();
   const handleChange = useCallback(
     (v: string) => {

@@ -2,6 +2,7 @@ import type { PayloadAction } from '@reduxjs/toolkit';
 import { createSelector, createSlice } from '@reduxjs/toolkit';
 import type { PersistConfig, RootState } from 'app/store/store';
 import { newSessionRequested } from 'features/controlLayers/store/actions';
+import type { Dimensions } from 'features/controlLayers/store/types';
 import { workflowLoaded } from 'features/nodes/store/nodesSlice';
 import { atom } from 'nanostores';
 
@@ -15,6 +16,7 @@ const initialUIState: UIState = {
   shouldShowProgressInViewer: true,
   accordions: {},
   expanders: {},
+  textAreaSizes: {},
   shouldShowNotificationV2: true,
 };
 
@@ -42,6 +44,10 @@ export const uiSlice = createSlice({
       const { id, isOpen } = action.payload;
       state.expanders[id] = isOpen;
     },
+    textAreaSizesStateChanged: (state, action: PayloadAction<{ id: string; size: Partial<Dimensions> }>) => {
+      const { id, size } = action.payload;
+      state.textAreaSizes[id] = size;
+    },
     shouldShowNotificationChanged: (state, action: PayloadAction<boolean>) => {
       state.shouldShowNotificationV2 = action.payload;
     },
@@ -64,6 +70,7 @@ export const {
   accordionStateChanged,
   expanderStateChanged,
   shouldShowNotificationChanged,
+  textAreaSizesStateChanged,
 } = uiSlice.actions;
 
 export const selectUiSlice = (state: RootState) => state.ui;

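A sketch of how the new reducer is driven (the id is whatever key usePersistedTextAreaSize persists under; the prompt boxes in this diff only track height):

    // Illustrative dispatch; 'positive_prompt' and the height value are example inputs.
    dispatch(textAreaSizesStateChanged({ id: 'positive_prompt', size: { height: 180 } }));
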
@@ -1,3 +1,5 @@
+import type { Dimensions } from 'features/controlLayers/store/types';
+
 export type TabName = 'canvas' | 'upscaling' | 'workflows' | 'models' | 'queue';
 export type CanvasRightPanelTabName = 'layers' | 'gallery';
 
@@ -30,6 +32,10 @@ export interface UIState {
    * The state of expanders. The key is the id of the expander, and the value is a boolean representing the open state.
    */
   expanders: Record<string, boolean>;
+  /**
+   * The size of textareas. The key is the id of the text area, and the value is an object representing its width and/or height.
+   */
+  textAreaSizes: Record<string, Partial<Dimensions>>;
   /**
    * Whether or not to show the user the open notification. Bump version to reset users who may have closed previous version.
    */

@@ -160,6 +160,42 @@ export const imagesApi = api.injectEndpoints({
         return [];
       },
     }),
+    deleteUncategorizedImages: build.mutation<components['schemas']['DeleteImagesFromListResult'], void>({
+      query: () => ({ url: buildImagesUrl('uncategorized'), method: 'DELETE' }),
+      invalidatesTags: (result) => {
+        if (result && result.deleted_images.length > 0) {
+          const boardId = 'none';
+
+          const tags: ApiTagDescription[] = [
+            {
+              type: 'ImageList',
+              id: getListImagesUrl({
+                board_id: boardId,
+                categories: IMAGE_CATEGORIES,
+              }),
+            },
+            {
+              type: 'ImageList',
+              id: getListImagesUrl({
+                board_id: boardId,
+                categories: ASSETS_CATEGORIES,
+              }),
+            },
+            {
+              type: 'Board',
+              id: boardId,
+            },
+            {
+              type: 'BoardImagesTotal',
+              id: boardId,
+            },
+          ];
+
+          return tags;
+        }
+        return [];
+      },
+    }),
     /**
      * Change an image's `is_intermediate` property.
      */
@@ -270,12 +306,15 @@ export const imagesApi = api.injectEndpoints({
       },
     }),
     uploadImage: build.mutation<ImageDTO, UploadImageArg>({
-      query: ({ file, image_category, is_intermediate, session_id, board_id, crop_visible, metadata }) => {
+      query: ({ file, image_category, is_intermediate, session_id, board_id, crop_visible, metadata, resize_to }) => {
         const formData = new FormData();
         formData.append('file', file);
         if (metadata) {
           formData.append('metadata', JSON.stringify(metadata));
         }
+        if (resize_to) {
+          formData.append('resize_to', JSON.stringify(resize_to));
+        }
         return {
           url: buildImagesUrl('upload'),
           method: 'POST',
@@ -563,6 +602,7 @@ export const {
   useAddImagesToBoardMutation,
   useRemoveImagesFromBoardMutation,
   useDeleteBoardAndImagesMutation,
+  useDeleteUncategorizedImagesMutation,
   useDeleteBoardMutation,
   useStarImagesMutation,
   useUnstarImagesMutation,

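uploadImage serializes resize_to into the multipart form alongside the file. A hedged usage sketch, mirroring the call newCanvasFromImage makes earlier in this diff (the file handle and dimensions are example inputs):

    const resizedDTO = await uploadImage({
      file, // a File/Blob you already hold
      image_category: 'general',
      is_intermediate: true,
      silent: true,
      resize_to: { width: 1024, height: 1024 }, // serialized as JSON into the form data
    });
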
@@ -661,6 +661,26 @@ export type paths = {
         patch?: never;
         trace?: never;
     };
+    "/api/v1/images/uncategorized": {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        get?: never;
+        put?: never;
+        post?: never;
+        /**
+         * Delete Uncategorized Images
+         * @description Deletes all images that are uncategorized
+         */
+        delete: operations["delete_uncategorized_images"];
+        options?: never;
+        head?: never;
+        patch?: never;
+        trace?: never;
+    };
     "/api/v1/images/star": {
         parameters: {
             query?: never;
@@ -2723,6 +2743,11 @@ export type components = {
          * Format: binary
          */
         file: Blob;
+        /**
+         * Resize To
+         * @description Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: 16777216
+         */
+        resize_to?: string | null;
         /**
          * Metadata
          * @description The metadata to associate with the image, must be a stringified JSON dict
@@ -5573,7 +5598,7 @@ export type components = {
     };
     /**
      * Create Gradient Mask
-     * @description Creates mask for denoising model run.
+     * @description Creates mask for denoising.
      */
     CreateGradientMaskInvocation: {
         /**
@@ -5600,7 +5625,7 @@ export type components = {
         mask?: components["schemas"]["ImageField"] | null;
         /**
          * Edge Radius
-         * @description How far to blur/expand the edges of the mask
+         * @description How far to expand the edges of the mask
          * @default 16
          */
         edge_radius?: number;
@@ -9366,7 +9391,7 @@ export type components = {
          * @description Method to apply IP Weights with
          * @enum {string}
          */
-        method: "full" | "style" | "composition";
+        method: "full" | "style" | "composition" | "style_strong" | "style_precise";
        /**
         * Weight
         * @description The weight given to the IP-Adapter
@@ -10443,6 +10468,11 @@ export type components = {
          * @default null
          */
         image?: components["schemas"]["ImageField"] | null;
+        /**
+         * @description Optional mask determining where to apply noise (black=noise, white=no noise)
+         * @default null
+         */
+        mask?: components["schemas"]["ImageField"] | null;
        /**
         * Seed
         * @description Seed for random number generation
@@ -23480,6 +23510,26 @@ export interface operations {
            };
        };
    };
+    delete_uncategorized_images: {
+        parameters: {
+            query?: never;
+            header?: never;
+            path?: never;
+            cookie?: never;
+        };
+        requestBody?: never;
+        responses: {
+            /** @description Successful Response */
+            200: {
+                headers: {
+                    [name: string]: unknown;
+                };
+                content: {
+                    "application/json": components["schemas"]["DeleteImagesFromListResult"];
+                };
+            };
+        };
+    };
     star_images_in_list: {
         parameters: {
             query?: never;
@@ -23807,7 +23857,7 @@ export interface operations {
            };
            header?: never;
            path: {
-                /** @description The id of the board */
+                /** @description The id of the board or 'none' for uncategorized images */
                board_id: string;
            };
            cookie?: never;

@@ -1,3 +1,4 @@
+import type { Dimensions } from 'features/controlLayers/store/types';
 import type { components, paths } from 'services/api/schema';
 import type { JsonObject, SetRequired } from 'type-fest';
 
@@ -373,6 +374,10 @@ export type UploadImageArg = {
    * Whether this is the first upload of a batch (used when displaying user feedback with toasts - ignored if the upload is silent)
    */
   isFirstUploadOfBatch?: boolean;
+  /**
+   * If provided, the uploaded image will be resized to the given dimensions.
+   */
+  resize_to?: Dimensions;
 };
 
 export type ImageUploadEntryResponse = S['ImageUploadEntry'];

@@ -1 +1 @@
-__version__ = "5.12.0"
+__version__ = "5.13.0"

@@ -10,20 +10,20 @@ readme = { content-type = "text/markdown", file = "README.md" }
 keywords = ["stable-diffusion", "AI"]
 dynamic = ["version"]
 license = { file = "LICENSE" }
-authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
+authors = [{ name = "Invoke", email = "support@invoke.ai" }]
 classifiers = [
-  'Development Status :: 4 - Beta',
+  'Development Status :: 5 - Production/Stable',
   'Environment :: GPU',
   'Environment :: GPU :: NVIDIA CUDA',
   'Environment :: MacOS X',
   'Intended Audience :: End Users/Desktop',
   'Intended Audience :: Developers',
-  'License :: OSI Approved :: MIT License',
+  'License :: OSI Approved :: Apache Software License',
   'Operating System :: POSIX :: Linux',
   'Operating System :: MacOS',
   'Operating System :: Microsoft :: Windows',
   'Programming Language :: Python :: 3 :: Only',
-  'Programming Language :: Python :: 3.10',
+  'Programming Language :: Python :: 3.12',
   'Topic :: Artistic Software',
   'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
   'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
@@ -35,7 +35,7 @@ dependencies = [
   # Core generation dependencies, pinned for reproducible builds.
   "accelerate",
   "bitsandbytes; sys_platform!='darwin'",
-  "compel==2.0.2",
+  "compel==2.1.1",
   "diffusers[torch]==0.33.0",
   "gguf",
   "invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids
@@ -43,7 +43,7 @@ dependencies = [
   "numpy<2.0.0",
   "onnx==1.16.1",
   "onnxruntime==1.19.2",
-  "opencv-python==4.9.0.80",
+  "opencv-contrib-python",
   "safetensors",
   "sentencepiece",
   "spandrel",

@@ -38,6 +38,12 @@ echo -e "${BGREEN}HEAD${RESET}:"
 git_show HEAD
 echo
 
+# If the classifiers are invalid, publishing to PyPI will fail but the build will succeed.
+# It's a fast check, do it early.
+echo "Checking pyproject classifiers..."
+python3 ./check_classifiers.py ../pyproject.toml
+echo
+
 # ---------------------- FRONTEND ----------------------
 
 pushd ../invokeai/frontend/web >/dev/null

scripts/check_classifiers.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+import re
+import sys
+import urllib.request
+from pathlib import Path
+
+# This script checks the classifiers in a pyproject.toml file against the official Trove classifier list.
+# If the classifiers are invalid, PyPI will reject the package upload.
+
+# Step 1: Get pyproject.toml path from args
+if len(sys.argv) != 2:
+    print(f"Usage: {sys.argv[0]} path/to/pyproject.toml", file=sys.stderr)
+    sys.exit(1)
+
+pyproject_path = Path(sys.argv[1])
+if not pyproject_path.is_file():
+    print(f"File not found: {pyproject_path}", file=sys.stderr)
+    sys.exit(1)
+
+# Step 2: Download the official Trove classifier list
+url = "https://pypi.org/pypi?%3Aaction=list_classifiers"
+with urllib.request.urlopen(url) as response:
+    trove_classifiers = {line.decode("utf-8").strip() for line in response}
+
+# Step 3: Extract classifiers from pyproject.toml
+with open(pyproject_path) as f:
+    content = f.read()
+
+match = re.search(r"classifiers\s*=\s*\[([^\]]*)\]", content, re.MULTILINE | re.DOTALL)
+if not match:
+    print("No 'classifiers' block found in pyproject.toml", file=sys.stderr)
+    sys.exit(1)
+
+raw_block = match.group(1)
+classifiers = [c.strip(" \"'\n") for c in raw_block.split(",") if c.strip()]
+
+# Step 4: Check for invalid classifiers
+invalid = [c for c in classifiers if c not in trove_classifiers]
+
+if invalid:
+    print("❌ Invalid classifiers:")
+    for c in invalid:
+        print(f"  - {c}")
+    print("Valid classifiers:")
+    for c in sorted(trove_classifiers):
+        print(f"  - {c}")
+    sys.exit(1)
+else:
+    print("✅ All classifiers are valid.")

uv.lock (generated, 12 lines changed)
@@ -378,7 +378,7 @@ wheels = [
 
 [[package]]
 name = "compel"
-version = "2.0.2"
+version = "2.1.1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "diffusers" },
@@ -386,9 +386,9 @@ dependencies = [
     { name = "torch" },
     { name = "transformers" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/61/30/cd997f95b01a5e97b9e822d8b482acb8ac9adb744b1edefbe351d888edb5/compel-2.0.2.tar.gz", hash = "sha256:2e9de64b6ea5f9df59f8fae7ebad9a57d1f369dcc953c8645880a49bb19c2c7c", size = 40583 }
+sdist = { url = "https://files.pythonhosted.org/packages/c4/7a/b11be81d48456deab718e7c1216d68793cede4bba501997f30033cfc8059/compel-2.1.1.tar.gz", hash = "sha256:9a201819723193a0b3ef5c090a150e64b4ef5e01715262dce723ebd32b22df7c", size = 42259 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/cd/5a/18c7d44406994c31a61af1560d0cdbd6e5c27b2fb7cb6146c2f19e40ec25/compel-2.0.2-py3-none-any.whl", hash = "sha256:4f6ddcb98f53c0a96b9083525d4578e8c3cc0310299c8a4187c50b922cc89947", size = 30073 },
+    { url = "https://files.pythonhosted.org/packages/4a/c1/0b8e6950d8c82a962172ad608fdfa2e7e402e12664c8319a58578096c640/compel-2.1.1-py3-none-any.whl", hash = "sha256:576bf2760f54654a6fb1262b3ce632986b9940150d4b0c59ca0cf2ef6e6265a7", size = 31101 },
 ]
 
[[package]]
@@ -979,7 +979,7 @@ dependencies = [
    { name = "numpy" },
    { name = "onnx" },
    { name = "onnxruntime" },
-    { name = "opencv-python" },
+    { name = "opencv-contrib-python" },
    { name = "picklescan" },
    { name = "pillow" },
    { name = "prompt-toolkit" },
@@ -1050,7 +1050,7 @@ requires-dist = [
    { name = "accelerate" },
    { name = "bitsandbytes", marker = "sys_platform != 'darwin'" },
    { name = "blake3" },
-    { name = "compel", specifier = "==2.0.2" },
+    { name = "compel", specifier = "==2.1.1" },
    { name = "deprecated" },
    { name = "diffusers", extras = ["torch"], specifier = "==0.33.0" },
    { name = "dnspython" },
@@ -1077,7 +1077,7 @@ requires-dist = [
    { name = "onnxruntime", marker = "extra == 'onnx'" },
    { name = "onnxruntime-directml", marker = "extra == 'onnx-directml'" },
    { name = "onnxruntime-gpu", marker = "extra == 'onnx-cuda'" },
-    { name = "opencv-python", specifier = "==4.9.0.80" },
+    { name = "opencv-contrib-python" },
    { name = "picklescan" },
    { name = "pillow" },
    { name = "pip-tools", marker = "extra == 'dist'" },