Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 09:18:00 -05:00)

Compare commits: v5.12.0rc2...saas-targe (121 commits)
Commit SHA1s (121):

649596cec5, 45aa84c01a, 064d5787c9, d81b23adff, c72480fd1b, 3704573ef8, 01fbf2ce4d, 96e7003449, 80197b8856, 0187bc671e,
31584daabe, a6cb522fed, f70be1e415, a2901f2b46, b61c66c3a9, c77f9ec202, 2c5c35647f, bf0fdbd10e, 731d317a42, e81579f752,
9a10e98c0b, 27fdc139b7, 0a00805afc, 7b38143fbd, 4c5ad1b7d7, d80cc962ad, 7ccabfa200, 936d59cc52, fc16fb6099, c848cbc2e3,
66fd0f0d8a, c266f39f06, 98a44fa4d7, c1d230f961, 68108435ae, e121bf1f62, 4835c344b3, a589dec122, bc67d5c841, f3d5691c04,
b98abc2457, 7e527ccfb7, 0f0c911845, e4818b967b, ce3eede26f, d98725c5e9, 31a96d2945, 845a321a43, 87a44a28ef, d5b9c3ee5a,
91db136cd1, f351ad4b66, fb6fb9abbd, 675c990486, 6ee5cde4bb, c8077f9430, 6aabe9959e, 0b58d172d2, d7c6e293d7, c600bc867d,
f4140dd772, a2d8261d40, bce88a8873, b37e1a3ad6, 35a088e0a6, b936cab039, 34e4093408, d7f93c3cc0, d4c4926caa, 558c7db055,
2ece59b51b, 7dbe39957c, 6fa46d35a5, b2a2b38ea8, 12934da390, 231bc18188, 530cd180c5, 2a92e7b920, 019e057e29, 9aa26f883e,
3f727e24b1, 9e90bf1b20, db3964797f, 881efbda1b, e9ce2ed5f2, 53ac9eafbf, 9e095006a5, 21b24c3ba6, 139ecc10ce, 78ea143b46,
174249ec15, 2510ad7431, ba5e855a60, 23627cf18d, 5e20c9a1ca, 933cf5f276, 41316de659, 041ccfd68e, ad24c203a4, 3fd28ce600,
32df3bdf6e, ba69e89e8c, a8e0c48ddc, 66f6571086, 8a3848e7b6, 3f8486b480, b80be4f639, adb3a849b9, 798499fda6, 02fc5a165c,
b1b8edecfb, 3cd8d48809, f4672ad8c1, 5a86490845, 27dc843046, 2f35d74902, 8bd52ed744, f3e2a3c384, ecc6e8a532, 9170576a38,
f26baa0341
.github/CODEOWNERS (vendored, 29 changes)

@@ -1,32 +1,31 @@
# continuous integration
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious

# documentation
/docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @psychedelicious

# nodes
/invokeai/app/ @blessedcoolant @psychedelicious @brandonrising @hipsterusername @jazzhaiku
/invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku

# installation and configuration
/pyproject.toml @lstein @blessedcoolant @hipsterusername
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
/scripts/ @ebr @lstein @hipsterusername
/installer/ @lstein @ebr @hipsterusername
/invokeai/assets @lstein @ebr @hipsterusername
/invokeai/configs @lstein @hipsterusername
/invokeai/version @lstein @blessedcoolant @hipsterusername
/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
/scripts/ @ebr @lstein @psychedelicious @hipsterusername
/installer/ @lstein @ebr @psychedelicious @hipsterusername
/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
/invokeai/configs @lstein @psychedelicious @hipsterusername
/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername

# web ui
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername

# generation, model management, postprocessing
/invokeai/backend @lstein @blessedcoolant @brandonrising @hipsterusername @jazzhaiku
/invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp

# front ends
/invokeai/frontend/CLI @lstein @hipsterusername
/invokeai/frontend/install @lstein @ebr @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
.github/workflows/python-checks.yml (vendored, 4 changes)

@@ -67,6 +67,10 @@ jobs:
version: '0.6.10'
enable-cache: true

- name: check pypi classifiers
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml

- name: ruff check
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv tool run ruff@0.11.2 check --output-format=github .
@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.

=== "Invoke v5.10.0 and later"
=== "Invoke v5.12 and later"

- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- **In all other cases, do not use an index.**

=== "Invoke v5.10.0 to v5.11.0"

- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
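The index-URL rules above reduce to a small decision table. Purely as an illustration (the helper below and its platform/GPU arguments are assumptions, not part of the installation guide), the v5.12-and-later choices can be sketched like this:

```python
# Minimal sketch of the v5.12+ torch index-URL rules described above.
# The os/gpu detection is assumed; the URLs are the ones listed in the doc.
from typing import Optional


def choose_torch_index_url(os_name: str, gpu: Optional[str]) -> Optional[str]:
    """Return the extra PyPI index URL for torch, or None when no index should be used."""
    if os_name in ("windows", "linux") and gpu == "nvidia":
        return "https://download.pytorch.org/whl/cu128"
    if os_name == "linux" and gpu is None:
        return "https://download.pytorch.org/whl/cpu"
    if os_name == "linux" and gpu == "amd":
        return "https://download.pytorch.org/whl/rocm6.2.4"
    return None  # all other cases: do not use an index


assert choose_torch_index_url("linux", "nvidia") == "https://download.pytorch.org/whl/cu128"
assert choose_torch_index_url("macos", None) is None
```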
@@ -13,6 +13,7 @@ If you'd prefer, you can also just download the whole node folder from the linke
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.

- Community Nodes
+ [Anamorphic Tools](#anamorphic-tools)
+ [Adapters-Linked](#adapters-linked-nodes)
+ [Autostereogram](#autostereogram-nodes)
+ [Average Images](#average-images)

@@ -20,9 +21,12 @@ To use a community workflow, download the `.json` node graph file and load it in
+ [Close Color Mask](#close-color-mask)
+ [Clothing Mask](#clothing-mask)
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
+ [Curves](#curves)
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
+ [Enhance Detail](#enhance-detail)
+ [Film Grain](#film-grain)
+ [Flip Pose](#flip-pose)
+ [Flux Ideal Size](#flux-ideal-size)
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
+ [Grid to Gif](#grid-to-gif)

@@ -61,6 +65,13 @@ To use a community workflow, download the `.json` node graph file and load it in
- [Help](#help)

--------------------------------
### Anamorphic Tools

**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.

**Node Link:** https://github.com/JPPhoto/anamorphic-tools

--------------------------------
### Adapters Linked Nodes

@@ -132,6 +143,13 @@ Node Link: https://github.com/VeyDlin/clahe-node
View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />

--------------------------------
### Curves

**Description:** Adjust an image's curve based on a user-defined string.

**Node Link:** https://github.com/JPPhoto/curves-node

--------------------------------
### Depth Map from Wavefront OBJ

@@ -162,6 +180,20 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
**Node Link:** https://github.com/JPPhoto/film-grain-node

--------------------------------
### Flip Pose

**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.

**Node Link:** https://github.com/JPPhoto/flip-pose-node

--------------------------------
### Flux Ideal Size

**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.

**Node Link:** https://github.com/JPPhoto/flux-ideal-size

--------------------------------
### Generative Grammar-Based Prompt Nodes
@@ -1,8 +1,7 @@
import typing
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from importlib.metadata import distributions
from pathlib import Path
from platform import python_version
from typing import Optional

import torch

@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")

class AppDependencyVersions(BaseModel):
"""App depencency Versions Response"""

accelerate: str = Field(description="accelerate version")
compel: str = Field(description="compel version")
cuda: Optional[str] = Field(description="CUDA version")
diffusers: str = Field(description="diffusers version")
numpy: str = Field(description="Numpy version")
opencv: str = Field(description="OpenCV version")
onnx: str = Field(description="ONNX version")
pillow: str = Field(description="Pillow (PIL) version")
python: str = Field(description="Python version")
torch: str = Field(description="PyTorch version")
torchvision: str = Field(description="PyTorch Vision version")
transformers: str = Field(description="transformers version")
xformers: Optional[str] = Field(description="xformers version")

class AppConfig(BaseModel):
"""App Config Response"""

@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
return AppVersion(version=__version__)

@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
async def get_app_deps() -> AppDependencyVersions:
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
async def get_app_deps() -> dict[str, str]:
deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
try:
xformers = version("xformers")
except PackageNotFoundError:
xformers = None
return AppDependencyVersions(
accelerate=version("accelerate"),
compel=version("compel"),
cuda=torch.version.cuda,
diffusers=version("diffusers"),
numpy=version("numpy"),
opencv=version("opencv-python"),
onnx=version("onnx"),
pillow=version("pillow"),
python=python_version(),
torch=torch.version.__version__,
torchvision=version("torchvision"),
transformers=version("transformers"),
xformers=xformers,
)
cuda = torch.version.cuda or "N/A"
except Exception:
cuda = "N/A"

deps["CUDA"] = cuda

sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))

return sorted_deps

@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
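For context, the rewritten `get_app_deps` endpoint now returns a flat, case-insensitively sorted name-to-version mapping of every installed distribution plus a `CUDA` entry, instead of the removed `AppDependencyVersions` model. A minimal client sketch (the base URL and port are assumptions about a default local install, not taken from this diff):

```python
# Sketch of consuming the new /app_deps response shape.
# `base_url` is an assumption; point it at your running InvokeAI API's app router.
import requests

base_url = "http://127.0.0.1:9090/api/v1/app"  # assumed local default; adjust as needed

resp = requests.get(f"{base_url}/app_deps", timeout=10)
resp.raise_for_status()

deps: dict[str, str] = resp.json()  # e.g. {"CUDA": "12.8", "torch": "2.7.0", ...}
for name, ver in deps.items():      # already sorted case-insensitively by the server
    print(f"{name}: {ver}")
```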
@@ -146,7 +146,7 @@ async def list_boards(
response_model=list[str],
)
async def list_all_board_image_names(
board_id: str = Path(description="The id of the board"),
board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
) -> list[str]:
@@ -1,12 +1,13 @@
import io
import json
import traceback
from typing import Optional
from typing import ClassVar, Optional

from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, model_validator

from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image

@@ -19,6 +20,8 @@ from invokeai.app.services.image_records.image_records_common import (
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
from invokeai.backend.image_util.util import np_to_pil, pil_to_np

images_router = APIRouter(prefix="/v1/images", tags=["images"])

@@ -27,6 +30,19 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
IMAGE_MAX_AGE = 31536000

class ResizeToDimensions(BaseModel):
width: int = Field(..., gt=0)
height: int = Field(..., gt=0)

MAX_SIZE: ClassVar[int] = 4096 * 4096

@model_validator(mode="after")
def validate_total_output_size(self):
if self.width * self.height > self.MAX_SIZE:
raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
return self

@images_router.post(
"/upload",
operation_id="upload_image",

@@ -46,6 +62,11 @@ async def upload_image(
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
resize_to: Optional[str] = Body(
default=None,
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
example='"[1024,1024]"',
),
metadata: Optional[str] = Body(
default=None,
description="The metadata to associate with the image, must be a stringified JSON dict",

@@ -59,13 +80,31 @@ async def upload_image(
contents = await file.read()
try:
pil_image = Image.open(io.BytesIO(contents))
if crop_visible:
bbox = pil_image.getbbox()
pil_image = pil_image.crop(bbox)
except Exception:
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
raise HTTPException(status_code=415, detail="Failed to read image")

if crop_visible:
try:
bbox = pil_image.getbbox()
pil_image = pil_image.crop(bbox)
except Exception:
raise HTTPException(status_code=500, detail="Failed to crop image")

if resize_to:
try:
dims = json.loads(resize_to)
resize_dims = ResizeToDimensions(**dims)
except Exception:
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")

try:
np_image = pil_to_np(pil_image)
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
pil_image = np_to_pil(np_image)
except Exception:
raise HTTPException(status_code=500, detail="Failed to resize image")

extracted_metadata = extract_metadata_from_image(
pil_image=pil_image,
invokeai_metadata_override=metadata,

@@ -356,6 +395,29 @@ async def delete_images_from_list(
raise HTTPException(status_code=500, detail="Failed to delete images")

@images_router.delete(
"/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
)
async def delete_uncategorized_images() -> DeleteImagesFromListResult:
"""Deletes all images that are uncategorized"""

image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id="none", categories=None, is_intermediate=None
)

try:
deleted_images: list[str] = []
for image_name in image_names:
try:
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.append(image_name)
except Exception:
pass
return DeleteImagesFromListResult(deleted_images=deleted_images)
except Exception:
raise HTTPException(status_code=500, detail="Failed to delete images")

class ImagesUpdatedFromListResult(BaseModel):
updated_image_names: list[str] = Field(description="The image names that were updated")
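To illustrate the new `resize_to` guard on `upload_image`: the `ResizeToDimensions` validator caps the requested output at 4096 * 4096 total pixels. A standalone sketch (the model is re-declared here for a self-contained demo rather than imported from the router, and the example sizes are arbitrary):

```python
# Sketch of the total-pixel-count validation behind the new `resize_to` upload parameter.
from typing import ClassVar

from pydantic import BaseModel, Field, ValidationError, model_validator


class ResizeToDimensions(BaseModel):
    width: int = Field(..., gt=0)
    height: int = Field(..., gt=0)

    MAX_SIZE: ClassVar[int] = 4096 * 4096

    @model_validator(mode="after")
    def validate_total_output_size(self):
        if self.width * self.height > self.MAX_SIZE:
            raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
        return self


ResizeToDimensions(width=1024, height=1024)  # accepted: well under the cap

try:
    ResizeToDimensions(width=8192, height=8192)  # 8192*8192 exceeds 4096*4096, rejected
except ValidationError as err:
    print(err)
```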
@@ -582,6 +582,8 @@ def invocation(
fields: dict[str, tuple[Any, FieldInfo]] = {}

original_model_fields: dict[str, OriginalModelField] = {}

for field_name, field_info in cls.model_fields.items():
annotation = field_info.annotation
assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."

@@ -589,7 +591,7 @@ def invocation(
f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
)

cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)

validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)

@@ -643,6 +645,16 @@ def invocation(
fields["type"] = (invocation_type_annotation, invocation_type_field_info)

# Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
# output is registered _after_ this invocation is registered. It depends on module import ordering.
#
# We can only confirm the output for an invocation is registered after all modules are imported. There's
# only really one good time to do that - during application startup, in `run_app.py`, after loading all
# custom nodes.
#
# We can still do some basic validation here - ensure the invoke method is defined and returns an instance
# of BaseInvocationOutput.

# Validate the `invoke()` method is implemented
if "invoke" in cls.__abstractmethods__:
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')

@@ -666,6 +678,7 @@ def invocation(
docstring = cls.__doc__
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) # type: ignore
new_class.__doc__ = docstring
new_class._original_model_fields = original_model_fields

InvocationRegistry.register_invocation(new_class)
@@ -1,7 +1,7 @@
from typing import Iterator, List, Optional, Tuple, Union, cast

import torch
from compel import Compel, ReturnedEmbeddingsType
from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

@@ -104,6 +104,7 @@ class CompelInvocation(BaseInvocation):
dtype_for_device_getter=TorchDevice.choose_torch_dtype,
truncate_long_prompts=False,
device=TorchDevice.choose_torch_device(),
split_long_text_mode=SplitLongTextMode.SENTENCES,
)

conjunction = Compel.parse_prompt_string(self.prompt)

@@ -205,6 +206,7 @@ class SDXLPromptInvocationBase:
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, # TODO: clip skip
requires_pooled=get_pooled,
device=TorchDevice.choose_torch_device(),
split_long_text_mode=SplitLongTextMode.SENTENCES,
)

conjunction = Compel.parse_prompt_string(prompt)
@@ -22,7 +22,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
from invokeai.app.util.controlnet_utils import (
CONTROLNET_MODE_VALUES,
CONTROLNET_RESIZE_VALUES,
heuristic_resize_fast,
)
from invokeai.backend.image_util.util import np_to_pil, pil_to_np

@@ -109,7 +113,7 @@ class ControlNetInvocation(BaseInvocation):
title="Heuristic Resize",
tags=["image, controlnet"],
category="image",
version="1.0.1",
version="1.1.1",
classification=Classification.Prototype,
)
class HeuristicResizeInvocation(BaseInvocation):

@@ -122,7 +126,7 @@ class HeuristicResizeInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
np_img = pil_to_np(image)
np_resized = heuristic_resize(np_img, (self.width, self.height))
np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
resized = np_to_pil(np_resized)
image_dto = context.images.save(image=resized)
return ImageOutput.build(image_dto)
@@ -1,12 +1,14 @@
from typing import Literal, Optional

import cv2
import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image, ImageFilter
from PIL import Image
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
DenoiseMaskField,
FieldDescriptions,

@@ -42,15 +44,13 @@ class GradientMaskOutput(BaseInvocationOutput):
title="Create Gradient Mask",
tags=["mask", "denoise"],
category="latents",
version="1.2.1",
version="1.3.0",
)
class CreateGradientMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""
"""Creates mask for denoising."""

mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
edge_radius: int = InputField(
default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
)
edge_radius: int = InputField(default=16, ge=0, description="How far to expand the edges of the mask", ui_order=2)
coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
minimum_denoise: float = InputField(
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4

@@ -81,45 +81,110 @@ class CreateGradientMaskInvocation(BaseInvocation):
@torch.no_grad()
def invoke(self, context: InvocationContext) -> GradientMaskOutput:
mask_image = context.images.get_pil(self.mask.image_name, mode="L")

# Resize the mask_image. Makes the filter 64x faster and doesn't hurt quality in latent scale anyway
mask_image = mask_image.resize(
(
mask_image.width // LATENT_SCALE_FACTOR,
mask_image.height // LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.BILINEAR,
)

mask_np_orig = np.array(mask_image, dtype=np.float32)

self.edge_radius = self.edge_radius // LATENT_SCALE_FACTOR # scale the edge radius to match the mask size

if self.edge_radius > 0:
mask_np = 255 - mask_np_orig # invert so 0 is unmasked (higher values = higher denoise strength)
dilated_mask = mask_np.copy()

# Create kernel based on coherence mode
if self.coherence_mode == "Box Blur":
blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
else: # Gaussian Blur OR Staged
# Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
# Create a circular distance kernel that fades from center outward
kernel_size = self.edge_radius * 2 + 1
center = self.edge_radius
kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
for i in range(kernel_size):
for j in range(kernel_size):
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
if dist <= self.edge_radius:
kernel[i, j] = 1.0 - (dist / self.edge_radius)
else: # Gaussian Blur or Staged
# Create a Gaussian kernel
kernel_size = self.edge_radius * 2 + 1
kernel = cv2.getGaussianKernel(
kernel_size, self.edge_radius / 2.5
) # 2.5 is a magic number (standard deviation capturing)
kernel = kernel * kernel.T # Make 2D gaussian kernel
kernel = kernel / np.max(kernel) # Normalize center to 1.0

blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
# Ensure values outside radius are 0
center = self.edge_radius
for i in range(kernel_size):
for j in range(kernel_size):
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
if dist > self.edge_radius:
kernel[i, j] = 0

# redistribute blur so that the original edges are 0 and blur outwards to 1
blur_tensor = (blur_tensor - 0.5) * 2
blur_tensor[blur_tensor < 0] = 0.0
# 2D max filter
mask_tensor = torch.tensor(mask_np)
kernel_tensor = torch.tensor(kernel)
dilated_mask = 255 - self.max_filter2D_torch(mask_tensor, kernel_tensor).cpu()
dilated_mask = dilated_mask.numpy()

threshold = 1 - self.minimum_denoise
threshold = (1 - self.minimum_denoise) * 255

if self.coherence_mode == "Staged":
# wherever the blur_tensor is less than fully masked, convert it to threshold
blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
else:
# wherever the blur_tensor is above threshold but less than 1, drop it to threshold
blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
# wherever expanded mask is darker than the original mask but original was above threshhold, set it to the threshold
# makes any expansion areas drop to threshhold. Raising minimum across the image happen outside of this if
threshold_mask = (dilated_mask < mask_np_orig) & (mask_np_orig > threshold)
dilated_mask = np.where(threshold_mask, threshold, mask_np_orig)

# wherever expanded mask is less than 255 but greater than threshold, drop it to threshold (minimum denoise)
threshold_mask = (dilated_mask > threshold) & (dilated_mask < 255)
dilated_mask = np.where(threshold_mask, threshold, dilated_mask)

else:
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
dilated_mask = mask_np_orig.copy()

mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
# convert to tensor
dilated_mask = np.clip(dilated_mask, 0, 255).astype(np.uint8)
mask_tensor = torch.tensor(dilated_mask, device=torch.device("cpu"))

# compute a [0, 1] mask from the blur_tensor
expanded_mask = torch.where((blur_tensor < 1), 0, 1)
expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
# binary mask for compositing
expanded_mask = np.where((dilated_mask < 255), 0, 255)
expanded_mask_image = Image.fromarray(expanded_mask.astype(np.uint8), mode="L")
expanded_mask_image = expanded_mask_image.resize(
(
mask_image.width * LATENT_SCALE_FACTOR,
mask_image.height * LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.NEAREST,
)
expanded_image_dto = context.images.save(expanded_mask_image)

# restore the original mask size
dilated_mask = Image.fromarray(dilated_mask.astype(np.uint8))
dilated_mask = dilated_mask.resize(
(
mask_image.width * LATENT_SCALE_FACTOR,
mask_image.height * LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.NEAREST,
)

# stack the mask as a tensor, repeating 4 times on dimmension 1
dilated_mask_tensor = image_resized_to_grid_as_tensor(dilated_mask, normalize=False)
mask_name = context.tensors.save(tensor=dilated_mask_tensor.unsqueeze(0))

masked_latents_name = None
if self.unet is not None and self.vae is not None and self.image is not None:
# all three fields must be present at the same time
main_model_config = context.models.get_config(self.unet.unet.key)
assert isinstance(main_model_config, MainConfigBase)
if main_model_config.variant is ModelVariantType.Inpaint:
mask = blur_tensor
mask = dilated_mask_tensor
vae_info: LoadedModel = context.models.load(self.vae.vae)
image = context.images.get_pil(self.image.image_name)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))

@@ -137,3 +202,29 @@ class CreateGradientMaskInvocation(BaseInvocation):
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
)

def max_filter2D_torch(self, image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
"""
This morphological operation is much faster in torch than numpy or opencv
For reasonable kernel sizes, the overhead of copying the data to the GPU is not worth it.
"""
h, w = kernel.shape
pad_h, pad_w = h // 2, w // 2

padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
result = torch.zeros_like(image)

# This looks like it's inside out, but it does the same thing and is more efficient
for i in range(h):
for j in range(w):
weight = kernel[i, j]
if weight <= 0:
continue

# Extract the region from padded tensor
region = padded[i : i + image.shape[0], j : j + image.shape[1]]

# Apply weight and update max
result = torch.maximum(result, region * weight)

return result
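The `max_filter2D_torch` helper above is a weighted morphological dilation: each output pixel is the maximum of its neighborhood multiplied by the kernel weights. A standalone sketch of the same idea (re-implemented as a free function so it runs outside the invocation class; the tiny test mask and kernel values are made up):

```python
# Standalone sketch of the weighted 2D max filter (dilation) used above.
import torch


def max_filter2d(image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
    """Each output pixel is max(neighborhood * kernel), i.e. a weighted dilation."""
    h, w = kernel.shape
    pad_h, pad_w = h // 2, w // 2
    padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
    result = torch.zeros_like(image)
    for i in range(h):
        for j in range(w):
            weight = kernel[i, j]
            if weight <= 0:
                continue
            region = padded[i : i + image.shape[0], j : j + image.shape[1]]
            result = torch.maximum(result, region * weight)
    return result


mask = torch.zeros(5, 5)
mask[2, 2] = 255.0  # a single "masked" pixel
kernel = torch.tensor([[0.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 0.0]])

dilated = max_filter2d(mask, kernel)
print(dilated)  # the center stays 255; its 4-neighbors become 127.5
```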
@@ -62,7 +62,9 @@ class UIType(str, Enum, metaclass=MetaEnum):
FluxReduxModel = "FluxReduxModelField"
LlavaOnevisionModel = "LLaVAModelField"
Imagen3Model = "Imagen3ModelField"
Imagen4Model = "Imagen4ModelField"
ChatGPT4oModel = "ChatGPT4oModelField"
FluxKontextModel = "FluxKontextModelField"
# endregion

# region Misc Field Types
@@ -1218,12 +1218,15 @@ class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
title="Add Image Noise",
tags=["image", "noise"],
category="image",
version="1.0.1",
version="1.1.0",
)
class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Add noise to an image"""

image: ImageField = InputField(description="The image to add noise to")
mask: Optional[ImageField] = InputField(
default=None, description="Optional mask determining where to apply noise (black=noise, white=no noise)"
)
seed: int = InputField(
default=0,
ge=0,

@@ -1267,12 +1270,27 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
(image.width, image.height), Image.Resampling.NEAREST
)

# Create a noisy version of the input image
noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")

# Paste back the alpha channel
noisy_image.putalpha(alpha)
# Apply mask if provided
if self.mask is not None:
mask_image = context.images.get_pil(self.mask.image_name, mode="L")

image_dto = context.images.save(image=noisy_image)
if mask_image.size != image.size:
mask_image = mask_image.resize(image.size, Image.Resampling.LANCZOS)

result_image = image.copy()
mask_image = ImageOps.invert(mask_image)
result_image.paste(noisy_image, (0, 0), mask=mask_image)
else:
result_image = noisy_image

# Paste back the alpha channel from the original image
result_image.putalpha(alpha)

image_dto = context.images.save(image=result_image)

return ImageOutput.build(image_dto)
@@ -42,7 +42,9 @@ class IPAdapterMetadataField(BaseModel):
image: ImageField = Field(description="The IP-Adapter image prompt.")
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = Field(
description="Method to apply IP Weights with"
)
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")
@@ -6,7 +6,7 @@ import numpy as np
import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import AutoModelForMaskGeneration, AutoProcessor
from transformers import AutoProcessor
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor

@@ -104,14 +104,13 @@ class SegmentAnythingInvocation(BaseInvocation):
@staticmethod
def _load_sam_model(model_path: Path):
sam_model = AutoModelForMaskGeneration.from_pretrained(
sam_model = SamModel.from_pretrained(
model_path,
local_files_only=True,
# TODO(ryand): Setting the torch_dtype here doesn't work. Investigate whether fp16 is supported by the
# model, and figure out how to make it work in the pipeline.
# torch_dtype=TorchDevice.choose_torch_dtype(),
)
assert isinstance(sam_model, SamModel)

sam_processor = AutoProcessor.from_pretrained(model_path, local_files_only=True)
assert isinstance(sam_processor, SamProcessor)
@@ -1,12 +1,3 @@
import uvicorn

from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.cli.arg_parser import InvokeAIArgs

def get_app():
"""Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.

@@ -18,9 +9,18 @@ def get_app():
def run_app() -> None:
"""The main entrypoint for the app."""
# Parse the CLI arguments.
from invokeai.frontend.cli.arg_parser import InvokeAIArgs

# Parse the CLI arguments before doing anything else, which ensures CLI args correctly override settings from other
# sources like `invokeai.yaml` or env vars.
InvokeAIArgs.parse_args()

import uvicorn

from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger

# Load config.
app_config = get_config()

@@ -32,6 +32,8 @@ def run_app() -> None:
configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)

# This import must happen after configure_torch_cuda_allocator() is called, because the module imports torch.
from invokeai.app.invocations.baseinvocation import InvocationRegistry
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.backend.util.devices import TorchDevice

torch_device_name = TorchDevice.get_torch_device_name()

@@ -66,6 +68,15 @@ def run_app() -> None:
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)

# Check all invocations and ensure their outputs are registered.
for invocation in InvocationRegistry.get_invocation_classes():
invocation_type = invocation.get_type()
output_annotation = invocation.get_output_annotation()
if output_annotation not in InvocationRegistry.get_output_classes():
logger.warning(
f'Invocation "{invocation_type}" has unregistered output class "{output_annotation.__name__}"'
)

if app_config.dev_reload:
# load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
# imported.
@@ -98,9 +98,18 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""

# Handle board_id filter
if board_id == "none":
stmt += """--sql
AND board_images.board_id IS NULL
"""
else:
stmt += """--sql
AND board_images.board_id = ?
"""
params.append(board_id)
params.append(board_id)

# Add the category filter
if categories is not None:
@@ -205,6 +205,7 @@ class FieldIdentifier(BaseModel):
kind: Literal["input", "output"] = Field(description="The kind of field")
node_id: str = Field(description="The ID of the node")
field_name: str = Field(description="The name of the field")
user_label: str | None = Field(description="The user label of the field, if any")

class SessionQueueItemWithoutGraph(BaseModel):
@@ -230,6 +230,86 @@ def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.
return resized

# precompute common kernels
_KERNEL3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# directional masks for NMS
_DIRS = [
np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], np.uint8),
np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], np.uint8),
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.uint8),
np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], np.uint8),
]

def heuristic_resize_fast(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
h, w = np_img.shape[:2]
# early exit
if (w, h) == size:
return np_img

# separate alpha channel
img = np_img
alpha = None
if img.ndim == 3 and img.shape[2] == 4:
alpha, img = img[:, :, 3], img[:, :, :3]

# build small sample for unique‐color & binary detection
flat = img.reshape(-1, img.shape[-1])
N = flat.shape[0]
# include four corners to avoid missing extreme values
corners = np.vstack([img[0, 0], img[0, w - 1], img[h - 1, 0], img[h - 1, w - 1]])
cnt = min(N, 100_000)
samp = np.vstack([corners, flat[np.random.choice(N, cnt, replace=False)]])
uc = np.unique(samp, axis=0).shape[0]
vmin, vmax = samp.min(), samp.max()

# detect binary edge map & one‐pixel‐edge case
is_binary = uc == 2 and vmin < 16 and vmax > 240
one_pixel_edge = False
if is_binary:
# single gray conversion
gray0 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grad = cv2.morphologyEx(gray0, cv2.MORPH_GRADIENT, _KERNEL3)
cnt_edge = cv2.countNonZero(grad)
cnt_all = cv2.countNonZero((gray0 > 127).astype(np.uint8))
one_pixel_edge = (2 * cnt_edge) > cnt_all

# choose interp for color/seg/grayscale
area_new, area_old = size[0] * size[1], w * h
if 2 < uc < 200: # segmentation map
interp = cv2.INTER_NEAREST
elif area_new < area_old:
interp = cv2.INTER_AREA
else:
interp = cv2.INTER_CUBIC

# single resize pass on RGB
resized = cv2.resize(img, size, interpolation=interp)

if is_binary:
# convert to gray & apply NMS via C++ dilate
gray_r = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
nms = np.zeros_like(gray_r)
for K in _DIRS:
d = cv2.dilate(gray_r, K)
mask = d == gray_r
nms[mask] = gray_r[mask]

# threshold + thinning if needed
_, bw = cv2.threshold(nms, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
out_bin = cv2.ximgproc.thinning(bw) if one_pixel_edge else bw
# restore 3 channels
resized = np.stack([out_bin] * 3, axis=2)

# restore alpha with same interp as RGB for consistency
if alpha is not None:
am = cv2.resize(alpha, size, interpolation=interp)
am = (am > 127).astype(np.uint8) * 255
resized = np.dstack((resized, am))

return resized

###########################################################################
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
# modified for InvokeAI

@@ -244,7 +324,7 @@ def np_img_resize(
np_img = normalize_image_channel_count(np_img)

if resize_mode == "just_resize": # RESIZE
np_img = heuristic_resize(np_img, (w, h))
np_img = heuristic_resize_fast(np_img, (w, h))
np_img = clone_contiguous(np_img)
return np_img_to_torch(np_img, device), np_img

@@ -265,7 +345,7 @@ def np_img_resize(
# Inpaint hijack
high_quality_border_color[3] = 255
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
new_h, new_w, _ = np_img.shape
pad_h = max(0, (h - new_h) // 2)
pad_w = max(0, (w - new_w) // 2)

@@ -275,7 +355,7 @@ def np_img_resize(
return np_img_to_torch(np_img, device), np_img
else: # resize_mode == "crop_resize" (INNER_FIT)
k = max(k0, k1)
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
new_h, new_w, _ = np_img.shape
pad_h = max(0, (new_h - h) // 2)
pad_w = max(0, (new_w - w) // 2)
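A quick usage sketch for the new `heuristic_resize_fast` helper (assumes an importable InvokeAI install; the synthetic input image and target size are arbitrary). Note that `size` is `(width, height)` while NumPy arrays are `(height, width, channels)`:

```python
# Usage sketch: resize an RGB uint8 array with the new fast heuristic resizer.
import numpy as np

from invokeai.app.util.controlnet_utils import heuristic_resize_fast

rgb = (np.random.rand(768, 512, 3) * 255).astype(np.uint8)  # (H, W, C)

resized = heuristic_resize_fast(rgb, (1024, 1536))  # size is (width, height)
print(resized.shape)  # (1536, 1024, 3); upscaling a non-binary image uses cubic interpolation
```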
@@ -12,6 +12,9 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.backend.util.logging import InvokeAILogger

logger = InvokeAILogger.get_logger()

def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:
@@ -62,11 +62,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
# If this too fails, raise exception.

model_info = None

# Handling for our special syntax - we only want the base HF `org/repo` here.
repo_id = id.split("::")[0] or id
while not model_info:
try:
model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant)
model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
except RepositoryNotFoundError as excp:
raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
except RevisionNotFoundError:
if variant is None:
raise

@@ -75,14 +78,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
files: list[RemoteModelFile] = []

_, name = id.split("/")
_, name = repo_id.split("/")

for s in model_info.siblings or []:
assert s.rfilename is not None
assert s.size is not None
files.append(
RemoteModelFile(
url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
path=Path(name, s.rfilename),
size=s.size,
sha256=s.lfs.get("sha256") if s.lfs else None,
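To illustrate the `::` handling above: only the base `org/repo` portion of a model ID is sent to the Hugging Face API, while anything after the `::` separator is dropped. A tiny sketch (the example IDs are hypothetical):

```python
# Sketch of the "::" handling: only the base `org/repo` is used for HF API calls.
def base_repo_id(model_id: str) -> str:
    return model_id.split("::")[0] or model_id


assert base_repo_id("some-org/some-model::fp16") == "some-org/some-model"
assert base_repo_id("some-org/some-model") == "some-org/some-model"
```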
@@ -27,7 +27,9 @@ class BaseModelType(str, Enum):
Flux = "flux"
CogView4 = "cogview4"
Imagen3 = "imagen3"
Imagen4 = "imagen4"
ChatGPT4o = "chatgpt-4o"
FluxKontext = "flux-kontext"

class ModelType(str, Enum):
@@ -1,3 +1,4 @@
import re
from contextlib import contextmanager
from typing import Dict, Iterable, Optional, Tuple

@@ -7,6 +8,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage

@@ -23,6 +25,7 @@ class LayerPatcher:
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
force_direct_patching: bool = False,
force_sidecar_patching: bool = False,
suppress_warning_layers: Optional[re.Pattern] = None,
):
"""Apply 'smart' model patching that chooses whether to use direct patching or a sidecar wrapper for each
module.

@@ -44,6 +47,7 @@ class LayerPatcher:
dtype=dtype,
force_direct_patching=force_direct_patching,
force_sidecar_patching=force_sidecar_patching,
suppress_warning_layers=suppress_warning_layers,
)

yield

@@ -70,6 +74,7 @@ class LayerPatcher:
dtype: torch.dtype,
force_direct_patching: bool,
force_sidecar_patching: bool,
suppress_warning_layers: Optional[re.Pattern] = None,
):
"""Apply a single LoRA patch to a model using the 'smart' patching strategy that chooses whether to use direct
patching or a sidecar wrapper for each module.

@@ -89,9 +94,17 @@ class LayerPatcher:
if not layer_key.startswith(prefix):
continue

module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
try:
module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
except AttributeError:
if suppress_warning_layers and suppress_warning_layers.search(layer_key):
pass
else:
logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
logger.warning("Failed to find module for LoRA layer key: %s", layer_key)
continue

# Decide whether to use direct patching or a sidecar patch.
# Direct patching is preferred, because it results in better runtime speed.
@@ -5,7 +5,8 @@ from typing import Callable, Optional, Union
import gguf
import torch

TORCH_COMPATIBLE_QTYPES = {None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}
# should not be a Set until this is resolved: https://github.com/pytorch/pytorch/issues/145761
TORCH_COMPATIBLE_QTYPES = [None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]

# K Quants #
QK_K = 256
@@ -30,18 +30,13 @@ class RectifiedFlowInpaintExtension:
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
# threshold worked well.
# 1.0. This helps to produce more coherent seams around the inpainted region.

# We use a small epsilon to avoid any potential issues with floating point precision.
eps = 1e-4
mask_gradient_t_cutoff = 0.5
if t_prev > mask_gradient_t_cutoff:
# Early in the denoising process, use the inpaint mask as-is.
return self._inpaint_mask
else:
# After the cut-off, promote all non-zero mask values to 1.0.
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
mask = torch.where(self._inpaint_mask >= t_prev + eps, 1.0, 0.0).to(
dtype=self._inpaint_mask.dtype, device=self._inpaint_mask.device
)

return mask
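A toy numerical sketch of the new promotion rule (the mask values and `t_prev` samples below are made up): every mask value at or above `t_prev + eps` is promoted to 1.0 and everything else drops to 0.0, so more of the gradient region is promoted as `t_prev` decreases.

```python
# Toy illustration of the new mask promotion rule; the values are arbitrary.
import torch

inpaint_mask = torch.tensor([0.0, 0.3, 0.7, 1.0])
eps = 1e-4

for t_prev in (0.9, 0.5, 0.1):
    promoted = torch.where(inpaint_mask >= t_prev + eps, 1.0, 0.0)
    print(t_prev, promoted.tolist())
# 0.9 -> [0.0, 0.0, 0.0, 1.0]
# 0.5 -> [0.0, 0.0, 1.0, 1.0]
# 0.1 -> [0.0, 1.0, 1.0, 1.0]
```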
@@ -14,6 +14,8 @@ const config: KnipConfig = {
'src/features/controlLayers/konva/util.ts',
// TODO(psyche): restore HRF functionality?
'src/features/hrf/**',
// This feature is (temprarily?) disabled
'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
],
ignoreBinaries: ['only-allow'],
paths: {
@@ -24,15 +24,18 @@
"autoAddBoard": "Auto-Add Board",
"boards": "Boards",
"selectedForAutoAdd": "Selected for Auto-Add",
"bottomMessage": "Deleting this board and its images will reset any features currently using them.",
"bottomMessage": "Deleting images will reset any features currently using them.",
"cancel": "Cancel",
"changeBoard": "Change Board",
"clearSearch": "Clear Search",
"deleteBoard": "Delete Board",
"deleteBoardAndImages": "Delete Board and Images",
"deleteBoardOnly": "Delete Board Only",
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
"deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
"deletedBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
"deletedPrivateBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
"uncategorizedImages": "Uncategorized Images",
"deleteAllUncategorizedImages": "Delete All Uncategorized Images",
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
"hideBoards": "Hide Boards",
"loading": "Loading...",
"menuItemAutoAdd": "Auto-add to this Board",

@@ -46,7 +49,7 @@
"searchBoard": "Search Boards...",
"selectBoard": "Select a Board",
"shared": "Shared Boards",
"topMessage": "This board contains images used in the following features:",
"topMessage": "This selection contains images used in the following features:",
"unarchiveBoard": "Unarchive Board",
"uncategorized": "Uncategorized",
"viewBoards": "View Boards",

@@ -1144,6 +1147,7 @@
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",

@@ -1332,8 +1336,9 @@
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
"unableToCopyDesc_theseSteps": "these steps",
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
"imagen3IncompatibleGenerationMode": "Google Imagen3 supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models Inpainting and Outpainting tasks.",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
"problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
"workflowUnpublished": "Workflow Unpublished"

@@ -1907,11 +1912,13 @@
"addPositivePrompt": "Add $t(controlLayers.prompt)",
"addNegativePrompt": "Add $t(controlLayers.negativePrompt)",
"addReferenceImage": "Add $t(controlLayers.referenceImage)",
"addImageNoise": "Add $t(controlLayers.imageNoise)",
"addRasterLayer": "Add $t(controlLayers.rasterLayer)",
"addControlLayer": "Add $t(controlLayers.controlLayer)",
"addInpaintMask": "Add $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
"addGlobalReferenceImage": "Add $t(controlLayers.globalReferenceImage)",
"addDenoiseLimit": "Add $t(controlLayers.denoiseLimit)",
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",

@@ -2009,8 +2016,10 @@
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",
"warnings": {
"problemsFound": "Problems found",
"unsupportedModel": "layer not supported for selected base model",

@@ -2419,8 +2428,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Nvidia 50xx GPUs: Invoke uses PyTorch 2.7.0, which is required for these GPUs.",
"Model Relationships: Link LoRAs to main models, and the LoRAs will show up first in the list."
"Inpainting: Per-mask noise levels and denoise limits.",
"Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -883,8 +883,8 @@
|
||||
"problemUnpublishingWorkflow": "Problema durante l'annullamento della pubblicazione del flusso di lavoro",
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"imagen3IncompatibleGenerationMode": "Google Imagen3 supporta solo la conversione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1086,11 +1086,11 @@
|
||||
"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
|
||||
"cancel": "Annulla",
|
||||
"addBoard": "Aggiungi Bacheca",
|
||||
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
|
||||
"bottomMessage": "L'eliminazione delle immagini reimposterà tutte le funzionalità che le stanno utilizzando.",
|
||||
"changeBoard": "Cambia Bacheca",
|
||||
"loading": "Caricamento in corso ...",
|
||||
"clearSearch": "Cancella Ricerca",
|
||||
"topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"topMessage": "Questa selezione contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"move": "Sposta",
|
||||
"myBoard": "Bacheca",
|
||||
"searchBoard": "Cerca bacheche ...",
|
||||
@@ -1101,7 +1101,7 @@
|
||||
"deleteBoardOnly": "solo la Bacheca",
|
||||
"deleteBoard": "Elimina Bacheca",
|
||||
"deleteBoardAndImages": "Bacheca e Immagini",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate nella bacheca \"Non categorizzato\".",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate in uno stato non categorizzato.",
|
||||
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
|
||||
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
|
||||
"movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:",
|
||||
@@ -1123,8 +1123,11 @@
|
||||
"noBoards": "Nessuna bacheca {{boardType}}",
|
||||
"hideBoards": "Nascondi bacheche",
|
||||
"viewBoards": "Visualizza bacheche",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche cancellate non possono essere ripristinate. Selezionando 'Cancella solo bacheca', le immagini verranno spostate nella bacheca \"Non categorizzato\" privata dell'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca"
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\", le immagini verranno spostate in uno stato privato e non categorizzato per l'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca",
|
||||
"uncategorizedImages": "Immagini non categorizzate",
|
||||
"deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
|
||||
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
@@ -2005,11 +2008,11 @@
|
||||
"stagingOnCanvas": "Genera immagini nella",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Stile e Composizione",
|
||||
"style": "Solo Stile",
|
||||
"style": "Stile (semplice)",
|
||||
"composition": "Solo Composizione",
|
||||
"ipAdapterMethod": "Modalità",
|
||||
"fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
|
||||
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
|
||||
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione. Precedentemente chiamato \"Solo stile\".",
|
||||
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento.",
|
||||
"styleStrong": "Stile (forte)",
|
||||
"styleStrongDesc": "Applica uno stile visivo forte, con un'influenza sulla composizione leggermente ridotta.",
|
||||
@@ -2296,7 +2299,7 @@
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"useImage": "Usa immagine",
|
||||
"resetGenerationSettings": "Ripristina impostazioni di generazione",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
|
||||
@@ -2345,7 +2348,11 @@
|
||||
"lowest": "Il più basso",
|
||||
"medium": "Medio",
|
||||
"highest": "La più alta"
|
||||
}
|
||||
},
|
||||
"denoiseLimit": "Limite di riduzione del rumore",
|
||||
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
|
||||
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Rumore dell'immagine"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2445,8 +2452,8 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"CogView4: supporto per i modelli CogView4 in Tela e Flussi di lavoro.",
|
||||
"Dipendenze aggiornate: Invoke ora funziona con l'ultima versione delle sue dipendenze, tra cui Python 3.12 e Pytorch 2.6.0."
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
|
||||
@@ -392,7 +392,7 @@
|
||||
"title": "全選択"
|
||||
},
|
||||
"addNode": {
|
||||
"desc": "ノード追加メニューを開く.",
|
||||
"desc": "ノード追加メニューを開く。",
|
||||
"title": "ノードを追加"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
@@ -652,7 +652,9 @@
|
||||
"filterModels": "フィルターモデル",
|
||||
"modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
|
||||
"manageModels": "モデル管理",
|
||||
"hfTokenReset": "ハギングフェイストークンリセット"
|
||||
"hfTokenReset": "ハギングフェイストークンリセット",
|
||||
"relatedModels": "関連のあるモデル",
|
||||
"showOnlyRelatedModels": "関連している"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -872,8 +874,8 @@
|
||||
"problemDeletingWorkflow": "ワークフローが削除された問題",
|
||||
"imageNotLoadedDesc": "画像を見つけられません",
|
||||
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
||||
"imagen3IncompatibleGenerationMode": "Google Imagen3 はテキストから画像への生成のみをサポートしています.画像から画像,インペインティング,アウトペインティングのタスクには他のモデルをご利用ください.",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -1154,11 +1156,11 @@
|
||||
"unknownField": "不明なフィールド",
|
||||
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
|
||||
"loadingTemplates": "読み込み中 {{name}}",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします.",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
|
||||
"validateConnections": "接続とグラフを確認する",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"newWorkflowDesc": "新しいワークフローを作りますか?",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型:{type}}",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
|
||||
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
|
||||
"unableToLoadWorkflow": "ワークフローが読み込めません",
|
||||
"unableToValidateWorkflow": "ワークフローを確認できません",
|
||||
@@ -1201,13 +1203,13 @@
|
||||
"downloadBoard": "ボードをダウンロード",
|
||||
"changeBoard": "ボードを変更",
|
||||
"loading": "ロード中...",
|
||||
"topMessage": "このボードには、以下の機能で使用されている画像が含まれています:",
|
||||
"bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。",
|
||||
"topMessage": "この選択には、次の機能で使用される画像が含まれています:",
|
||||
"bottomMessage": "この画像を削除すると、現在利用している機能はリセットされます。",
|
||||
"clearSearch": "検索をクリア",
|
||||
"deleteBoard": "ボードの削除",
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
|
||||
"deletedBoardsCannotbeRestored": "削除したボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は未分類の状態になります。",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
@@ -1222,9 +1224,12 @@
|
||||
"imagesWithCount_other": "{{count}} の画像",
|
||||
"updateBoardError": "ボード更新エラー",
|
||||
"selectedForAutoAdd": "自動追加に選択済み",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は作成者に対して非公開の未分類状態になります。",
|
||||
"noBoards": "{{boardType}} ボードがありません",
|
||||
"viewBoards": "ボードを表示"
|
||||
"viewBoards": "ボードを表示",
|
||||
"uncategorizedImages": "分類されていない画像",
|
||||
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
|
||||
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -1247,7 +1252,8 @@
|
||||
"paramRatio": {
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
"生成された画像の縦横比。",
|
||||
"SD1.5 モデルの場合は 512x512 に相当する画像サイズ (ピクセル数) が推奨され, SDXL モデルの場合は 1024x1024 に相当するサイズが推奨されます."
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
@@ -1289,25 +1295,49 @@
|
||||
]
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
"heading": "アップスケール手法",
|
||||
"paragraphs": [
|
||||
"高解像度修正のために画像を拡大するために使用される方法。"
|
||||
]
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
"heading": "アップスケールモデル",
|
||||
"paragraphs": [
|
||||
"アップスケールモデルは、ディテールを追加する前に画像を出力サイズに合わせて拡大縮小します。サポートされているアップスケールモデルであればどれでも使用できますが、写真や線画など、特定の種類の画像に特化したモデルもあります。"
|
||||
]
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成される画像のアスペクト比。比率を変更すると、幅と高さもそれに応じて更新されます。",
|
||||
"「最適化」は、選択したモデルの幅と高さを最適な寸法に設定します。"
|
||||
]
|
||||
},
|
||||
"refinerSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で実行されるステップの数。",
|
||||
"生成ステップと似ています。"
|
||||
]
|
||||
},
|
||||
"paramVAE": {
|
||||
"heading": "VAE"
|
||||
"heading": "VAE",
|
||||
"paragraphs": [
|
||||
"AI 出力を最終画像に変換するために使用されるモデル。"
|
||||
]
|
||||
},
|
||||
"scale": {
|
||||
"heading": "スケール"
|
||||
"heading": "スケール",
|
||||
"paragraphs": [
|
||||
"スケールは出力画像のサイズを制御し、入力画像の解像度の倍数に基づいて決定されます。例えば、1024x1024の画像を2倍に拡大すると、2048x2048の出力が生成されます。"
|
||||
]
|
||||
},
|
||||
"refinerScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
"heading": "スケジューラー",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で使用されるスケジューラ。",
|
||||
"生成スケジューラに似ています。"
|
||||
]
|
||||
},
|
||||
"compositingCoherenceMode": {
|
||||
"heading": "モード",
|
||||
@@ -1316,13 +1346,23 @@
|
||||
]
|
||||
},
|
||||
"paramModel": {
|
||||
"heading": "モデル"
|
||||
"heading": "モデル",
|
||||
"paragraphs": [
|
||||
"生成に使用されるモデル。異なるモデルは、異なる美的結果とコンテンツを生成するように特化するようにトレーニングされています。"
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
"heading": "高さ"
|
||||
"heading": "高さ",
|
||||
"paragraphs": [
|
||||
"生成される画像の高さ。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"paramSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"各生成で実行されるステップの数.",
|
||||
"通常, ステップ数が多いほど, より高品質な画像が作成されますが生成時間も長くなります."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "モード",
|
||||
@@ -1331,10 +1371,18 @@
|
||||
]
|
||||
},
|
||||
"paramSeed": {
|
||||
"heading": "シード"
|
||||
"heading": "シード",
|
||||
"paragraphs": [
|
||||
"生成に使用する始動ノイズを制御します.",
|
||||
"同じ生成設定で同一の結果を生成するには, 「ランダム」オプションを無効にします."
|
||||
]
|
||||
},
|
||||
"paramIterations": {
|
||||
"heading": "生成回数"
|
||||
"heading": "生成回数",
|
||||
"paragraphs": [
|
||||
"生成する画像の数。",
|
||||
"動的プロンプトが有効になっている場合、各プロンプトはこの回数生成されます。"
|
||||
]
|
||||
},
|
||||
"controlNet": {
|
||||
"heading": "ControlNet",
|
||||
@@ -1343,16 +1391,29 @@
|
||||
]
|
||||
},
|
||||
"paramWidth": {
|
||||
"heading": "幅"
|
||||
"heading": "幅",
|
||||
"paragraphs": [
|
||||
"生成される画像の幅。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"lora": {
|
||||
"heading": "LoRA"
|
||||
"heading": "LoRA",
|
||||
"paragraphs": [
|
||||
"ベースモデルと組み合わせて使用する軽量モデル."
|
||||
]
|
||||
},
|
||||
"loraWeight": {
|
||||
"heading": "重み"
|
||||
"heading": "重み",
|
||||
"paragraphs": [
|
||||
"LoRA の重み. 重みを大きくすると, 最終的な画像への影響が大きくなります."
|
||||
]
|
||||
},
|
||||
"patchmatchDownScaleSize": {
|
||||
"heading": "Downscale"
|
||||
"heading": "Downscale",
|
||||
"paragraphs": [
|
||||
"埋め込む前にどの程度のダウンスケーリングが行われるか。",
|
||||
"ダウンスケーリングを大きくするとパフォーマンスは向上しますが、品質は低下します。"
|
||||
]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "重み",
|
||||
@@ -1438,7 +1499,8 @@
|
||||
"heading": "ダイナミックプロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトは,単一のプロンプトを複数のプロンプトに解析します.",
|
||||
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます."
|
||||
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます.",
|
||||
"1 つのプロンプト内で構文を何度でも使用できますが, 生成されるプロンプトの数を Max Prompts 設定で制限するようにしてください."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
@@ -1458,6 +1520,159 @@
|
||||
"paragraphs": [
|
||||
"プロンプトまたは コントロールネットのいずれかを重視します."
|
||||
]
|
||||
},
|
||||
"noiseUseCPU": {
|
||||
"paragraphs": [
|
||||
"CPU または GPU でノイズを生成するかどうかを制御します.",
|
||||
"CPU ノイズを有効にすると, 特定のシードによってどのマシンでも同じ画像が生成されます.",
|
||||
"CPU ノイズを有効にしてもパフォーマンスに影響はありません."
|
||||
],
|
||||
"heading": "CPUノイズを使用する"
|
||||
},
|
||||
"dynamicPromptsMaxPrompts": {
|
||||
"heading": "最大プロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトによって生成できるプロンプトの数を制限します."
|
||||
]
|
||||
},
|
||||
"dynamicPromptsSeedBehaviour": {
|
||||
"paragraphs": [
|
||||
"プロンプトを生成するときにシードがどのように使用されるかを制御します.",
|
||||
"反復ごとに固有のシードを使用します. 単一のシードでプロンプトのバリエーションを試す場合に使用します.",
|
||||
"たとえば, プロンプトが 5 つある場合, 各画像は同じシードを使用します.",
|
||||
"「画像ごと」では, 画像ごとに固有のシード値が使用されます. これにより、より多くのバリエーションが得られます."
|
||||
],
|
||||
"heading": "シード行動"
|
||||
},
|
||||
"imageFit": {
|
||||
"paragraphs": [
|
||||
"初期画像の幅と高さを出力画像に合わせてサイズ変更します. 有効にすることをお勧めします."
|
||||
],
|
||||
"heading": "初期画像を出力サイズに合わせる"
|
||||
},
|
||||
"infillMethod": {
|
||||
"heading": "充填方法",
|
||||
"paragraphs": [
|
||||
"アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"ガイダンス値が高すぎると過飽和状態になる可能性があり、ガイダンス値が高すぎるか低すぎると生成結果に歪みが生じる可能性があります。ガイダンスはFLUX DEVモデルにのみ適用されます。"
|
||||
],
|
||||
"heading": "ガイダンス"
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"生成されたイメージがラスター レイヤーとどの程度異なるかを制御します。",
|
||||
"強度が低いほど、結合された表示ラスターレイヤーに近くなります。強度が高いほど、グローバルプロンプトに大きく依存します。",
|
||||
"表示されるコンテンツを持つラスター レイヤーがない場合、この設定は無視されます。"
|
||||
],
|
||||
"heading": "ディノイジングストレングス"
|
||||
},
|
||||
"refinerStart": {
|
||||
"heading": "リファイナースタート",
|
||||
"paragraphs": [
|
||||
"生成プロセスのどの時点でリファイナーが使用され始めるか。",
|
||||
"0 はリファイナーが生成プロセス全体で使用されることを意味し、0.8 は、リファイナーが生成プロセスの最後の 20% で使用されることを意味します。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "イメージtoイメージの最適化",
|
||||
"paragraphs": [
|
||||
"「イメージtoイメージを最適化」を有効にすると、Fluxモデルを用いた画像間変換およびインペインティング変換において、より段階的なノイズ除去強度スケールが適用されます。この設定により、画像に適用される変化量を制御する能力が向上しますが、標準のノイズ除去強度スケールを使用したい場合はオフにすることができます。この設定は現在調整中で、ベータ版です。"
|
||||
]
|
||||
},
|
||||
"refinerPositiveAestheticScore": {
|
||||
"heading": "ポジティブ美的スコア",
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美的スコアの高い画像に類似するように生成を重み付けします。"
|
||||
]
|
||||
},
|
||||
"paramCFGScale": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"CFG スケールの値が高すぎると、飽和しすぎて生成結果が歪む可能性があります。 "
|
||||
],
|
||||
"heading": "CFGスケール"
|
||||
},
|
||||
"paramVAEPrecision": {
|
||||
"paragraphs": [
|
||||
"VAE エンコードおよびデコード時に使用される精度。",
|
||||
"Fp16/Half 精度は、画像のわずかな変化を犠牲にして、より効率的です。"
|
||||
],
|
||||
"heading": "VAE精度"
|
||||
},
|
||||
"refinerModel": {
|
||||
"heading": "リファイナーモデル",
|
||||
"paragraphs": [
|
||||
"生成プロセスの精製部分で使用されるモデル。",
|
||||
"世代モデルに似ています。"
|
||||
]
|
||||
},
|
||||
"refinerCfgScale": {
|
||||
"heading": "CFGスケール",
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスに与える影響を制御する。",
|
||||
"生成CFG スケールに似ています。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingYAxis": {
|
||||
"heading": "シームレスタイリングY軸",
|
||||
"paragraphs": [
|
||||
"画像を垂直軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"scaleBeforeProcessing": {
|
||||
"heading": "プロセス前のスケール値",
|
||||
"paragraphs": [
|
||||
"「自動」は、画像生成プロセスの前に、選択した領域をモデルに最適なサイズに拡大縮小します。",
|
||||
"「手動」では、画像生成プロセスの前に、選択した領域を拡大縮小する幅と高さを選択できます。"
|
||||
]
|
||||
},
|
||||
"creativity": {
|
||||
"heading": "クリエイティビティ",
|
||||
"paragraphs": [
|
||||
"クリエイティビティは、ディテールを追加する際のモデルに与えられる自由度を制御します。クリエイティビティが低いと元のイメージに近いままになり、クリエイティビティが高いとより多くの変化を加えることができます。プロンプトを使用する場合、クリエイティビティが高いとプロンプトの影響が増します。"
|
||||
]
|
||||
},
|
||||
"paramHrf": {
|
||||
"heading": "高解像度修正を有効にする",
|
||||
"paragraphs": [
|
||||
"モデルに最適な解像度よりも高い解像度で、高品質な画像を生成します。通常、生成された画像内の重複を防ぐために使用されます。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingXAxis": {
|
||||
"heading": "シームレスタイリングX軸",
|
||||
"paragraphs": [
|
||||
"画像を水平軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"paramCFGRescaleMultiplier": {
|
||||
"paragraphs": [
|
||||
"ゼロ端末 SNR (ztsnr) を使用してトレーニングされたモデルに使用される、CFG ガイダンスのリスケールマルチプライヤー。",
|
||||
"これらのモデルの場合、推奨値は 0.7 です。"
|
||||
],
|
||||
"heading": "CFG リスケールマルチプライヤー"
|
||||
},
|
||||
"structure": {
|
||||
"heading": "ストラクチャ",
|
||||
"paragraphs": [
|
||||
"ストラクチャは、出力画像が元のレイアウトにどれだけ忠実に従うかを制御します。低いストラクチャでは大幅な変更が可能ですが、高いストラクチャでは元の構成とレイアウトが厳密に維持されます。"
|
||||
]
|
||||
},
|
||||
"refinerNegativeAestheticScore": {
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美観スコアが低い画像に類似するように生成に重み付けします。"
|
||||
],
|
||||
"heading": "ネガティブ美的スコア"
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商用ライセンス",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev]モデルは、FLUX [dev]非商用ライセンスに基づいてライセンスされています。Invokeでこのモデルタイプを商用目的で使用する場合は、当社のウェブサイトをご覧ください。"
|
||||
]
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -1630,7 +1845,106 @@
|
||||
"workflows": "ワークフロー",
|
||||
"ascending": "昇順",
|
||||
"name": "名前",
|
||||
"descending": "降順"
|
||||
"descending": "降順",
|
||||
"searchPlaceholder": "名前、説明、タグで検索",
|
||||
"projectWorkflows": "プロジェクトワークフロー",
|
||||
"searchWorkflows": "ワークフローを検索",
|
||||
"updated": "アップデート",
|
||||
"published": "公表",
|
||||
"builder": {
|
||||
"label": "ラベル",
|
||||
"containerPlaceholder": "空のコンテナ",
|
||||
"showDescription": "説明を表示",
|
||||
"emptyRootPlaceholderEditMode": "開始するには、フォーム要素またはノード フィールドをここにドラッグします。",
|
||||
"divider": "仕切り",
|
||||
"deleteAllElements": "すべてのフォーム要素を削除",
|
||||
"heading": "見出し",
|
||||
"nodeField": "ノードフィールド",
|
||||
"zoomToNode": "ノードにズーム",
|
||||
"dropdown": "ドロップダウン",
|
||||
"resetOptions": "オプションをリセット",
|
||||
"both": "両方",
|
||||
"builder": "フォームビルダー",
|
||||
"text": "テキスト",
|
||||
"row": "行",
|
||||
"multiLine": "マルチライン",
|
||||
"resetAllNodeFields": "すべてのノードフィールドをリセット",
|
||||
"slider": "スライダー",
|
||||
"layout": "レイアウト",
|
||||
"addToForm": "フォームに追加",
|
||||
"headingPlaceholder": "空の見出し",
|
||||
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
|
||||
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
|
||||
"component": "コンポーネント",
|
||||
"textPlaceholder": "空のテキスト",
|
||||
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
|
||||
"addOption": "オプションを追加",
|
||||
"singleLine": "単線",
|
||||
"numberInput": "数値入力",
|
||||
"column": "列",
|
||||
"container": "コンテナ",
|
||||
"containerRowLayout": "コンテナ(行レイアウト)",
|
||||
"containerColumnLayout": "コンテナ(列レイアウト)",
|
||||
"maximum": "最大",
|
||||
"published": "公開済み",
|
||||
"publishedWorkflowOutputs": "アウトプット",
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
"download": "ダウンロード",
|
||||
"savingWorkflow": "ワークフローを保存しています...",
|
||||
"problemSavingWorkflow": "ワークフローの保存に関する問題",
|
||||
"convertGraph": "グラフを変換",
|
||||
"downloadWorkflow": "ファイルに保存",
|
||||
"saveWorkflow": "ワークフローを保存",
|
||||
"userWorkflows": "ユーザーワークフロー",
|
||||
"yourWorkflows": "あなたのワークフロー",
|
||||
"edit": "編集",
|
||||
"workflowLibrary": "ワークフローライブラリ",
|
||||
"workflowSaved": "ワークフローが保存されました",
|
||||
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
|
||||
"workflowCleared": "ワークフローが作成されました",
|
||||
"autoLayout": "オートレイアウト",
|
||||
"view": "ビュー",
|
||||
"saveChanges": "変更を保存",
|
||||
"noDescription": "説明なし",
|
||||
"recommended": "あなたへのおすすめ",
|
||||
"noRecentWorkflows": "最近のワークフローがありません",
|
||||
"problemLoading": "ワークフローのローディングに関する問題",
|
||||
"newWorkflowCreated": "新しいワークフローが作成されました",
|
||||
"noWorkflows": "ワークフローがありません",
|
||||
"copyShareLink": "共有リンクをコピー",
|
||||
"copyShareLinkForWorkflow": "ワークフローの共有リンクをコピー",
|
||||
"workflowThumbnail": "ワークフローサムネイル",
|
||||
"loadWorkflow": "$t(common.load) ワークフロー",
|
||||
"shared": "共有",
|
||||
"openWorkflow": "ワークフローを開く",
|
||||
"emptyStringPlaceholder": "<空の文字列>",
|
||||
"browseWorkflows": "ワークフローを閲覧する",
|
||||
"saveWorkflowAs": "ワークフローとして保存",
|
||||
"private": "プライベート",
|
||||
"deselectAll": "すべて選択解除",
|
||||
"delete": "削除",
|
||||
"openLibrary": "ライブラリを開く",
|
||||
"loadMore": "もっと読み込む",
|
||||
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
|
||||
"created": "作成されました",
|
||||
"workflowEditorMenu": "ワークフローエディターメニュー",
|
||||
"defaultWorkflows": "デフォルトワークフロー",
|
||||
"allLoaded": "すべてのワークフローが読み込まれました",
|
||||
"filterByTags": "タグでフィルター",
|
||||
"recentlyOpened": "最近開いた",
|
||||
"opened": "オープン",
|
||||
"deleteWorkflow": "ワークフローを削除",
|
||||
"deleteWorkflow2": "このワークフローを削除してもよろしいですか? 元に戻すことはできません。",
|
||||
"loadFromGraph": "グラフからワークフローをロード",
|
||||
"workflowName": "ワークフロー名",
|
||||
"loading": "ワークフローをロードしています",
|
||||
"uploadWorkflow": "ファイルからロードする"
|
||||
},
|
||||
"system": {
|
||||
"logNamespaces": {
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
"boards": "Bảng",
|
||||
"selectedForAutoAdd": "Đã Chọn Để Tự động thêm",
|
||||
"myBoard": "Bảng Của Tôi",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"changeBoard": "Thay Đổi Bảng",
|
||||
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
|
||||
"updateBoardError": "Lỗi khi cập nhật Bảng",
|
||||
@@ -41,18 +41,21 @@
|
||||
"deleteBoard": "Xoá Bảng",
|
||||
"deleteBoardAndImages": "Xoá Bảng Lẫn Hình ảnh",
|
||||
"deleteBoardOnly": "Chỉ Xoá Bảng",
|
||||
"deletedBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Xoá bảng này lẫn ảnh của nó sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"deletedBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Việc xóa ảnh sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
|
||||
"move": "Di Chuyển",
|
||||
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
|
||||
"topMessage": "Lựa chọn này chứa ảnh được dùng với những tính năng sau:",
|
||||
"uncategorized": "Chưa Sắp Xếp",
|
||||
"archived": "Được Lưu Trữ",
|
||||
"loading": "Đang Tải...",
|
||||
"selectBoard": "Chọn Bảng",
|
||||
"archiveBoard": "Lưu trữ Bảng",
|
||||
"unarchiveBoard": "Ngừng Lưu Trữ Bảng",
|
||||
"assetsWithCount_other": "{{count}} tài nguyên"
|
||||
"assetsWithCount_other": "{{count}} tài nguyên",
|
||||
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
|
||||
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
|
||||
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
|
||||
},
|
||||
"gallery": {
|
||||
"swapImages": "Đổi Hình Ảnh",
|
||||
@@ -789,7 +792,9 @@
|
||||
"modelPickerFallbackNoModelsInstalled2": "Nhấp vào <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải.",
|
||||
"modelPickerFallbackNoModelsInstalled": "Không Có Sẵn Model.",
|
||||
"manageModels": "Quản Lý Model",
|
||||
"hfTokenReset": "Làm Mới HF Token"
|
||||
"hfTokenReset": "Làm Mới HF Token",
|
||||
"relatedModels": "Model Liên Quan",
|
||||
"showOnlyRelatedModels": "Liên Quan"
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Hướng Dẫn",
|
||||
@@ -1715,12 +1720,16 @@
|
||||
"fitBboxToLayers": "Xếp Vừa Hộp Giới Hạn Vào Layer",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Phong Cách Và Thành Phần",
|
||||
"style": "Chỉ Lấy Phong Cách",
|
||||
"style": "Phong Cách (Đơn Giản)",
|
||||
"composition": "Chỉ Lấy Thành Phần",
|
||||
"ipAdapterMethod": "Cách Thức",
|
||||
"compositionDesc": "Áp dụng cách trình bày và bỏ qua phong cách mẫu.",
|
||||
"fullDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) & thành phần (cách trình bày).",
|
||||
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày."
|
||||
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày. Tên trước đây là Chỉ Lấy Phong Cách.",
|
||||
"styleStrong": "Phong Cách (Mạnh Mẽ)",
|
||||
"styleStrongDesc": "Áp dụng cách trình bày mạnh mẽ, với một chút giảm nhẹ ảnh hưởng lên thành phần.",
|
||||
"stylePrecise": "Phong Cách (Chính Xác)",
|
||||
"stylePreciseDesc": "Áp dụng cách trình bày chính xác, loại bỏ các chủ thể ảnh hưởng."
|
||||
},
|
||||
"deletePrompt": "Xoá Lệnh",
|
||||
"rasterLayer": "Layer Dạng Raster",
|
||||
@@ -2053,7 +2062,7 @@
|
||||
"colorPicker": "Chọn Màu"
|
||||
},
|
||||
"mergingLayers": "Đang gộp layer",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||
"useImage": "Dùng Hình Ảnh",
|
||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||
@@ -2102,7 +2111,11 @@
|
||||
"imageInfluence": "Ảnh Chi Phối",
|
||||
"medium": "Vừa",
|
||||
"highest": "Cao Nhất"
|
||||
}
|
||||
},
|
||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||
},
|
||||
"stylePresets": {
|
||||
"negativePrompt": "Lệnh Tiêu Cực",
|
||||
@@ -2243,8 +2256,8 @@
|
||||
"problemUnpublishingWorkflowDescription": "Có vấn đề khi ngừng đăng tải workflow. Vui lòng thử lại sau.",
|
||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||
"imagen3IncompatibleGenerationMode": "Google Imagen3 chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2426,8 +2439,8 @@
|
||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"CogView4: Hỗ trợ model CogView4 ở Canvas và Workflow.",
|
||||
"Cập nhật Dependency: Invoke bây giờ sẽ chạy trên phiên bản mới nhất của các dependency của nó, bao gồm Python 3.12 và Pytorch 2.6.0"
|
||||
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
|
||||
@@ -10,7 +10,9 @@ import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatch
|
||||
import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph';
|
||||
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
|
||||
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
|
||||
import { buildFluxKontextGraph } from 'features/nodes/util/graph/generation/buildFluxKontextGraph';
|
||||
import { buildImagen3Graph } from 'features/nodes/util/graph/generation/buildImagen3Graph';
|
||||
import { buildImagen4Graph } from 'features/nodes/util/graph/generation/buildImagen4Graph';
|
||||
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
|
||||
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
|
||||
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
|
||||
@@ -54,8 +56,12 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
return await buildCogView4Graph(state, manager);
|
||||
case 'imagen3':
|
||||
return await buildImagen3Graph(state, manager);
|
||||
case 'imagen4':
|
||||
return await buildImagen4Graph(state, manager);
|
||||
case 'chatgpt-4o':
|
||||
return await buildChatGPT4oGraph(state, manager);
|
||||
case 'flux-kontext':
|
||||
return await buildFluxKontextGraph(state, manager);
|
||||
default:
|
||||
assert(false, `No graph builders for base ${base}`);
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ export type AppFeature =
|
||||
| 'hfToken'
|
||||
| 'retryQueueItem'
|
||||
| 'cancelAndClearAll'
|
||||
| 'chatGPT4oModels';
|
||||
| 'chatGPT4oHigh';
|
||||
/**
|
||||
* A disable-able Stable Diffusion feature
|
||||
*/
|
||||
|
||||
@@ -0,0 +1,108 @@
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import type { Dimensions } from 'features/controlLayers/store/types';
|
||||
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
|
||||
import { debounce } from 'lodash-es';
|
||||
import { type RefObject, useCallback, useEffect, useMemo } from 'react';
|
||||
|
||||
type Options = {
|
||||
trackWidth: boolean;
|
||||
trackHeight: boolean;
|
||||
initialWidth?: number;
|
||||
initialHeight?: number;
|
||||
};
|
||||
|
||||
/**
|
||||
* Persists the width and/or height of a text area to redux.
|
||||
* @param id The unique id of this textarea, used as key to storage
|
||||
* @param ref A ref to the textarea element
|
||||
* @param options.trackWidth Whether to track width
|
||||
* @param options.trackHeight Whether to track height
|
||||
* @param options.initialWidth An optional initial width in pixels
|
||||
* @param options.initialHeight An optional initial height in pixels
|
||||
*/
|
||||
export const usePersistedTextAreaSize = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
|
||||
const { dispatch, getState } = useAppStore();
|
||||
|
||||
const onResize = useCallback(
|
||||
(size: Partial<Dimensions>) => {
|
||||
dispatch(textAreaSizesStateChanged({ id, size }));
|
||||
},
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const debouncedOnResize = useMemo(() => debounce(onResize, 300), [onResize]);
|
||||
|
||||
useEffect(() => {
|
||||
const el = ref.current;
|
||||
if (!el) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Nothing to do here if we are not tracking anything.
|
||||
if (!options.trackHeight && !options.trackWidth) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Before registering the observer, grab the stored size from state - we may need to restore the size.
|
||||
const storedSize = selectUiSlice(getState()).textAreaSizes[id];
|
||||
|
||||
// Prefer to restore the stored size, falling back to initial size if it exists
|
||||
if (storedSize?.width !== undefined) {
|
||||
el.style.width = `${storedSize.width}px`;
|
||||
} else if (options.initialWidth !== undefined) {
|
||||
el.style.width = `${options.initialWidth}px`;
|
||||
}
|
||||
|
||||
if (storedSize?.height !== undefined) {
|
||||
el.style.height = `${storedSize.height}px`;
|
||||
} else if (options.initialHeight !== undefined) {
|
||||
el.style.height = `${options.initialHeight}px`;
|
||||
}
|
||||
|
||||
let currentHeight = el.offsetHeight;
|
||||
let currentWidth = el.offsetWidth;
|
||||
|
||||
const resizeObserver = new ResizeObserver(() => {
|
||||
// We only want to push the changes if a tracked dimension changes
|
||||
let didChange = false;
|
||||
const newSize: Partial<Dimensions> = {};
|
||||
|
||||
if (options.trackHeight) {
|
||||
if (el.offsetHeight !== currentHeight) {
|
||||
didChange = true;
|
||||
currentHeight = el.offsetHeight;
|
||||
}
|
||||
newSize.height = currentHeight;
|
||||
}
|
||||
|
||||
if (options.trackWidth) {
|
||||
if (el.offsetWidth !== currentWidth) {
|
||||
didChange = true;
|
||||
currentWidth = el.offsetWidth;
|
||||
}
|
||||
newSize.width = currentWidth;
|
||||
}
|
||||
|
||||
if (didChange) {
|
||||
debouncedOnResize(newSize);
|
||||
}
|
||||
});
|
||||
|
||||
resizeObserver.observe(el);
|
||||
|
||||
return () => {
|
||||
debouncedOnResize.cancel();
|
||||
resizeObserver.disconnect();
|
||||
};
|
||||
}, [
|
||||
debouncedOnResize,
|
||||
dispatch,
|
||||
getState,
|
||||
id,
|
||||
options.initialHeight,
|
||||
options.initialWidth,
|
||||
options.trackHeight,
|
||||
options.trackWidth,
|
||||
ref,
|
||||
]);
|
||||
};
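For reference, a minimal usage sketch of the usePersistedTextAreaSize hook added above. The component, the 'prompt-textarea' id, and the 120px initial height are hypothetical illustrations, not part of this diff:

import { useRef } from 'react';

// Hypothetical consumer: persist only the height of a prompt textarea, keyed by a stable id.
const PromptTextarea = () => {
  const ref = useRef<HTMLTextAreaElement>(null);
  // trackWidth/trackHeight select which dimensions are written to the ui slice;
  // initialHeight is only applied when no size has been stored yet.
  usePersistedTextAreaSize('prompt-textarea', ref, { trackWidth: false, trackHeight: true, initialHeight: 120 });
  return <textarea ref={ref} style={{ resize: 'vertical' }} />;
};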
|
||||
@@ -0,0 +1,28 @@
|
||||
import { Spinner } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { useAllEntityAdapters } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { computed } from 'nanostores';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
export const CanvasBusySpinner = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const allEntityAdapters = useAllEntityAdapters();
|
||||
const $isPendingRectCalculation = useMemo(
|
||||
() =>
|
||||
computed(
|
||||
allEntityAdapters.map(({ transformer }) => transformer.$isPendingRectCalculation),
|
||||
(...values) => values.some((v) => v)
|
||||
),
|
||||
[allEntityAdapters]
|
||||
);
|
||||
const isPendingRectCalculation = useStore($isPendingRectCalculation);
|
||||
const isRasterizing = useStore(canvasManager.stateApi.$isRasterizing);
|
||||
const isCompositing = useStore(canvasManager.compositor.$isBusy);
|
||||
|
||||
if (isRasterizing || isCompositing || isPendingRectCalculation) {
|
||||
return <Spinner opacity={0.3} />;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
CanvasBusySpinner.displayName = 'CanvasBusySpinner';
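The derived-store pattern above (nanostores computed over an array of stores, reduced with some()) also reads well in isolation; a small sketch with made-up atoms, shown only to illustrate the call shape used in CanvasBusySpinner:

import { atom, computed } from 'nanostores';

// Hypothetical busy flags; in the component above these come from each transformer's $isPendingRectCalculation.
const $layerABusy = atom(false);
const $layerBBusy = atom(false);

// computed() accepts a list of stores and a combiner over their current values.
const $anyBusy = computed([$layerABusy, $layerBBusy], (...values) => values.some((v) => v));

$layerABusy.set(true); // $anyBusy.get() is now true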
|
||||
@@ -12,6 +12,7 @@ import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
|
||||
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
|
||||
import { CanvasAlertsSelectedEntityStatus } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSelectedEntityStatus';
|
||||
import { CanvasAlertsSendingToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
|
||||
import { CanvasBusySpinner } from 'features/controlLayers/components/CanvasBusySpinner';
|
||||
import { CanvasContextMenuGlobalMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuGlobalMenuItems';
|
||||
import { CanvasContextMenuSelectedEntityMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuSelectedEntityMenuItems';
|
||||
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
|
||||
@@ -106,6 +107,9 @@ export const CanvasMainPanelContent = memo(() => {
|
||||
<MenuContent />
|
||||
</Menu>
|
||||
</Flex>
|
||||
<Flex position="absolute" bottom={4} insetInlineEnd={4}>
|
||||
<CanvasBusySpinner />
|
||||
</Flex>
|
||||
</CanvasManagerProviderGate>
|
||||
</Flex>
|
||||
)}
|
||||
|
||||
@@ -2,10 +2,11 @@ import { Button, Flex, Text } from '@invoke-ai/ui-library';
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { usePullBboxIntoLayer } from 'features/controlLayers/hooks/saveCanvasHooks';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { replaceCanvasEntityObjectsWithImage } from 'features/imageActions/actions';
|
||||
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { Trans } from 'react-i18next';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
@@ -23,27 +24,27 @@ export const ControlLayerSettingsEmptyState = memo(() => {
|
||||
const onClickGalleryButton = useCallback(() => {
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}, [dispatch]);
|
||||
const pullBboxIntoLayer = usePullBboxIntoLayer(entityIdentifier);
|
||||
|
||||
const components = useMemo(
|
||||
() => ({
|
||||
UploadButton: (
|
||||
<Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
|
||||
),
|
||||
GalleryButton: (
|
||||
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
PullBboxButton: (
|
||||
<Button onClick={pullBboxIntoLayer} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
}),
|
||||
[isBusy, onClickGalleryButton, pullBboxIntoLayer, uploadApi]
|
||||
);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
|
||||
<Text textAlign="center" color="base.300">
|
||||
<Trans
|
||||
i18nKey="controlLayers.controlLayerEmptyState"
|
||||
components={{
|
||||
UploadButton: (
|
||||
<Button
|
||||
isDisabled={isBusy}
|
||||
size="sm"
|
||||
variant="link"
|
||||
color="base.300"
|
||||
{...uploadApi.getUploadButtonProps()}
|
||||
/>
|
||||
),
|
||||
GalleryButton: (
|
||||
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
}}
|
||||
/>
|
||||
<Trans i18nKey="controlLayers.controlLayerEmptyState" components={components} />
|
||||
</Text>
|
||||
<input {...uploadApi.getUploadInputProps()} />
|
||||
</Flex>
|
||||
|
||||
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
|
||||
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
|
||||
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
|
||||
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
|
||||
import { InpaintMaskSettings } from 'features/controlLayers/components/InpaintMask/InpaintMaskSettings';
|
||||
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
|
||||
import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
@@ -28,6 +29,7 @@ export const InpaintMask = memo(({ id }: Props) => {
|
||||
<Spacer />
|
||||
<CanvasEntityHeaderCommonActions />
|
||||
</CanvasEntityHeader>
|
||||
<InpaintMaskSettings />
|
||||
</CanvasEntityContainer>
|
||||
</CanvasEntityStateGate>
|
||||
</InpaintMaskAdapterGate>
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
// import { Button, Flex } from '@invoke-ai/ui-library';
|
||||
// import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
// import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
// import { useTranslation } from 'react-i18next';
|
||||
// import { PiPlusBold } from 'react-icons/pi';
|
||||
|
||||
// Removed buttons because denoise limit is not helpful for many architectures
|
||||
// Users can access with right click menu instead.
|
||||
// If buttons for noise or new features are deemed important in the future, add them back here.
|
||||
export const InpaintMaskAddButtons = () => {
|
||||
// Buttons are temporarily hidden. To restore, uncomment the code below.
|
||||
return null;
|
||||
// const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
// const { t } = useTranslation();
|
||||
// const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
|
||||
// const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
|
||||
// return (
|
||||
// <Flex w="full" p={2} justifyContent="center">
|
||||
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskDenoiseLimit}>
|
||||
// {t('controlLayers.denoiseLimit')}
|
||||
// </Button>
|
||||
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskNoise}>
|
||||
// {t('controlLayers.imageNoise')}
|
||||
// </Button>
|
||||
// </Flex>
|
||||
// );
|
||||
};
|
||||
@@ -0,0 +1,29 @@
|
||||
import type { IconButtonProps } from '@invoke-ai/ui-library';
|
||||
import { IconButton } from '@invoke-ai/ui-library';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiXBold } from 'react-icons/pi';
|
||||
|
||||
type Props = Omit<IconButtonProps, 'aria-label'> & {
|
||||
onDelete: () => void;
|
||||
};
|
||||
|
||||
export const InpaintMaskDeleteModifierButton = memo(({ onDelete, ...rest }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
return (
|
||||
<IconButton
|
||||
tooltip={t('common.delete')}
|
||||
variant="link"
|
||||
aria-label={t('common.delete')}
|
||||
icon={<PiXBold />}
|
||||
onClick={onDelete}
|
||||
flexGrow={0}
|
||||
size="sm"
|
||||
p={0}
|
||||
colorScheme="error"
|
||||
{...rest}
|
||||
/>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskDeleteModifierButton.displayName = 'InpaintMaskDeleteModifierButton';
|
||||
@@ -0,0 +1,70 @@
|
||||
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import {
|
||||
inpaintMaskDenoiseLimitChanged,
|
||||
inpaintMaskDenoiseLimitDeleted,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const InpaintMaskDenoiseLimitSlider = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const selectDenoiseLimit = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
selectCanvasSlice,
|
||||
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskDenoiseLimitSlider').denoiseLimit
|
||||
),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const denoiseLimit = useAppSelector(selectDenoiseLimit);
|
||||
|
||||
const handleDenoiseLimitChange = useCallback(
|
||||
(value: number) => {
|
||||
dispatch(inpaintMaskDenoiseLimitChanged({ entityIdentifier, denoiseLimit: value }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
);
|
||||
|
||||
const onDeleteDenoiseLimit = useCallback(() => {
|
||||
dispatch(inpaintMaskDenoiseLimitDeleted({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
if (denoiseLimit === undefined) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
|
||||
<Flex justifyContent="space-between" w="full" alignItems="center">
|
||||
<Text fontSize="sm">{t('controlLayers.denoiseLimit')}</Text>
|
||||
<Flex alignItems="center" gap={1}>
|
||||
<Text fontSize="sm">{denoiseLimit.toFixed(2)}</Text>
|
||||
<InpaintMaskDeleteModifierButton onDelete={onDeleteDenoiseLimit} />
|
||||
</Flex>
|
||||
</Flex>
|
||||
<Slider
|
||||
aria-label={t('controlLayers.denoiseLimit')}
|
||||
value={denoiseLimit}
|
||||
min={0}
|
||||
max={1}
|
||||
step={0.01}
|
||||
onChange={handleDenoiseLimitChange}
|
||||
>
|
||||
<SliderTrack>
|
||||
<SliderFilledTrack />
|
||||
</SliderTrack>
|
||||
<SliderThumb />
|
||||
</Slider>
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskDenoiseLimitSlider.displayName = 'InpaintMaskDenoiseLimitSlider';
|
||||
@@ -7,6 +7,7 @@ import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/component
|
||||
import { CanvasEntityMenuItemsMergeDown } from 'features/controlLayers/components/common/CanvasEntityMenuItemsMergeDown';
|
||||
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { InpaintMaskMenuItemsAddModifiers } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsAddModifiers';
|
||||
import { InpaintMaskMenuItemsConvertToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsConvertToSubMenu';
|
||||
import { InpaintMaskMenuItemsCopyToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsCopyToSubMenu';
|
||||
import { memo } from 'react';
|
||||
@@ -20,6 +21,8 @@ export const InpaintMaskMenuItems = memo(() => {
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<InpaintMaskMenuItemsAddModifiers />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsMergeDown />
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const InpaintMaskMenuItemsAddModifiers = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
const { t } = useTranslation();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
|
||||
const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
|
||||
|
||||
return (
|
||||
<>
|
||||
<MenuItem onClick={addInpaintMaskNoise} isDisabled={isBusy}>
|
||||
{t('controlLayers.addImageNoise')}
|
||||
</MenuItem>
|
||||
<MenuItem onClick={addInpaintMaskDenoiseLimit} isDisabled={isBusy}>
|
||||
{t('controlLayers.addDenoiseLimit')}
|
||||
</MenuItem>
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskMenuItemsAddModifiers.displayName = 'InpaintMaskMenuItemsAddModifiers';
|
||||
@@ -0,0 +1,67 @@
|
||||
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { inpaintMaskNoiseChanged, inpaintMaskNoiseDeleted } from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const InpaintMaskNoiseSlider = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const selectNoiseLevel = useMemo(
|
||||
() =>
|
||||
createSelector(
|
||||
selectCanvasSlice,
|
||||
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskNoiseSlider').noiseLevel
|
||||
),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const noiseLevel = useAppSelector(selectNoiseLevel);
|
||||
|
||||
const handleNoiseChange = useCallback(
|
||||
(value: number) => {
|
||||
dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: value }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
);
|
||||
|
||||
const onDeleteNoise = useCallback(() => {
|
||||
dispatch(inpaintMaskNoiseDeleted({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
if (noiseLevel === undefined) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
|
||||
<Flex justifyContent="space-between" w="full" alignItems="center">
|
||||
<Text fontSize="sm">{t('controlLayers.imageNoise')}</Text>
|
||||
<Flex alignItems="center" gap={1}>
|
||||
<Text fontSize="sm">{Math.round(noiseLevel * 100)}%</Text>
|
||||
<InpaintMaskDeleteModifierButton onDelete={onDeleteNoise} />
|
||||
</Flex>
|
||||
</Flex>
|
||||
<Slider
|
||||
aria-label={t('controlLayers.imageNoise')}
|
||||
value={noiseLevel}
|
||||
min={0}
|
||||
max={1}
|
||||
step={0.01}
|
||||
onChange={handleNoiseChange}
|
||||
>
|
||||
<SliderTrack>
|
||||
<SliderFilledTrack />
|
||||
</SliderTrack>
|
||||
<SliderThumb />
|
||||
</Slider>
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskNoiseSlider.displayName = 'InpaintMaskNoiseSlider';
|
||||
@@ -0,0 +1,47 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { InpaintMaskDenoiseLimitSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskDenoiseLimitSlider';
import { InpaintMaskNoiseSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskNoiseSlider';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';

const buildSelectHasDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
    return entity.denoiseLimit !== undefined;
  });

const buildSelectHasNoiseLevel = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
    return entity.noiseLevel !== undefined;
  });

export const InpaintMaskSettings = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
  const selectHasDenoiseLimit = useMemo(() => buildSelectHasDenoiseLimit(entityIdentifier), [entityIdentifier]);
  const selectHasNoiseLevel = useMemo(() => buildSelectHasNoiseLevel(entityIdentifier), [entityIdentifier]);

  const hasDenoiseLimit = useAppSelector(selectHasDenoiseLimit);
  const hasNoiseLevel = useAppSelector(selectHasNoiseLevel);

  if (!hasNoiseLevel && !hasDenoiseLimit) {
    // If we show the <InpaintMaskAddButtons /> below, we can remove this check.
    // Until then, if there are no sliders to show for the mask settings, return null. This prevents rendering an
    // empty settings wrapper div, which adds unnecessary space in the UI.
    return null;
  }

  return (
    <CanvasEntitySettingsWrapper>
      {/* {!hasNoiseLevel && !hasDenoiseLimit && <InpaintMaskAddButtons />} */}
      {hasNoiseLevel && <InpaintMaskNoiseSlider />}
      {hasDenoiseLimit && <InpaintMaskDenoiseLimitSlider />}
    </CanvasEntitySettingsWrapper>
  );
});

InpaintMaskSettings.displayName = 'InpaintMaskSettings';
@@ -1,3 +1,4 @@
|
||||
import type { SystemStyleObject } from '@invoke-ai/ui-library';
|
||||
import {
|
||||
$shift,
|
||||
CompositeSlider,
|
||||
@@ -16,7 +17,6 @@ import { useStore } from '@nanostores/react';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { snapToNearest } from 'features/controlLayers/konva/util';
|
||||
import { round } from 'lodash-es';
|
||||
import { computed } from 'nanostores';
|
||||
import type { KeyboardEvent } from 'react';
|
||||
import { memo, useCallback, useEffect, useState } from 'react';
|
||||
import { PiCaretDownBold, PiMagnifyingGlassMinusBold, PiMagnifyingGlassPlusBold } from 'react-icons/pi';
|
||||
@@ -68,9 +68,16 @@ const sliderDefaultValue = mapRawValueToSliderValue(100);
|
||||
|
||||
const snapCandidates = marks.slice(1, marks.length - 1);
|
||||
|
||||
const inputFieldSx = {
|
||||
paddingInlineEnd: 7,
|
||||
_focusVisible: {
|
||||
zIndex: 0,
|
||||
},
|
||||
} satisfies SystemStyleObject;
|
||||
|
||||
export const CanvasToolbarScale = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
|
||||
const scale = useStore(canvasManager.stage.$scale);
|
||||
const [localScale, setLocalScale] = useState(scale * 100);
|
||||
|
||||
const onChangeSlider = useCallback(
|
||||
@@ -115,7 +122,7 @@ export const CanvasToolbarScale = memo(() => {
|
||||
return (
|
||||
<Flex alignItems="center">
|
||||
<ZoomOutButton />
|
||||
<Popover>
|
||||
<Popover isLazy lazyBehavior="unmount">
|
||||
<PopoverAnchor>
|
||||
<NumberInput
|
||||
variant="outline"
|
||||
@@ -132,7 +139,7 @@ export const CanvasToolbarScale = memo(() => {
|
||||
onKeyDown={onKeyDown}
|
||||
clampValueOnBlur={false}
|
||||
>
|
||||
<NumberInputField paddingInlineEnd={7} title="" _focusVisible={{ zIndex: 0 }} />
|
||||
<NumberInputField title="" sx={inputFieldSx} />
|
||||
<PopoverTrigger>
|
||||
<IconButton
|
||||
aria-label="open-slider"
|
||||
@@ -171,16 +178,17 @@ CanvasToolbarScale.displayName = 'CanvasToolbarScale';
|
||||
|
||||
const SCALE_SNAPS = [0.1, 0.15, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 5, 7.5, 10, 15, 20];
|
||||
|
||||
const ZoomOutButton = () => {
|
||||
const ZoomOutButton = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
|
||||
const scale = useStore(canvasManager.stage.$scale);
|
||||
const onClick = useCallback(() => {
|
||||
const scale = canvasManager.stage.$scale.get();
|
||||
const nextScale =
|
||||
SCALE_SNAPS.slice()
|
||||
.reverse()
|
||||
.find((snap) => snap < scale) ?? canvasManager.stage.config.MIN_SCALE;
|
||||
canvasManager.stage.setScale(Math.max(nextScale, canvasManager.stage.config.MIN_SCALE));
|
||||
}, [canvasManager.stage, scale]);
|
||||
}, [canvasManager.stage]);
|
||||
|
||||
return (
|
||||
<IconButton
|
||||
@@ -192,15 +200,17 @@ const ZoomOutButton = () => {
|
||||
isDisabled={scale <= canvasManager.stage.config.MIN_SCALE}
|
||||
/>
|
||||
);
|
||||
};
|
||||
});
|
||||
ZoomOutButton.displayName = 'ZoomOutButton';
|
||||
|
||||
const ZoomInButton = () => {
|
||||
const ZoomInButton = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
|
||||
const scale = useStore(canvasManager.stage.$scale);
|
||||
const onClick = useCallback(() => {
|
||||
const scale = canvasManager.stage.$scale.get();
|
||||
const nextScale = SCALE_SNAPS.find((snap) => snap > scale) ?? canvasManager.stage.config.MAX_SCALE;
|
||||
canvasManager.stage.setScale(Math.min(nextScale, canvasManager.stage.config.MAX_SCALE));
|
||||
}, [canvasManager.stage, scale]);
|
||||
}, [canvasManager.stage]);
|
||||
|
||||
return (
|
||||
<IconButton
|
||||
@@ -212,4 +222,5 @@ const ZoomInButton = () => {
|
||||
isDisabled={scale >= canvasManager.stage.config.MAX_SCALE}
|
||||
/>
|
||||
);
|
||||
};
|
||||
});
|
||||
ZoomInButton.displayName = 'ZoomInButton';
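// Worked example of the SCALE_SNAPS stepping above (illustrative only): at a current scale of 1.2,
// ZoomOutButton picks the largest snap below it (1) and ZoomInButton picks the smallest snap above
// it (1.5); past either end of the list, the ?? fallback clamps to the stage's MIN_SCALE / MAX_SCALE.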
|
||||
|
||||
@@ -168,3 +168,33 @@ export const useEntityAdapter = (
|
||||
assert(adapter, 'useEntityAdapter must be used within a EntityAdapterContext');
|
||||
return adapter;
|
||||
};
|
||||
|
||||
export const useAllEntityAdapters = () => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const regionalGuidanceAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.regionMasks.subscribe,
|
||||
canvasManager.adapters.regionMasks.getSnapshot
|
||||
);
|
||||
const rasterLayerAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.rasterLayers.subscribe,
|
||||
canvasManager.adapters.rasterLayers.getSnapshot
|
||||
);
|
||||
const controlLayerAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.controlLayers.subscribe,
|
||||
canvasManager.adapters.controlLayers.getSnapshot
|
||||
);
|
||||
const inpaintMaskAdapters = useSyncExternalStore(
|
||||
canvasManager.adapters.inpaintMasks.subscribe,
|
||||
canvasManager.adapters.inpaintMasks.getSnapshot
|
||||
);
|
||||
const allEntityAdapters = useMemo(() => {
|
||||
return [
|
||||
...Array.from(rasterLayerAdapters.values()),
|
||||
...Array.from(controlLayerAdapters.values()),
|
||||
...Array.from(inpaintMaskAdapters.values()),
|
||||
...Array.from(regionalGuidanceAdapters.values()),
|
||||
];
|
||||
}, [controlLayerAdapters, inpaintMaskAdapters, rasterLayerAdapters, regionalGuidanceAdapters]);
|
||||
|
||||
return allEntityAdapters;
|
||||
};
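// For context, a minimal sketch (assumed, not taken from this diff) of the store shape that
// useSyncExternalStore expects from each adapter registry above: `subscribe` returns an unsubscribe
// function, and `getSnapshot` returns a referentially stable Map until the registry actually changes,
// which is what lets the `useMemo` above recompute only when an adapter is added or removed.
//
// type AdapterRegistry<T> = {
//   subscribe: (onStoreChange: () => void) => () => void;
//   getSnapshot: () => ReadonlyMap<string, T>;
// };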
|
||||
|
||||
@@ -6,6 +6,8 @@ import { getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import {
|
||||
controlLayerAdded,
|
||||
inpaintMaskAdded,
|
||||
inpaintMaskDenoiseLimitAdded,
|
||||
inpaintMaskNoiseAdded,
|
||||
rasterLayerAdded,
|
||||
referenceImageAdded,
|
||||
rgAdded,
|
||||
@@ -27,6 +29,7 @@ import type {
|
||||
import {
|
||||
initialChatGPT4oReferenceImage,
|
||||
initialControlNet,
|
||||
initialFluxKontextReferenceImage,
|
||||
initialIPAdapter,
|
||||
initialT2IAdapter,
|
||||
} from 'features/controlLayers/store/util';
|
||||
@@ -85,6 +88,12 @@ export const selectDefaultRefImageConfig = createSelector(
|
||||
return referenceImage;
|
||||
}
|
||||
|
||||
if (selectedMainModel?.base === 'flux-kontext') {
|
||||
const referenceImage = deepClone(initialFluxKontextReferenceImage);
|
||||
referenceImage.model = zModelIdentifierField.parse(selectedMainModel);
|
||||
return referenceImage;
|
||||
}
|
||||
|
||||
const { data } = query;
|
||||
let model: IPAdapterModelConfig | null = null;
|
||||
if (data) {
|
||||
@@ -222,6 +231,24 @@ export const useAddRegionalGuidanceNegativePrompt = (entityIdentifier: CanvasEnt
|
||||
return func;
|
||||
};
|
||||
|
||||
export const useAddInpaintMaskNoise = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const func = useCallback(() => {
|
||||
dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return func;
|
||||
};
|
||||
|
||||
export const useAddInpaintMaskDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
|
||||
const dispatch = useAppDispatch();
|
||||
const func = useCallback(() => {
|
||||
dispatch(inpaintMaskDenoiseLimitAdded({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return func;
|
||||
};
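// Usage sketch (hedged): these hooks presumably back the inpaint mask's "add modifier" UI. A
// hypothetical menu item wiring one of them up - the component name, label, and MenuItem import are
// assumptions, but the dispatched actions and their defaults (0.15 noise level, 1.0 denoise limit)
// come from the canvasSlice reducers later in this diff:
//
// const AddInpaintMaskNoiseMenuItem = ({ entityIdentifier }: { entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'> }) => {
//   const addNoise = useAddInpaintMaskNoise(entityIdentifier);
//   return <MenuItem onClick={addNoise}>Add image noise</MenuItem>;
// };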
|
||||
|
||||
export const buildSelectValidRegionalGuidanceActions = (
|
||||
entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>
|
||||
) => {
|
||||
|
||||
@@ -2,9 +2,12 @@ import { useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
selectIsChatGTP4o,
|
||||
selectIsCogView4,
|
||||
selectIsFluxKontext,
|
||||
selectIsImagen3,
|
||||
selectIsImagen4,
|
||||
selectIsSD3,
|
||||
} from 'features/controlLayers/store/paramsSlice';
|
||||
import { selectActiveReferenceImageEntities } from 'features/controlLayers/store/selectors';
|
||||
import type { CanvasEntityType } from 'features/controlLayers/store/types';
|
||||
import { useMemo } from 'react';
|
||||
import type { Equals } from 'tsafe';
|
||||
@@ -14,24 +17,30 @@ export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
|
||||
const isSD3 = useAppSelector(selectIsSD3);
|
||||
const isCogView4 = useAppSelector(selectIsCogView4);
|
||||
const isImagen3 = useAppSelector(selectIsImagen3);
|
||||
const isImagen4 = useAppSelector(selectIsImagen4);
|
||||
const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
|
||||
const isFluxKontext = useAppSelector(selectIsFluxKontext);
|
||||
const activeReferenceImageEntities = useAppSelector(selectActiveReferenceImageEntities);
|
||||
|
||||
const isEntityTypeEnabled = useMemo<boolean>(() => {
|
||||
switch (entityType) {
|
||||
case 'reference_image':
|
||||
return !isSD3 && !isCogView4 && !isImagen3;
|
||||
if (isFluxKontext) {
|
||||
return activeReferenceImageEntities.length === 0;
|
||||
}
|
||||
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4;
|
||||
case 'regional_guidance':
|
||||
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
|
||||
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
|
||||
case 'control_layer':
|
||||
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
|
||||
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
|
||||
case 'inpaint_mask':
|
||||
return !isImagen3 && !isChatGPT4o;
|
||||
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
|
||||
case 'raster_layer':
|
||||
return !isImagen3 && !isChatGPT4o;
|
||||
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
|
||||
default:
|
||||
assert<Equals<typeof entityType, never>>(false);
|
||||
}
|
||||
}, [entityType, isSD3, isCogView4, isImagen3, isChatGPT4o]);
|
||||
}, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, activeReferenceImageEntities]);
|
||||
|
||||
return isEntityTypeEnabled;
|
||||
};
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { withResult, withResultAsync } from 'common/util/result';
|
||||
import { CanvasCacheModule } from 'features/controlLayers/konva/CanvasCacheModule';
|
||||
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
|
||||
import type { CanvasEntityAdapter, CanvasEntityAdapterFromType } from 'features/controlLayers/konva/CanvasEntity/types';
|
||||
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
|
||||
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
|
||||
@@ -426,6 +427,145 @@ export class CanvasCompositorModule extends CanvasModuleBase {
|
||||
return this.mergeByEntityIdentifiers(entityIdentifiers, false);
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates and uploads a grayscale representation of the inpaint mask image noise or denoise limit values.
|
||||
* This produces an image with a white background where the mask is represented by dark values.
|
||||
*
|
||||
* @param adapters The adapters for the canvas entities to composite
|
||||
* @param rect The region to include in the rasterized image
|
||||
* @param attribute The attribute to use for grayscale values (defaults to 'noiseLevel')
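* @param invertMask If true, the mask's alpha test is inverted, so transparent (unmasked) pixels are darkened by the attribute value instead of the painted mask pixels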
|
||||
* @param uploadOptions Options for uploading the image
|
||||
* @param forceUpload If true, the image is always re-uploaded, returning a new image DTO
|
||||
* @returns A promise that resolves to the image DTO
|
||||
*/
|
||||
getGrayscaleMaskCompositeImageDTO = async (
|
||||
adapters: CanvasEntityAdapterInpaintMask[],
|
||||
rect: Rect,
|
||||
attribute: 'noiseLevel' | 'denoiseLimit' = 'noiseLevel',
|
||||
invertMask: boolean = false,
|
||||
uploadOptions: SetOptional<Omit<UploadImageArg, 'file'>, 'image_category'> = { is_intermediate: true },
|
||||
forceUpload?: boolean
|
||||
): Promise<ImageDTO> => {
|
||||
assert(rect.width > 0 && rect.height > 0, 'Unable to rasterize empty rect');
|
||||
// Use a unique hash that includes the attribute name for caching
|
||||
const hash = this.getCompositeHash(adapters, { rect, attribute, invertMask, grayscale: true });
|
||||
const cachedImageName = forceUpload ? undefined : this.manager.cache.imageNameCache.get(hash);
|
||||
|
||||
let imageDTO: ImageDTO | null = null;
|
||||
|
||||
if (cachedImageName) {
|
||||
imageDTO = await getImageDTOSafe(cachedImageName);
|
||||
if (imageDTO) {
|
||||
this.log.debug({ rect, imageName: cachedImageName, imageDTO }, 'Using cached grayscale composite image');
|
||||
return imageDTO;
|
||||
}
|
||||
this.log.warn({ rect, imageName: cachedImageName }, 'Cached grayscale image name not found, recompositing');
|
||||
}
|
||||
|
||||
// Create a white background canvas
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.width = rect.width;
|
||||
canvas.height = rect.height;
|
||||
|
||||
const ctx = canvas.getContext('2d');
|
||||
assert(ctx !== null, 'Canvas 2D context is null');
|
||||
|
||||
// Fill with white first (creates white background)
|
||||
ctx.fillStyle = 'white';
|
||||
ctx.fillRect(0, 0, rect.width, rect.height);
|
||||
|
||||
// Apply special compositing mode
|
||||
ctx.globalCompositeOperation = 'darken';
|
||||
|
||||
// Draw each adapter's content
|
||||
for (const adapter of adapters) {
|
||||
this.log.debug({ entityIdentifier: adapter.entityIdentifier }, 'Drawing entity to grayscale composite canvas');
|
||||
|
||||
// Get the canvas from the adapter
|
||||
const adapterCanvas = adapter.getCanvas(rect);
|
||||
|
||||
// Create a temporary canvas for grayscale conversion
|
||||
const tempCanvas = document.createElement('canvas');
|
||||
tempCanvas.width = adapterCanvas.width;
|
||||
tempCanvas.height = adapterCanvas.height;
|
||||
|
||||
const tempCtx = tempCanvas.getContext('2d');
|
||||
assert(tempCtx !== null, 'Temp canvas 2D context is null');
|
||||
|
||||
// Draw the original adapter canvas to the temp canvas
|
||||
tempCtx.drawImage(adapterCanvas, 0, 0);
|
||||
|
||||
// Get the image data for processing
|
||||
const imageData = tempCtx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
|
||||
const data = imageData.data;
|
||||
|
||||
const attributeValue = typeof adapter.state[attribute] === 'number' ? (adapter.state[attribute] as number) : 1.0; // Default to full strength if attribute is undefined
|
||||
|
||||
// Process all pixels in the image data
|
||||
for (let i = 0; i < data.length; i += 4) {
|
||||
// Make sure we're accessing valid array indices
|
||||
if (i + 3 < data.length) {
|
||||
// input has transparency
|
||||
// Calculate grayscale value: white (255) for no mask, darker for stronger mask
|
||||
let grayValue = 255; // Default to white for unmasked areas
|
||||
if (invertMask ? (data[i + 3] ?? 0) < 128 : (data[i + 3] ?? 0) > 127) {
|
||||
grayValue = Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
|
||||
}
|
||||
|
||||
data[i] = grayValue; // R
|
||||
data[i + 1] = grayValue; // G
|
||||
data[i + 2] = grayValue; // B
|
||||
data[i + 3] = 255; // A (output is fully opaque)
|
||||
}
|
||||
}
|
||||
|
||||
imageData.data.set(data); // Update the image data with the processed values
|
||||
|
||||
// Put the processed image data back to the temp canvas
|
||||
tempCtx.putImageData(imageData, 0, 0);
|
||||
|
||||
// Draw the temp canvas to the main canvas
|
||||
ctx.drawImage(tempCanvas, 0, 0);
|
||||
}
|
||||
|
||||
// Convert to blob and upload
|
||||
this.$isProcessing.set(true);
|
||||
const blobResult = await withResultAsync(() => canvasToBlob(canvas));
|
||||
this.$isProcessing.set(false);
|
||||
|
||||
if (blobResult.isErr()) {
|
||||
this.log.error(
|
||||
{ error: serializeError(blobResult.error) },
|
||||
'Failed to convert grayscale composite canvas to blob'
|
||||
);
|
||||
throw blobResult.error;
|
||||
}
|
||||
|
||||
const blob = blobResult.value;
|
||||
|
||||
if (this.manager._isDebugging) {
|
||||
previewBlob(blob, 'Grayscale Composite');
|
||||
}
|
||||
|
||||
this.$isUploading.set(true);
|
||||
const uploadResult = await withResultAsync(() =>
|
||||
uploadImage({
|
||||
file: new File([blob], 'canvas-grayscale-composite.png', { type: 'image/png' }),
|
||||
image_category: 'general',
|
||||
...uploadOptions,
|
||||
})
|
||||
);
|
||||
this.$isUploading.set(false);
|
||||
|
||||
if (uploadResult.isErr()) {
|
||||
throw uploadResult.error;
|
||||
}
|
||||
|
||||
imageDTO = uploadResult.value;
|
||||
this.manager.cache.imageNameCache.set(hash, imageDTO.image_name);
|
||||
return imageDTO;
|
||||
};
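// Worked example of the grayscale mapping above (illustrative): a masked pixel with the default
// noiseLevel of 0.15 becomes 255 - round(255 * 0.15) = 217 (light gray), a denoiseLimit of 1.0
// becomes 0 (black), and unmasked pixels stay white (255) - so lower attribute values yield lighter
// masks in the uploaded composite.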
|
||||
|
||||
/**
|
||||
* Calculates the transparency of the composite of the given adapters.
|
||||
* @param adapters The adapters to composite
|
||||
|
||||
@@ -24,12 +24,13 @@ import {
|
||||
selectCanvasSlice,
|
||||
selectEntity,
|
||||
} from 'features/controlLayers/store/selectors';
|
||||
import {
|
||||
type CanvasEntityIdentifier,
|
||||
type CanvasRenderableEntityState,
|
||||
isRasterLayerEntityIdentifier,
|
||||
type Rect,
|
||||
import type {
|
||||
CanvasEntityIdentifier,
|
||||
CanvasRenderableEntityState,
|
||||
LifecycleCallback,
|
||||
Rect,
|
||||
} from 'features/controlLayers/store/types';
|
||||
import { isRasterLayerEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import Konva from 'konva';
|
||||
import { atom } from 'nanostores';
|
||||
@@ -40,11 +41,6 @@ import stableHash from 'stable-hash';
|
||||
import { assert } from 'tsafe';
|
||||
import type { Jsonifiable, JsonObject } from 'type-fest';
|
||||
|
||||
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
|
||||
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`. We'll need to do a
|
||||
// type assertion below in the `onInit` method, which calls these callbacks.
|
||||
type InitCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
|
||||
|
||||
export abstract class CanvasEntityAdapterBase<
|
||||
T extends CanvasRenderableEntityState,
|
||||
U extends string,
|
||||
@@ -118,7 +114,7 @@ export abstract class CanvasEntityAdapterBase<
|
||||
/**
|
||||
* Callbacks that are executed when the module is initialized.
|
||||
*/
|
||||
private static initCallbacks = new Set<InitCallback>();
|
||||
private static initCallbacks = new Set<LifecycleCallback>();
|
||||
|
||||
/**
|
||||
* Register a callback to be run when an entity adapter is initialized.
|
||||
@@ -165,7 +161,7 @@ export abstract class CanvasEntityAdapterBase<
|
||||
* return false;
|
||||
* });
|
||||
*/
|
||||
static registerInitCallback = (callback: InitCallback) => {
|
||||
static registerInitCallback = (callback: LifecycleCallback) => {
|
||||
const wrapped = async (adapter: CanvasEntityAdapter) => {
|
||||
const result = await callback(adapter);
|
||||
if (result) {
|
||||
|
||||
@@ -13,7 +13,7 @@ import {
|
||||
roundRect,
|
||||
} from 'features/controlLayers/konva/util';
|
||||
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
|
||||
import type { Coordinate, LifecycleCallback, Rect, RectWithRotation } from 'features/controlLayers/store/types';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import Konva from 'konva';
|
||||
import type { GroupConfig } from 'konva/lib/Group';
|
||||
@@ -123,7 +123,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
/**
|
||||
* Whether the transformer is currently calculating the rect of the parent.
|
||||
*/
|
||||
$isPendingRectCalculation = atom<boolean>(true);
|
||||
$isPendingRectCalculation = atom<boolean>(false);
|
||||
|
||||
/**
|
||||
* A set of subscriptions that should be cleaned up when the transformer is destroyed.
|
||||
@@ -177,6 +177,11 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
*/
|
||||
transformMutex = new Mutex();
|
||||
|
||||
/**
|
||||
* Callbacks that are executed when the bbox is updated.
|
||||
*/
|
||||
private static bboxUpdatedCallbacks = new Set<LifecycleCallback>();
|
||||
|
||||
konva: {
|
||||
transformer: Konva.Transformer;
|
||||
proxyRect: Konva.Rect;
|
||||
@@ -908,6 +913,8 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
this.parent.renderer.konva.objectGroup.setAttrs(groupAttrs);
|
||||
this.parent.bufferRenderer.konva.group.setAttrs(groupAttrs);
|
||||
}
|
||||
|
||||
CanvasEntityTransformer.runBboxUpdatedCallbacks(this.parent);
|
||||
};
|
||||
|
||||
calculateRect = debounce(() => {
|
||||
@@ -1026,6 +1033,23 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
this.konva.outlineRect.visible(false);
|
||||
};
|
||||
|
||||
static registerBboxUpdatedCallback = (callback: LifecycleCallback) => {
|
||||
const wrapped = async (adapter: CanvasEntityAdapter) => {
|
||||
const result = await callback(adapter);
|
||||
if (result) {
|
||||
this.bboxUpdatedCallbacks.delete(wrapped);
|
||||
}
|
||||
return result;
|
||||
};
|
||||
this.bboxUpdatedCallbacks.add(wrapped);
|
||||
};
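// Usage sketch (hedged): as with CanvasEntityAdapterBase.registerInitCallback, resolving `true`
// unregisters the callback, so a one-shot listener looks like:
//
//   CanvasEntityTransformer.registerBboxUpdatedCallback(async (adapter) => {
//     if (adapter.entityIdentifier.type !== 'raster_layer') {
//       return false; // keep listening
//     }
//     // ...react once to the recalculated bbox...
//     return true; // remove this callback
//   });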
|
||||
|
||||
private static runBboxUpdatedCallbacks = (adapter: CanvasEntityAdapter) => {
|
||||
for (const callback of this.bboxUpdatedCallbacks) {
|
||||
callback(adapter);
|
||||
}
|
||||
};
|
||||
|
||||
repr = () => {
|
||||
return {
|
||||
id: this.id,
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import type { Property } from 'csstype';
|
||||
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
|
||||
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
|
||||
import { getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import { getKonvaNodeDebugAttrs, getPrefixedId, getRectUnion } from 'features/controlLayers/konva/util';
|
||||
import type { Coordinate, Dimensions, Rect, StageAttrs } from 'features/controlLayers/store/types';
|
||||
import Konva from 'konva';
|
||||
import type { KonvaEventObject } from 'konva/lib/Node';
|
||||
import { clamp } from 'lodash-es';
|
||||
import { atom } from 'nanostores';
|
||||
import { atom, computed } from 'nanostores';
|
||||
import type { Logger } from 'roarr';
|
||||
|
||||
type CanvasStageModuleConfig = {
|
||||
@@ -26,6 +26,14 @@ type CanvasStageModuleConfig = {
|
||||
* The padding in pixels to use when fitting the layers to the stage.
|
||||
*/
|
||||
FIT_LAYERS_TO_STAGE_PADDING_PX: number;
|
||||
/**
|
||||
* The snap points for the scale of the canvas.
|
||||
*/
|
||||
SCALE_SNAP_POINTS: number[];
|
||||
/**
|
||||
* The tolerance for snapping the scale of the canvas, as a fraction of the scale.
|
||||
*/
|
||||
SCALE_SNAP_TOLERANCE: number;
|
||||
};
|
||||
|
||||
const DEFAULT_CONFIG: CanvasStageModuleConfig = {
|
||||
@@ -33,6 +41,8 @@ const DEFAULT_CONFIG: CanvasStageModuleConfig = {
|
||||
MAX_SCALE: 20,
|
||||
SCALE_FACTOR: 0.999,
|
||||
FIT_LAYERS_TO_STAGE_PADDING_PX: 48,
|
||||
SCALE_SNAP_POINTS: [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5],
|
||||
SCALE_SNAP_TOLERANCE: 0.05,
|
||||
};
|
||||
|
||||
export class CanvasStageModule extends CanvasModuleBase {
|
||||
@@ -43,6 +53,11 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
readonly manager: CanvasManager;
|
||||
readonly log: Logger;
|
||||
|
||||
// State for scale snapping logic
|
||||
private _intendedScale: number = 1;
|
||||
private _activeSnapPoint: number | null = null;
|
||||
private _snapTimeout: number | null = null;
|
||||
|
||||
container: HTMLDivElement;
|
||||
konva: { stage: Konva.Stage };
|
||||
|
||||
@@ -55,6 +70,7 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
height: 0,
|
||||
scale: 0,
|
||||
});
|
||||
$scale = computed(this.$stageAttrs, (attrs) => attrs.scale);
|
||||
|
||||
subscriptions = new Set<() => void>();
|
||||
resizeObserver: ResizeObserver | null = null;
|
||||
@@ -76,6 +92,9 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
container,
|
||||
}),
|
||||
};
|
||||
|
||||
// Initialize intended scale to the default stage scale
|
||||
this._intendedScale = this.konva.stage.scaleX();
|
||||
}
|
||||
|
||||
setContainer = (container: HTMLDivElement) => {
|
||||
@@ -167,6 +186,18 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Fits the bbox and layers to the stage. The union of the bbox and the visible layers will be centered and scaled
|
||||
* to fit the stage with some padding.
|
||||
*/
|
||||
fitBboxAndLayersToStage = (): void => {
|
||||
const layersRect = this.manager.compositor.getVisibleRectOfType();
|
||||
const bboxRect = this.manager.stateApi.getBbox().rect;
|
||||
const unionRect = getRectUnion(layersRect, bboxRect);
|
||||
this.log.trace({ bboxRect, layersRect, unionRect }, 'Fitting bbox and layers to stage');
|
||||
this.fitRect(unionRect);
|
||||
};
|
||||
|
||||
/**
|
||||
* Fits a rectangle to the stage. The rectangle will be centered and scaled to fit the stage with some padding.
|
||||
*
|
||||
@@ -195,14 +226,27 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
-rect.y * scale + this.config.FIT_LAYERS_TO_STAGE_PADDING_PX + (availableHeight - rect.height * scale) / 2
|
||||
);
|
||||
|
||||
this.konva.stage.setAttrs({
|
||||
// When fitting the stage, we update the intended scale and reset any active snap.
|
||||
this._intendedScale = scale;
|
||||
this._activeSnapPoint = null;
|
||||
|
||||
const tween = new Konva.Tween({
|
||||
node: this.konva.stage,
|
||||
duration: 0.15,
|
||||
x,
|
||||
y,
|
||||
scaleX: scale,
|
||||
scaleY: scale,
|
||||
easing: Konva.Easings.EaseInOut,
|
||||
onUpdate: () => {
|
||||
this.syncStageAttrs();
|
||||
},
|
||||
onFinish: () => {
|
||||
this.syncStageAttrs();
|
||||
tween.destroy();
|
||||
},
|
||||
});
|
||||
|
||||
this.syncStageAttrs({ x, y, scale });
|
||||
tween.play();
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -230,26 +274,41 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
* Constrains a scale to be within the valid range
|
||||
*/
|
||||
constrainScale = (scale: number): number => {
|
||||
return clamp(Math.round(scale * 100) / 100, this.config.MIN_SCALE, this.config.MAX_SCALE);
|
||||
return clamp(scale, this.config.MIN_SCALE, this.config.MAX_SCALE);
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the scale of the stage. If center is provided, the stage will zoom in/out on that point.
|
||||
* @param scale The new scale to set
|
||||
* @param center The center of the stage to zoom in/out on
|
||||
* Programmatically sets the scale of the stage, overriding any active snapping.
|
||||
* If a center point is provided, the stage will zoom on that point.
|
||||
* @param scale The new scale to set.
|
||||
* @param center The center point for the zoom.
|
||||
*/
|
||||
setScale = (scale: number, center: Coordinate = this.getCenter(true)): void => {
|
||||
this.log.trace('Setting scale');
|
||||
setScale = (scale: number, center?: Coordinate): void => {
|
||||
this.log.trace({ scale }, 'Programmatically setting scale');
|
||||
const newScale = this.constrainScale(scale);
|
||||
|
||||
const { x, y } = this.getPosition();
|
||||
// When scale is set programmatically, update the intended scale and reset any active snap.
|
||||
this._intendedScale = newScale;
|
||||
this._activeSnapPoint = null;
|
||||
|
||||
this._applyScale(newScale, center);
|
||||
};
|
||||
|
||||
/**
|
||||
* Applies a scale to the stage, adjusting the position to keep the given center point stationary.
|
||||
* This internal method does NOT modify snapping state.
|
||||
*/
|
||||
private _applyScale = (newScale: number, center?: Coordinate): void => {
|
||||
const oldScale = this.getScale();
|
||||
|
||||
const deltaX = (center.x - x) / oldScale;
|
||||
const deltaY = (center.y - y) / oldScale;
|
||||
const _center = center ?? this.getCenter(true);
|
||||
const { x, y } = this.getPosition();
|
||||
|
||||
const newX = Math.floor(center.x - deltaX * newScale);
|
||||
const newY = Math.floor(center.y - deltaY * newScale);
|
||||
const deltaX = (_center.x - x) / oldScale;
|
||||
const deltaY = (_center.y - y) / oldScale;
|
||||
|
||||
const newX = _center.x - deltaX * newScale;
|
||||
const newY = _center.y - deltaY * newScale;
|
||||
|
||||
this.konva.stage.setAttrs({
|
||||
x: newX,
|
||||
@@ -263,6 +322,7 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
|
||||
onStageMouseWheel = (e: KonvaEventObject<WheelEvent>) => {
|
||||
e.evt.preventDefault();
|
||||
this._snapTimeout && window.clearTimeout(this._snapTimeout);
|
||||
|
||||
if (e.evt.ctrlKey || e.evt.metaKey) {
|
||||
return;
|
||||
@@ -271,12 +331,59 @@ export class CanvasStageModule extends CanvasModuleBase {
|
||||
// We need the absolute cursor position - not the scaled position
|
||||
const cursorPos = this.konva.stage.getPointerPosition();
|
||||
|
||||
if (cursorPos) {
|
||||
// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
|
||||
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
|
||||
const scale = this.manager.stage.getScale() * this.config.SCALE_FACTOR ** delta;
|
||||
this.manager.stage.setScale(scale, cursorPos);
|
||||
if (!cursorPos) {
|
||||
return;
|
||||
}
|
||||
|
||||
// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
|
||||
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
|
||||
|
||||
// Update the intended scale based on the last intended scale, creating a continuous zoom feel
|
||||
const newIntendedScale = this._intendedScale * this.config.SCALE_FACTOR ** delta;
|
||||
this._intendedScale = this.constrainScale(newIntendedScale);
|
||||
|
||||
// Pass control to the snapping logic
|
||||
this._updateScaleWithSnapping(cursorPos);
|
||||
|
||||
this._snapTimeout = window.setTimeout(() => {
|
||||
// After a short delay, we can reset the intended scale to the current scale
|
||||
// This allows for continuous zooming without snapping back to the last snapped scale
|
||||
this._intendedScale = this.getScale();
|
||||
}, 100);
|
||||
};
|
||||
|
||||
/**
|
||||
* Implements "sticky" snap logic.
|
||||
* - If not snapped, checks if the intended scale is close enough to a snap point to engage the snap.
|
||||
* - If snapped, checks if the intended scale has moved far enough away to break the snap.
|
||||
* - Applies the resulting scale to the stage.
|
||||
*/
|
||||
private _updateScaleWithSnapping = (center: Coordinate) => {
|
||||
// If we are currently snapped, check if we should break out
|
||||
if (this._activeSnapPoint !== null) {
|
||||
const threshold = this._activeSnapPoint * this.config.SCALE_SNAP_TOLERANCE;
|
||||
if (Math.abs(this._intendedScale - this._activeSnapPoint) > threshold) {
|
||||
// User has scrolled far enough to break the snap
|
||||
this._activeSnapPoint = null;
|
||||
this._applyScale(this._intendedScale, center);
|
||||
}
|
||||
// Else, do nothing - we remain snapped at the current scale, creating a "dead zone"
|
||||
return;
|
||||
}
|
||||
|
||||
// If we are not snapped, check if we should snap to a point
|
||||
for (const snapPoint of this.config.SCALE_SNAP_POINTS) {
|
||||
const threshold = snapPoint * this.config.SCALE_SNAP_TOLERANCE;
|
||||
if (Math.abs(this._intendedScale - snapPoint) < threshold) {
|
||||
// Engage the snap
|
||||
this._activeSnapPoint = snapPoint;
|
||||
this._applyScale(snapPoint, center);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// If we are not snapping and not breaking a snap, just update to the intended scale
|
||||
this._applyScale(this._intendedScale, center);
|
||||
};
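// Worked example with the defaults above (SCALE_SNAP_TOLERANCE = 0.05): for the snap point 1 the
// threshold is 0.05, so once the intended scale drifts into [0.95, 1.05] the stage snaps to exactly
// 100% and holds there until the intended scale moves more than 0.05 away from 1 again - that dead
// zone is what makes wheel zooming feel "sticky" at the snap points.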
|
||||
|
||||
onStagePointerDown = (e: KonvaEventObject<PointerEvent>) => {
|
||||
|
||||
@@ -8,6 +8,7 @@ import { selectModel } from 'features/controlLayers/store/paramsSlice';
|
||||
import { selectBbox } from 'features/controlLayers/store/selectors';
|
||||
import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types';
|
||||
import type { ModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { API_BASE_MODELS } from 'features/parameters/types/constants';
|
||||
import Konva from 'konva';
|
||||
import { noop } from 'lodash-es';
|
||||
import { atom } from 'nanostores';
|
||||
@@ -235,7 +236,7 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
|
||||
if (tool !== 'bbox') {
|
||||
return NO_ANCHORS;
|
||||
}
|
||||
if (model?.base === 'imagen3' || model?.base === 'chatgpt-4o') {
|
||||
if (model?.base && API_BASE_MODELS.includes(model.base)) {
|
||||
// The bbox is not resizable in these modes
|
||||
return NO_ANCHORS;
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ import {
|
||||
import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify';
|
||||
import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { ASPECT_RATIO_MAP } from 'features/parameters/components/Bbox/constants';
|
||||
import { API_BASE_MODELS } from 'features/parameters/types/constants';
|
||||
import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
|
||||
import type { IRect } from 'konva/lib/types';
|
||||
import { isEqual, merge } from 'lodash-es';
|
||||
@@ -68,7 +69,13 @@ import type {
|
||||
IPMethodV2,
|
||||
T2IAdapterConfig,
|
||||
} from './types';
|
||||
import { getEntityIdentifier, isChatGPT4oAspectRatioID, isImagen3AspectRatioID, isRenderableEntity } from './types';
|
||||
import {
|
||||
getEntityIdentifier,
|
||||
isChatGPT4oAspectRatioID,
|
||||
isFluxKontextAspectRatioID,
|
||||
isImagenAspectRatioID,
|
||||
isRenderableEntity,
|
||||
} from './types';
|
||||
import {
|
||||
converters,
|
||||
getControlLayerState,
|
||||
@@ -80,6 +87,7 @@ import {
|
||||
initialChatGPT4oReferenceImage,
|
||||
initialControlLoRA,
|
||||
initialControlNet,
|
||||
initialFluxKontextReferenceImage,
|
||||
initialFLUXRedux,
|
||||
initialIPAdapter,
|
||||
initialT2IAdapter,
|
||||
@@ -685,6 +693,16 @@ export const canvasSlice = createSlice({
|
||||
return;
|
||||
}
|
||||
|
||||
if (entity.ipAdapter.model.base === 'flux-kontext') {
|
||||
// Switching to flux-kontext
|
||||
entity.ipAdapter = {
|
||||
...initialFluxKontextReferenceImage,
|
||||
image: entity.ipAdapter.image,
|
||||
model: entity.ipAdapter.model,
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
if (entity.ipAdapter.model.type === 'flux_redux') {
|
||||
// Switching to flux_redux
|
||||
entity.ipAdapter = {
|
||||
@@ -1095,6 +1113,30 @@ export const canvasSlice = createSlice({
|
||||
state.inpaintMasks.entities = [data];
|
||||
state.selectedEntityIdentifier = { type: 'inpaint_mask', id: data.id };
|
||||
},
|
||||
inpaintMaskNoiseAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
|
||||
const { entityIdentifier } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.noiseLevel = 0.15; // Default noise level
|
||||
}
|
||||
},
|
||||
inpaintMaskNoiseChanged: (
|
||||
state,
|
||||
action: PayloadAction<EntityIdentifierPayload<{ noiseLevel: number }, 'inpaint_mask'>>
|
||||
) => {
|
||||
const { entityIdentifier, noiseLevel } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.noiseLevel = noiseLevel;
|
||||
}
|
||||
},
|
||||
inpaintMaskNoiseDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
|
||||
const { entityIdentifier } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.noiseLevel = undefined;
|
||||
}
|
||||
},
|
||||
inpaintMaskConvertedToRegionalGuidance: {
|
||||
reducer: (
|
||||
state,
|
||||
@@ -1133,6 +1175,30 @@ export const canvasSlice = createSlice({
|
||||
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
|
||||
}),
|
||||
},
|
||||
inpaintMaskDenoiseLimitAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
|
||||
const { entityIdentifier } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.denoiseLimit = 1.0; // Default denoise limit
|
||||
}
|
||||
},
|
||||
inpaintMaskDenoiseLimitChanged: (
|
||||
state,
|
||||
action: PayloadAction<EntityIdentifierPayload<{ denoiseLimit: number }, 'inpaint_mask'>>
|
||||
) => {
|
||||
const { entityIdentifier, denoiseLimit } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.denoiseLimit = denoiseLimit;
|
||||
}
|
||||
},
|
||||
inpaintMaskDenoiseLimitDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
|
||||
const { entityIdentifier } = action.payload;
|
||||
const entity = selectEntity(state, entityIdentifier);
|
||||
if (entity && entity.type === 'inpaint_mask') {
|
||||
entity.denoiseLimit = undefined;
|
||||
}
|
||||
},
|
||||
//#region BBox
|
||||
bboxScaledWidthChanged: (state, action: PayloadAction<number>) => {
|
||||
const gridSize = getGridSize(state.bbox.modelBase);
|
||||
@@ -1236,7 +1302,10 @@ export const canvasSlice = createSlice({
|
||||
state.bbox.aspectRatio.id = id;
|
||||
if (id === 'Free') {
|
||||
state.bbox.aspectRatio.isLocked = false;
|
||||
} else if (state.bbox.modelBase === 'imagen3' && isImagen3AspectRatioID(id)) {
|
||||
} else if (
|
||||
(state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'imagen4') &&
|
||||
isImagenAspectRatioID(id)
|
||||
) {
|
||||
// Imagen 3 and Imagen 4 have specific output sizes that are not exactly the same as the aspect ratio. Need special handling.
|
||||
if (id === '16:9') {
|
||||
state.bbox.rect.width = 1408;
|
||||
@@ -1270,6 +1339,31 @@ export const canvasSlice = createSlice({
|
||||
}
|
||||
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
|
||||
state.bbox.aspectRatio.isLocked = true;
|
||||
} else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
|
||||
if (id === '3:4') {
|
||||
state.bbox.rect.width = 880;
|
||||
state.bbox.rect.height = 1184;
|
||||
} else if (id === '4:3') {
|
||||
state.bbox.rect.width = 1184;
|
||||
state.bbox.rect.height = 880;
|
||||
} else if (id === '9:16') {
|
||||
state.bbox.rect.width = 752;
|
||||
state.bbox.rect.height = 1392;
|
||||
} else if (id === '16:9') {
|
||||
state.bbox.rect.width = 1392;
|
||||
state.bbox.rect.height = 752;
|
||||
} else if (id === '21:9') {
|
||||
state.bbox.rect.width = 1568;
|
||||
state.bbox.rect.height = 672;
|
||||
} else if (id === '9:21') {
|
||||
state.bbox.rect.width = 672;
|
||||
state.bbox.rect.height = 1568;
|
||||
} else if (id === '1:1') {
|
||||
state.bbox.rect.width = 1024;
|
||||
state.bbox.rect.height = 1024;
|
||||
}
|
||||
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
|
||||
state.bbox.aspectRatio.isLocked = true;
|
||||
} else {
|
||||
state.bbox.aspectRatio.isLocked = true;
|
||||
state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
|
||||
@@ -1742,7 +1836,7 @@ export const canvasSlice = createSlice({
|
||||
const base = model?.base;
|
||||
if (isMainModelBase(base) && state.bbox.modelBase !== base) {
|
||||
state.bbox.modelBase = base;
|
||||
if (base === 'imagen3' || base === 'chatgpt-4o') {
|
||||
if (API_BASE_MODELS.includes(base)) {
|
||||
state.bbox.aspectRatio.isLocked = true;
|
||||
state.bbox.aspectRatio.value = 1;
|
||||
state.bbox.aspectRatio.id = '1:1';
|
||||
@@ -1865,6 +1959,12 @@ export const {
|
||||
// Inpaint mask
|
||||
inpaintMaskAdded,
|
||||
inpaintMaskConvertedToRegionalGuidance,
|
||||
inpaintMaskNoiseAdded,
|
||||
inpaintMaskNoiseChanged,
|
||||
inpaintMaskNoiseDeleted,
|
||||
inpaintMaskDenoiseLimitAdded,
|
||||
inpaintMaskDenoiseLimitChanged,
|
||||
inpaintMaskDenoiseLimitDeleted,
|
||||
// inpaintMaskRecalled,
|
||||
} = canvasSlice.actions;
|
||||
|
||||
@@ -1881,7 +1981,7 @@ export const canvasPersistConfig: PersistConfig<CanvasState> = {
|
||||
};
|
||||
|
||||
const syncScaledSize = (state: CanvasState) => {
|
||||
if (state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'chatgpt-4o') {
|
||||
if (API_BASE_MODELS.includes(state.bbox.modelBase)) {
|
||||
// API models have fixed output sizes. Scaled bbox is not supported.
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -381,7 +381,9 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base
|
||||
export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
|
||||
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
|
||||
export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
|
||||
export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
|
||||
export const selectIsChatGTP4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
|
||||
export const selectIsFluxKontext = createParamsSelector((params) => params.model?.base === 'flux-kontext');
|
||||
|
||||
export const selectModel = createParamsSelector((params) => params.model);
|
||||
export const selectModelKey = createParamsSelector((params) => params.model?.key);
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
|
||||
import { fetchModelConfigByIdentifier } from 'features/metadata/util/modelFetchingHelpers';
|
||||
import { zMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import type { ParameterLoRAModel } from 'features/parameters/types/parameterSchemas';
|
||||
@@ -257,6 +258,13 @@ const zChatGPT4oReferenceImageConfig = z.object({
|
||||
});
|
||||
export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceImageConfig>;
|
||||
|
||||
const zFluxKontextReferenceImageConfig = z.object({
|
||||
type: z.literal('flux_kontext_reference_image'),
|
||||
image: zImageWithDims.nullable(),
|
||||
model: zServerValidatedModelIdentifierField.nullable(),
|
||||
});
|
||||
export type FluxKontextReferenceImageConfig = z.infer<typeof zFluxKontextReferenceImageConfig>;
|
||||
|
||||
const zCanvasEntityBase = z.object({
|
||||
id: zId,
|
||||
name: zName,
|
||||
@@ -267,7 +275,12 @@ const zCanvasEntityBase = z.object({
|
||||
const zCanvasReferenceImageState = zCanvasEntityBase.extend({
|
||||
type: z.literal('reference_image'),
|
||||
// This should be named `referenceImage` but we need to keep it as `ipAdapter` for backwards compatibility
|
||||
ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zChatGPT4oReferenceImageConfig]),
|
||||
ipAdapter: z.discriminatedUnion('type', [
|
||||
zIPAdapterConfig,
|
||||
zFLUXReduxConfig,
|
||||
zChatGPT4oReferenceImageConfig,
|
||||
zFluxKontextReferenceImageConfig,
|
||||
]),
|
||||
});
|
||||
export type CanvasReferenceImageState = z.infer<typeof zCanvasReferenceImageState>;
|
||||
|
||||
@@ -279,6 +292,9 @@ export const isFLUXReduxConfig = (config: CanvasReferenceImageState['ipAdapter']
|
||||
export const isChatGPT4oReferenceImageConfig = (
|
||||
config: CanvasReferenceImageState['ipAdapter']
|
||||
): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
|
||||
export const isFluxKontextReferenceImageConfig = (
|
||||
config: CanvasReferenceImageState['ipAdapter']
|
||||
): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
|
||||
|
||||
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
|
||||
export type FillStyle = z.infer<typeof zFillStyle>;
|
||||
@@ -310,6 +326,8 @@ const zCanvasInpaintMaskState = zCanvasEntityBase.extend({
|
||||
fill: zFill,
|
||||
opacity: zOpacity,
|
||||
objects: z.array(zCanvasObjectState),
|
||||
noiseLevel: z.number().gte(0).lte(1).optional(),
|
||||
denoiseLimit: z.number().gte(0).lte(1).optional(),
|
||||
});
|
||||
export type CanvasInpaintMaskState = z.infer<typeof zCanvasInpaintMaskState>;
|
||||
|
||||
@@ -403,16 +421,20 @@ export type StagingAreaImage = {
|
||||
offsetY: number;
|
||||
};
|
||||
|
||||
export const zAspectRatioID = z.enum(['Free', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
|
||||
export const zAspectRatioID = z.enum(['Free', '21:9', '9:21', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
|
||||
|
||||
export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']);
|
||||
export const isImagen3AspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
|
||||
export const isImagenAspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
|
||||
zImagen3AspectRatioID.safeParse(v).success;
|
||||
|
||||
export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']);
|
||||
export const isChatGPT4oAspectRatioID = (v: unknown): v is z.infer<typeof zChatGPT4oAspectRatioID> =>
|
||||
zChatGPT4oAspectRatioID.safeParse(v).success;
|
||||
|
||||
export const zFluxKontextAspectRatioID = z.enum(['21:9', '4:3', '1:1', '3:4', '9:21', '16:9', '9:16']);
|
||||
export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer<typeof zFluxKontextAspectRatioID> =>
|
||||
zFluxKontextAspectRatioID.safeParse(v).success;
|
||||
|
||||
export type AspectRatioID = z.infer<typeof zAspectRatioID>;
|
||||
export const isAspectRatioID = (v: unknown): v is AspectRatioID => zAspectRatioID.safeParse(v).success;
|
||||
|
||||
@@ -609,3 +631,7 @@ export const isMaskEntityIdentifier = (
|
||||
): entityIdentifier is CanvasEntityIdentifier<'inpaint_mask' | 'regional_guidance'> => {
|
||||
return isInpaintMaskEntityIdentifier(entityIdentifier) || isRegionalGuidanceEntityIdentifier(entityIdentifier);
|
||||
};
|
||||
|
||||
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
|
||||
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`.
|
||||
export type LifecycleCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
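// The Promise<boolean> result is the unregistration contract shared by
// CanvasEntityAdapterBase.registerInitCallback and CanvasEntityTransformer.registerBboxUpdatedCallback:
// resolving `true` removes the callback, resolving `false` keeps it registered for the next event.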
|
||||
|
||||
@@ -10,6 +10,7 @@ import type {
|
||||
ChatGPT4oReferenceImageConfig,
|
||||
ControlLoRAConfig,
|
||||
ControlNetConfig,
|
||||
FluxKontextReferenceImageConfig,
|
||||
FLUXReduxConfig,
|
||||
ImageWithDims,
|
||||
IPAdapterConfig,
|
||||
@@ -83,6 +84,11 @@ export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
|
||||
image: null,
|
||||
model: null,
|
||||
};
|
||||
export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
|
||||
type: 'flux_kontext_reference_image',
|
||||
image: null,
|
||||
model: null,
|
||||
};
|
||||
export const initialT2IAdapter: T2IAdapterConfig = {
|
||||
type: 't2i_adapter',
|
||||
model: null,
|
||||
@@ -199,6 +205,8 @@ export const getInpaintMaskState = (
|
||||
style: 'diagonal',
|
||||
color: getInpaintMaskFillColor(),
|
||||
},
|
||||
noiseLevel: undefined,
|
||||
denoiseLimit: undefined,
|
||||
};
|
||||
merge(entityState, overrides);
|
||||
return entityState;
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
import { roundToMultiple } from 'common/util/roundDownToMultiple';
|
||||
import type { Dimensions } from 'features/controlLayers/store/types';
|
||||
import type { MainModelBase } from 'features/nodes/types/common';
|
||||
import { getGridSize, getOptimalDimension } from 'features/parameters/util/optimalDimension';
|
||||
import {
|
||||
getGridSize,
|
||||
getOptimalDimension,
|
||||
isInSDXLTrainingDimensions,
|
||||
} from 'features/parameters/util/optimalDimension';
|
||||
|
||||
/**
|
||||
* Scales the bounding box dimensions to the optimal dimension. The optimal dimensions should be the trained dimension
|
||||
@@ -10,6 +14,11 @@ import { getGridSize, getOptimalDimension } from 'features/parameters/util/optim
|
||||
* @param modelBase The base model
|
||||
*/
|
||||
export const getScaledBoundingBoxDimensions = (dimensions: Dimensions, modelBase: MainModelBase): Dimensions => {
|
||||
// Special cases: Return original if SDXL and in training dimensions
|
||||
if (modelBase === 'sdxl' && isInSDXLTrainingDimensions(dimensions.width, dimensions.height)) {
|
||||
return { ...dimensions };
|
||||
}
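// isInSDXLTrainingDimensions presumably checks the well-known SDXL training buckets
// (1024x1024, 1152x896, 1216x832, 1344x768, 1536x640 and their transposed variants); the exact
// list is an assumption - it is not shown in this diff.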
|
||||
|
||||
const optimalDimension = getOptimalDimension(modelBase);
|
||||
const gridSize = getGridSize(modelBase);
|
||||
const width = roundToMultiple(dimensions.width, gridSize);
|
||||
|
||||
@@ -26,19 +26,26 @@ import { atom } from 'nanostores';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useListAllImageNamesForBoardQuery } from 'services/api/endpoints/boards';
|
||||
import { useDeleteBoardAndImagesMutation, useDeleteBoardMutation } from 'services/api/endpoints/images';
|
||||
import {
|
||||
useDeleteBoardAndImagesMutation,
|
||||
useDeleteBoardMutation,
|
||||
useDeleteUncategorizedImagesMutation,
|
||||
} from 'services/api/endpoints/images';
|
||||
import type { BoardDTO } from 'services/api/types';
|
||||
|
||||
export const $boardToDelete = atom<BoardDTO | null>(null);
|
||||
export const $boardToDelete = atom<BoardDTO | 'none' | null>(null);
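// 'none' is a sentinel for the "Uncategorized" pseudo-board (images with no board); a BoardDTO
// targets a real board, and null means the dialog is closed.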
|
||||
|
||||
const DeleteBoardModal = () => {
|
||||
useAssertSingleton('DeleteBoardModal');
|
||||
const boardToDelete = useStore($boardToDelete);
|
||||
const { t } = useTranslation();
|
||||
|
||||
const boardId = useMemo(() => (boardToDelete === 'none' ? 'none' : boardToDelete?.board_id), [boardToDelete]);
|
||||
|
||||
const { currentData: boardImageNames, isFetching: isFetchingBoardNames } = useListAllImageNamesForBoardQuery(
|
||||
boardToDelete?.board_id
|
||||
boardId
|
||||
? {
|
||||
board_id: boardToDelete?.board_id,
|
||||
board_id: boardId,
|
||||
categories: undefined,
|
||||
is_intermediate: undefined,
|
||||
}
|
||||
@@ -71,10 +78,13 @@ const DeleteBoardModal = () => {
|
||||
|
||||
const [deleteBoardAndImages, { isLoading: isDeleteBoardAndImagesLoading }] = useDeleteBoardAndImagesMutation();
|
||||
|
||||
const [deleteUncategorizedImages, { isLoading: isDeleteUncategorizedImagesLoading }] =
|
||||
useDeleteUncategorizedImagesMutation();
|
||||
|
||||
const imageUsageSummary = useAppSelector(selectImageUsageSummary);
|
||||
|
||||
const handleDeleteBoardOnly = useCallback(() => {
|
||||
if (!boardToDelete) {
|
||||
if (!boardToDelete || boardToDelete === 'none') {
|
||||
return;
|
||||
}
|
||||
deleteBoardOnly(boardToDelete.board_id);
|
||||
@@ -82,13 +92,21 @@ const DeleteBoardModal = () => {
|
||||
}, [boardToDelete, deleteBoardOnly]);
|
||||
|
||||
const handleDeleteBoardAndImages = useCallback(() => {
|
||||
if (!boardToDelete) {
|
||||
if (!boardToDelete || boardToDelete === 'none') {
|
||||
return;
|
||||
}
|
||||
deleteBoardAndImages(boardToDelete.board_id);
|
||||
$boardToDelete.set(null);
|
||||
}, [boardToDelete, deleteBoardAndImages]);
|
||||
|
||||
const handleDeleteUncategorizedImages = useCallback(() => {
|
||||
if (!boardToDelete || boardToDelete !== 'none') {
|
||||
return;
|
||||
}
|
||||
deleteUncategorizedImages();
|
||||
$boardToDelete.set(null);
|
||||
}, [boardToDelete, deleteUncategorizedImages]);
|
||||
|
||||
const handleClose = useCallback(() => {
|
||||
$boardToDelete.set(null);
|
||||
}, []);
|
||||
@@ -96,8 +114,12 @@ const DeleteBoardModal = () => {
|
||||
const cancelRef = useRef<HTMLButtonElement>(null);
|
||||
|
||||
const isLoading = useMemo(
|
||||
() => isDeleteBoardAndImagesLoading || isDeleteBoardOnlyLoading || isFetchingBoardNames,
|
||||
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames]
|
||||
() =>
|
||||
isDeleteBoardAndImagesLoading ||
|
||||
isDeleteBoardOnlyLoading ||
|
||||
isFetchingBoardNames ||
|
||||
isDeleteUncategorizedImagesLoading,
|
||||
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames, isDeleteUncategorizedImagesLoading]
|
||||
);
|
||||
|
||||
if (!boardToDelete) {
|
||||
@@ -109,7 +131,7 @@ const DeleteBoardModal = () => {
|
||||
<AlertDialogOverlay>
|
||||
<AlertDialogContent>
|
||||
<AlertDialogHeader fontSize="lg" fontWeight="bold">
|
||||
{t('common.delete')} {boardToDelete.board_name}
|
||||
{t('common.delete')} {boardToDelete === 'none' ? t('boards.uncategorizedImages') : boardToDelete.board_name}
|
||||
</AlertDialogHeader>
|
||||
|
||||
<AlertDialogBody>
|
||||
@@ -125,11 +147,13 @@ const DeleteBoardModal = () => {
|
||||
bottomMessage={t('boards.bottomMessage')}
|
||||
/>
|
||||
)}
|
||||
<Text>
|
||||
{boardToDelete.is_private
|
||||
? t('boards.deletedPrivateBoardsCannotbeRestored')
|
||||
: t('boards.deletedBoardsCannotbeRestored')}
|
||||
</Text>
|
||||
{boardToDelete !== 'none' && (
|
||||
<Text>
|
||||
{boardToDelete.is_private
|
||||
? t('boards.deletedPrivateBoardsCannotbeRestored')
|
||||
: t('boards.deletedBoardsCannotbeRestored')}
|
||||
</Text>
|
||||
)}
|
||||
<Text>{t('gallery.deleteImagePermanent')}</Text>
|
||||
</Flex>
|
||||
</AlertDialogBody>
|
||||
@@ -138,12 +162,21 @@ const DeleteBoardModal = () => {
|
||||
<Button ref={cancelRef} onClick={handleClose}>
|
||||
{t('boards.cancel')}
|
||||
</Button>
|
||||
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
|
||||
{t('boards.deleteBoardOnly')}
|
||||
</Button>
|
||||
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
|
||||
{t('boards.deleteBoardAndImages')}
|
||||
</Button>
|
||||
{boardToDelete !== 'none' && (
|
||||
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
|
||||
{t('boards.deleteBoardOnly')}
|
||||
</Button>
|
||||
)}
|
||||
{boardToDelete !== 'none' && (
|
||||
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
|
||||
{t('boards.deleteBoardAndImages')}
|
||||
</Button>
|
||||
)}
|
||||
{boardToDelete === 'none' && (
|
||||
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteUncategorizedImages}>
|
||||
{t('boards.deleteAllUncategorizedImages')}
|
||||
</Button>
|
||||
)}
|
||||
</Flex>
|
||||
</AlertDialogFooter>
|
||||
</AlertDialogContent>
|
||||
|
||||
@@ -7,9 +7,11 @@ import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadBold, PiPlusBold } from 'react-icons/pi';
import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
import { useBulkDownloadImagesMutation } from 'services/api/endpoints/images';

import { $boardToDelete } from './DeleteBoardModal';

type Props = {
children: ContextMenuProps<HTMLDivElement>['children'];
};
@@ -33,6 +35,10 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
bulkDownload({ image_names: [], board_id: 'none' });
}, [bulkDownload]);

const setUncategorizedImagesAsToBeDeleted = useCallback(() => {
$boardToDelete.set('none');
}, []);

const renderMenuFunc = useCallback(
() => (
<MenuList visibility="visible">
@@ -47,10 +53,26 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
{t('boards.downloadBoard')}
</MenuItem>
)}
<MenuItem
color="error.300"
icon={<PiTrashSimpleBold />}
onClick={setUncategorizedImagesAsToBeDeleted}
isDestructive
>
{t('boards.deleteAllUncategorizedImages')}
</MenuItem>
</MenuGroup>
</MenuList>
),
[autoAssignBoardOnClick, handleBulkDownload, handleSetAutoAdd, isBulkDownloadEnabled, isSelectedForAutoAdd, t]
[
autoAssignBoardOnClick,
handleBulkDownload,
handleSetAutoAdd,
isBulkDownloadEnabled,
isSelectedForAutoAdd,
t,
setUncategorizedImagesAsToBeDeleted,
]
);

return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;
@@ -19,9 +19,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const imageViewer = useImageViewer();
const isBusy = useCanvasIsBusySafe();

const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -31,9 +31,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -43,9 +43,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -55,9 +55,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -19,6 +19,7 @@ type Props = {
withDownload?: boolean;
withCopy?: boolean;
extraCopyActions?: { label: string; getData: (data: unknown) => unknown }[];
wrapData?: boolean;
} & FlexProps;

const overlayscrollbarsOptions = getOverlayScrollbarsParams({
@@ -29,7 +30,16 @@ const overlayscrollbarsOptions = getOverlayScrollbarsParams({
const ChakraPre = chakra('pre');

const DataViewer = (props: Props) => {
const { label, data, fileName, withDownload = true, withCopy = true, extraCopyActions, ...rest } = props;
const {
label,
data,
fileName,
withDownload = true,
withCopy = true,
extraCopyActions,
wrapData = true,
...rest
} = props;
const dataString = useMemo(() => (isString(data) ? data : formatter.Serialize(data)) ?? '', [data]);
const shift = useShiftModifier();
const clipboard = useClipboard();
@@ -53,7 +63,7 @@ const DataViewer = (props: Props) => {
<Flex bg="base.800" borderRadius="base" flexGrow={1} w="full" h="full" position="relative" {...rest}>
<Box position="absolute" top={0} left={0} right={0} bottom={0} overflow="auto" p={2} fontSize="sm">
<OverlayScrollbarsComponent defer style={overlayScrollbarsStyles} options={overlayscrollbarsOptions}>
<ChakraPre whiteSpace="pre-wrap">{dataString}</ChakraPre>
<ChakraPre whiteSpace={wrapData ? 'pre-wrap' : undefined}>{dataString}</ChakraPre>
</OverlayScrollbarsComponent>
</Box>
<Flex position="absolute" top={0} insetInlineEnd={0} p={2}>
@@ -1,7 +1,7 @@
import type { AppDispatch, RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { selectDefaultIPAdapter, selectDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
import { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { canvasReset } from 'features/controlLayers/store/actions';
import {
@@ -20,6 +20,7 @@ import type {
CanvasControlLayerState,
CanvasEntityIdentifier,
CanvasEntityType,
CanvasImageState,
CanvasInpaintMaskState,
CanvasRasterLayerState,
CanvasRegionalGuidanceState,
@@ -34,7 +35,7 @@ import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import type { FieldIdentifier } from 'features/nodes/types/field';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
import { imagesApi } from 'services/api/endpoints/images';
import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -142,14 +143,14 @@ export const createNewCanvasEntityFromImage = (arg: {
*
* Using 'raster_layer' for the type and enabling `withResize` replicates the common img2img flow.
*/
export const newCanvasFromImage = (arg: {
export const newCanvasFromImage = async (arg: {
imageDTO: ImageDTO;
type: CanvasEntityType | 'regional_guidance_with_reference_image';
withResize: boolean;
withResize?: boolean;
dispatch: AppDispatch;
getState: () => RootState;
}) => {
const { type, imageDTO, withResize, dispatch, getState } = arg;
const { type, imageDTO, withResize = false, dispatch, getState } = arg;
const state = getState();

const base = selectBboxModelBase(state);
@@ -158,20 +159,29 @@ export const newCanvasFromImage = (arg: {
const optimalDimension = getOptimalDimension(base);
const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);

const imageObject = imageDTOToImageObject(imageDTO);
const { x, y } = selectBboxRect(state);
let imageObject: CanvasImageState;

const addInitCallback = (id: string) => {
CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
if (withResize && (width !== imageDTO.width || height !== imageDTO.height)) {
const resizedImageDTO = await uploadImage({
file: await imageDTOToFile(imageDTO),
image_category: 'general',
is_intermediate: true,
silent: true,
resize_to: { width, height },
});
imageObject = imageDTOToImageObject(resizedImageDTO);
} else {
imageObject = imageDTOToImageObject(imageDTO);
}

const addFitOnLayerInitCallback = (adapterId: string) => {
CanvasEntityTransformer.registerBboxUpdatedCallback((adapter) => {
// Skip the callback if the adapter is not the one we are creating
if (adapter.id !== id) {
return false;
if (adapter.id !== adapterId) {
return Promise.resolve(false);
}
// Fit the layer to the bbox w/ fill strategy
await adapter.transformer.startTransform({ silent: true });
adapter.transformer.fitToBboxFill();
await adapter.transformer.applyTransform();
return true;
adapter.manager.stage.fitBboxAndLayersToStage();
return Promise.resolve(true);
});
};

@@ -180,11 +190,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('raster_layer'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRasterLayerState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -195,12 +202,9 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('control_layer'),
objects: [imageObject],
position: { x, y },
controlAdapter: deepClone(initialControlNet),
} satisfies Partial<CanvasControlLayerState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -211,11 +215,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('inpaint_mask'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasInpaintMaskState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -226,11 +227,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('regional_guidance'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRegionalGuidanceState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -17,7 +17,9 @@ export const BASE_COLOR_MAP: Record<BaseModelType, string> = {
flux: 'gold',
cogview4: 'red',
imagen3: 'pink',
imagen4: 'pink',
'chatgpt-4o': 'pink',
'flux-kontext': 'pink',
};

const ModelBaseBadge = ({ base }: Props) => {
@@ -4,9 +4,11 @@ import { FloatFieldSlider } from 'features/nodes/components/flow/nodes/Invocatio
import ChatGPT4oModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ChatGPT4oModelFieldInputComponent';
import { FloatFieldCollectionInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FloatFieldCollectionInputComponent';
import { FloatGeneratorFieldInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FloatGeneratorFieldComponent';
import FluxKontextModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FluxKontextModelFieldInputComponent';
import { ImageFieldCollectionInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ImageFieldCollectionInputComponent';
import { ImageGeneratorFieldInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ImageGeneratorFieldComponent';
import Imagen3ModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/Imagen3ModelFieldInputComponent';
import Imagen4ModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/Imagen4ModelFieldInputComponent';
import { IntegerFieldCollectionInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/IntegerFieldCollectionInputComponent';
import { IntegerGeneratorFieldInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/IntegerGeneratorFieldComponent';
import ModelIdentifierFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ModelIdentifierFieldInputComponent';
@@ -49,6 +51,8 @@ import {
isFloatFieldInputTemplate,
isFloatGeneratorFieldInputInstance,
isFloatGeneratorFieldInputTemplate,
isFluxKontextModelFieldInputInstance,
isFluxKontextModelFieldInputTemplate,
isFluxMainModelFieldInputInstance,
isFluxMainModelFieldInputTemplate,
isFluxReduxModelFieldInputInstance,
@@ -63,6 +67,8 @@ import {
isImageGeneratorFieldInputTemplate,
isImagen3ModelFieldInputInstance,
isImagen3ModelFieldInputTemplate,
isImagen4ModelFieldInputInstance,
isImagen4ModelFieldInputTemplate,
isIntegerFieldCollectionInputInstance,
isIntegerFieldCollectionInputTemplate,
isIntegerFieldInputInstance,
@@ -407,6 +413,20 @@ export const InputFieldRenderer = memo(({ nodeId, fieldName, settings }: Props)
return <Imagen3ModelFieldInputComponent nodeId={nodeId} field={field} fieldTemplate={template} />;
}

if (isImagen4ModelFieldInputTemplate(template)) {
if (!isImagen4ModelFieldInputInstance(field)) {
return null;
}
return <Imagen4ModelFieldInputComponent nodeId={nodeId} field={field} fieldTemplate={template} />;
}

if (isFluxKontextModelFieldInputTemplate(template)) {
if (!isFluxKontextModelFieldInputInstance(field)) {
return null;
}
return <FluxKontextModelFieldInputComponent nodeId={nodeId} field={field} fieldTemplate={template} />;
}

if (isChatGPT4oModelFieldInputTemplate(template)) {
if (!isChatGPT4oModelFieldInputInstance(field)) {
return null;
@@ -0,0 +1,49 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { ModelFieldCombobox } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ModelFieldCombobox';
import { fieldFluxKontextModelValueChanged } from 'features/nodes/store/nodesSlice';
import type {
FluxKontextModelFieldInputInstance,
FluxKontextModelFieldInputTemplate,
} from 'features/nodes/types/field';
import { memo, useCallback } from 'react';
import { useFluxKontextModels } from 'services/api/hooks/modelsByType';
import type { ApiModelConfig } from 'services/api/types';

import type { FieldComponentProps } from './types';

const FluxKontextModelFieldInputComponent = (
props: FieldComponentProps<FluxKontextModelFieldInputInstance, FluxKontextModelFieldInputTemplate>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();

const [modelConfigs, { isLoading }] = useFluxKontextModels();

const onChange = useCallback(
(value: ApiModelConfig | null) => {
if (!value) {
return;
}
dispatch(
fieldFluxKontextModelValueChanged({
nodeId,
fieldName: field.name,
value,
})
);
},
[dispatch, field.name, nodeId]
);

return (
<ModelFieldCombobox
value={field.value}
modelConfigs={modelConfigs}
isLoadingConfigs={isLoading}
onChange={onChange}
required={props.fieldTemplate.required}
/>
);
};

export default memo(FluxKontextModelFieldInputComponent);
@@ -0,0 +1,46 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { ModelFieldCombobox } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ModelFieldCombobox';
import { fieldImagen4ModelValueChanged } from 'features/nodes/store/nodesSlice';
import type { Imagen4ModelFieldInputInstance, Imagen4ModelFieldInputTemplate } from 'features/nodes/types/field';
import { memo, useCallback } from 'react';
import { useImagen4Models } from 'services/api/hooks/modelsByType';
import type { ApiModelConfig } from 'services/api/types';

import type { FieldComponentProps } from './types';

const Imagen4ModelFieldInputComponent = (
props: FieldComponentProps<Imagen4ModelFieldInputInstance, Imagen4ModelFieldInputTemplate>
) => {
const { nodeId, field } = props;
const dispatch = useAppDispatch();

const [modelConfigs, { isLoading }] = useImagen4Models();

const onChange = useCallback(
(value: ApiModelConfig | null) => {
if (!value) {
return;
}
dispatch(
fieldImagen4ModelValueChanged({
nodeId,
fieldName: field.name,
value,
})
);
},
[dispatch, field.name, nodeId]
);

return (
<ModelFieldCombobox
value={field.value}
modelConfigs={modelConfigs}
isLoadingConfigs={isLoading}
onChange={onChange}
required={props.fieldTemplate.required}
/>
);
};

export default memo(Imagen4ModelFieldInputComponent);
@@ -19,6 +19,10 @@ import { useGetBatchStatusQuery } from 'services/api/endpoints/queue';
import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
import { assert } from 'tsafe';

type FieldIdentiferWithLabel = FieldIdentifier & { label: string | null };
type FieldIdentiferWithLabelAndType = FieldIdentiferWithLabel & { type: string };

export const $isPublishing = atom(false);
export const $isInPublishFlow = atom(false);
export const $outputNodeId = atom<string | null>(null);
export const $isSelectingOutputNode = atom(false);
@@ -53,21 +57,26 @@ export const selectFieldIdentifiersWithInvocationTypes = createSelector(
selectWorkflowFormNodeFieldFieldIdentifiersDeduped,
selectNodesSlice,
(fieldIdentifiers, nodes) => {
const result: { nodeId: string; fieldName: string; type: string }[] = [];
const result: FieldIdentiferWithLabelAndType[] = [];
for (const fieldIdentifier of fieldIdentifiers) {
const node = nodes.nodes.find((node) => node.id === fieldIdentifier.nodeId);
assert(isInvocationNode(node), `Node ${fieldIdentifier.nodeId} not found`);
result.push({ nodeId: fieldIdentifier.nodeId, fieldName: fieldIdentifier.fieldName, type: node.data.type });
result.push({
nodeId: fieldIdentifier.nodeId,
fieldName: fieldIdentifier.fieldName,
type: node.data.type,
label: node.data.inputs[fieldIdentifier.fieldName]?.label ?? null,
});
}

return result;
}
);

export const getPublishInputs = (fieldIdentifiers: (FieldIdentifier & { type: string })[], templates: Templates) => {
export const getPublishInputs = (fieldIdentifiers: FieldIdentiferWithLabelAndType[], templates: Templates) => {
// Certain field types are not allowed to be input fields on a published workflow
const publishable: FieldIdentifier[] = [];
const unpublishable: FieldIdentifier[] = [];
const publishable: FieldIdentiferWithLabel[] = [];
const unpublishable: FieldIdentiferWithLabel[] = [];
for (const fieldIdentifier of fieldIdentifiers) {
const fieldTemplate = templates[fieldIdentifier.type]?.inputs[fieldIdentifier.fieldName];
if (!fieldTemplate) {
@@ -121,10 +130,13 @@ const NODE_TYPE_PUBLISH_DENYLIST = [
'metadata_to_controlnets',
'metadata_to_ip_adapters',
'metadata_to_t2i_adapters',
'google_imagen3_generate',
'google_imagen3_edit',
'chatgpt_create_image',
'chatgpt_edit_image',
'google_imagen3_generate_image',
'google_imagen3_edit_image',
'google_imagen4_generate_image',
'chatgpt_4o_generate_image',
'chatgpt_4o_edit_image',
'flux_kontext_generate_image',
'flux_kontext_edit_image',
];

export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) => {
@@ -34,12 +34,14 @@ import type {
FieldValue,
FloatFieldValue,
FloatGeneratorFieldValue,
FluxKontextModelFieldValue,
FluxReduxModelFieldValue,
FluxVAEModelFieldValue,
ImageFieldCollectionValue,
ImageFieldValue,
ImageGeneratorFieldValue,
Imagen3ModelFieldValue,
Imagen4ModelFieldValue,
IntegerFieldCollectionValue,
IntegerFieldValue,
IntegerGeneratorFieldValue,
@@ -74,12 +76,14 @@ import {
zFloatFieldCollectionValue,
zFloatFieldValue,
zFloatGeneratorFieldValue,
zFluxKontextModelFieldValue,
zFluxReduxModelFieldValue,
zFluxVAEModelFieldValue,
zImageFieldCollectionValue,
zImageFieldValue,
zImageGeneratorFieldValue,
zImagen3ModelFieldValue,
zImagen4ModelFieldValue,
zIntegerFieldCollectionValue,
zIntegerFieldValue,
zIntegerGeneratorFieldValue,
@@ -519,9 +523,15 @@ export const nodesSlice = createSlice({
fieldImagen3ModelValueChanged: (state, action: FieldValueAction<Imagen3ModelFieldValue>) => {
fieldValueReducer(state, action, zImagen3ModelFieldValue);
},
fieldImagen4ModelValueChanged: (state, action: FieldValueAction<Imagen4ModelFieldValue>) => {
fieldValueReducer(state, action, zImagen4ModelFieldValue);
},
fieldChatGPT4oModelValueChanged: (state, action: FieldValueAction<ChatGPT4oModelFieldValue>) => {
fieldValueReducer(state, action, zChatGPT4oModelFieldValue);
},
fieldFluxKontextModelValueChanged: (state, action: FieldValueAction<FluxKontextModelFieldValue>) => {
fieldValueReducer(state, action, zFluxKontextModelFieldValue);
},
fieldEnumModelValueChanged: (state, action: FieldValueAction<EnumFieldValue>) => {
fieldValueReducer(state, action, zEnumFieldValue);
},
@@ -690,7 +700,9 @@ export const {
fieldSigLipModelValueChanged,
fieldFluxReduxModelValueChanged,
fieldImagen3ModelValueChanged,
fieldImagen4ModelValueChanged,
fieldChatGPT4oModelValueChanged,
fieldFluxKontextModelValueChanged,
fieldFloatGeneratorValueChanged,
fieldIntegerGeneratorValueChanged,
fieldStringGeneratorValueChanged,
@@ -76,10 +76,23 @@ const zBaseModel = z.enum([
'flux',
'cogview4',
'imagen3',
'imagen4',
'chatgpt-4o',
'flux-kontext',
]);
export type BaseModelType = z.infer<typeof zBaseModel>;
export const zMainModelBase = z.enum(['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'cogview4', 'imagen3', 'chatgpt-4o']);
export const zMainModelBase = z.enum([
'sd-1',
'sd-2',
'sd-3',
'sdxl',
'flux',
'cogview4',
'imagen3',
'imagen4',
'chatgpt-4o',
'flux-kontext',
]);
export type MainModelBase = z.infer<typeof zMainModelBase>;
export const isMainModelBase = (base: unknown): base is MainModelBase => zMainModelBase.safeParse(base).success;
const zModelType = z.enum([
@@ -252,10 +252,18 @@ const zImagen3ModelFieldType = zFieldTypeBase.extend({
name: z.literal('Imagen3ModelField'),
originalType: zStatelessFieldType.optional(),
});
const zImagen4ModelFieldType = zFieldTypeBase.extend({
name: z.literal('Imagen4ModelField'),
originalType: zStatelessFieldType.optional(),
});
const zChatGPT4oModelFieldType = zFieldTypeBase.extend({
name: z.literal('ChatGPT4oModelField'),
originalType: zStatelessFieldType.optional(),
});
const zFluxKontextModelFieldType = zFieldTypeBase.extend({
name: z.literal('FluxKontextModelField'),
originalType: zStatelessFieldType.optional(),
});
const zSchedulerFieldType = zFieldTypeBase.extend({
name: z.literal('SchedulerField'),
originalType: zStatelessFieldType.optional(),
@@ -307,7 +315,9 @@ const zStatefulFieldType = z.union([
zSigLipModelFieldType,
zFluxReduxModelFieldType,
zImagen3ModelFieldType,
zImagen4ModelFieldType,
zChatGPT4oModelFieldType,
zFluxKontextModelFieldType,
zColorFieldType,
zSchedulerFieldType,
zFloatGeneratorFieldType,
@@ -347,7 +357,9 @@ const modelFieldTypeNames = [
zSigLipModelFieldType.shape.name.value,
zFluxReduxModelFieldType.shape.name.value,
zImagen3ModelFieldType.shape.name.value,
zImagen4ModelFieldType.shape.name.value,
zChatGPT4oModelFieldType.shape.name.value,
zFluxKontextModelFieldType.shape.name.value,
// Stateless model fields
'UNetField',
'VAEField',
@@ -1207,6 +1219,42 @@ export const isImagen3ModelFieldInputTemplate =
buildTemplateTypeGuard<Imagen3ModelFieldInputTemplate>('Imagen3ModelField');
// #endregion

// #region Imagen4ModelField
export const zImagen4ModelFieldValue = zModelIdentifierField.optional();
const zImagen4ModelFieldInputInstance = zFieldInputInstanceBase.extend({
value: zImagen4ModelFieldValue,
});
const zImagen4ModelFieldInputTemplate = zFieldInputTemplateBase.extend({
type: zImagen4ModelFieldType,
originalType: zFieldType.optional(),
default: zImagen4ModelFieldValue,
});
export type Imagen4ModelFieldValue = z.infer<typeof zImagen4ModelFieldValue>;
export type Imagen4ModelFieldInputInstance = z.infer<typeof zImagen4ModelFieldInputInstance>;
export type Imagen4ModelFieldInputTemplate = z.infer<typeof zImagen4ModelFieldInputTemplate>;
export const isImagen4ModelFieldInputInstance = buildInstanceTypeGuard(zImagen4ModelFieldInputInstance);
export const isImagen4ModelFieldInputTemplate =
buildTemplateTypeGuard<Imagen4ModelFieldInputTemplate>('Imagen4ModelField');
// #endregion

// #region FluxKontextModelField
export const zFluxKontextModelFieldValue = zModelIdentifierField.optional();
const zFluxKontextModelFieldInputInstance = zFieldInputInstanceBase.extend({
value: zFluxKontextModelFieldValue,
});
const zFluxKontextModelFieldInputTemplate = zFieldInputTemplateBase.extend({
type: zFluxKontextModelFieldType,
originalType: zFieldType.optional(),
default: zFluxKontextModelFieldValue,
});
export type FluxKontextModelFieldValue = z.infer<typeof zFluxKontextModelFieldValue>;
export type FluxKontextModelFieldInputInstance = z.infer<typeof zFluxKontextModelFieldInputInstance>;
export type FluxKontextModelFieldInputTemplate = z.infer<typeof zFluxKontextModelFieldInputTemplate>;
export const isFluxKontextModelFieldInputInstance = buildInstanceTypeGuard(zFluxKontextModelFieldInputInstance);
export const isFluxKontextModelFieldInputTemplate =
buildTemplateTypeGuard<FluxKontextModelFieldInputTemplate>('FluxKontextModelField');
// #endregion

// #region ChatGPT4oModelField
export const zChatGPT4oModelFieldValue = zModelIdentifierField.optional();
const zChatGPT4oModelFieldInputInstance = zFieldInputInstanceBase.extend({
@@ -1857,6 +1905,8 @@ export const zStatefulFieldValue = z.union([
zSigLipModelFieldValue,
zFluxReduxModelFieldValue,
zImagen3ModelFieldValue,
zImagen4ModelFieldValue,
zFluxKontextModelFieldValue,
zChatGPT4oModelFieldValue,
zColorFieldValue,
zSchedulerFieldValue,
@@ -1949,7 +1999,9 @@ const zStatefulFieldInputTemplate = z.union([
zSigLipModelFieldInputTemplate,
zFluxReduxModelFieldInputTemplate,
zImagen3ModelFieldInputTemplate,
zImagen4ModelFieldInputTemplate,
zChatGPT4oModelFieldInputTemplate,
zFluxKontextModelFieldInputTemplate,
zColorFieldInputTemplate,
zSchedulerFieldInputTemplate,
zStatelessFieldInputTemplate,
@@ -52,7 +52,11 @@ export const prepareLinearUIBatch = (arg: {
count: prompts.length * iterations,
// Imagen3's support for seeded generation is iffy, we are just not going to use it in linear UI generations.
start:
model.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
model.base === 'imagen3' || model.base === 'imagen4'
? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
: shouldRandomizeSeed
? undefined
: seed,
});

firstBatchDatumList.push({
@@ -74,7 +78,11 @@ export const prepareLinearUIBatch = (arg: {
count: iterations,
// Imagen3's support for seeded generation is iffy, we are just not going to use it in linear UI generations.
start:
model.base === 'imagen3' ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX) : shouldRandomizeSeed ? undefined : seed,
model.base === 'imagen3' || model.base === 'imagen4'
? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
: shouldRandomizeSeed
? undefined
: seed,
});

secondBatchDatumList.push({
@@ -14,7 +14,7 @@ import type {
VaeSourceNodes,
} from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
import type { ImageDTO, Invocation } from 'services/api/types';

type AddInpaintArg = {
state: RootState;
@@ -29,6 +29,7 @@ type AddInpaintArg = {
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
seed: number;
};

export const addInpaint = async ({
@@ -44,6 +45,7 @@ export const addInpaint = async ({
scaledSize,
denoising_start,
fp32,
seed,
}: AddInpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
denoise.denoising_start = denoising_start;

@@ -51,19 +53,45 @@ export const addInpaint = async ({
const canvasSettings = selectCanvasSettingsSlice(state);
const canvas = selectCanvasSlice(state);

const { bbox } = canvas;
const { rect } = canvas.bbox;

const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, bbox.rect, {
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {
is_intermediate: true,
silent: true,
});

const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
is_intermediate: true,
silent: true,
});

// Get inpaint mask adapters that have noise settings
const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);

// Create a composite noise mask if we have any adapters with noise settings
let noiseMaskImage: ImageDTO | null = null;
if (noiseMaskAdapters.length > 0) {
noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
noiseMaskAdapters,
rect,
'noiseLevel',
canvasSettings.preserveMask,
{
is_intermediate: true,
silent: true,
}
);
}

// Create a composite denoise limit mask
const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
rect,
'denoiseLimit',
canvasSettings.preserveMask,
{
is_intermediate: true,
silent: true,
}
);

const needsScaleBeforeProcessing = !isEqual(scaledSize, originalSize);

@@ -82,15 +110,38 @@ export const addInpaint = async ({
image: { image_name: initialImage.image_name },
...scaledSize,
});
const alphaToMask = g.addNode({
id: getPrefixedId('alpha_to_mask'),
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: !canvasSettings.preserveMask,
});

// If we have a noise mask, apply it to the input image before i2l conversion
if (noiseMaskImage) {
// Resize the noise mask to match the scaled size
const resizeNoiseMaskToScaledSize = g.addNode({
id: getPrefixedId('resize_noise_mask_to_scaled_size'),
type: 'img_resize',
image: { image_name: noiseMaskImage.image_name },
...scaledSize,
});

// Add noise to the scaled image using the mask
const noiseNode = g.addNode({
type: 'img_noise',
id: getPrefixedId('add_inpaint_noise'),
noise_type: 'gaussian',
amount: 1.0, // the mask controls the actual intensity
noise_color: true,
seed: seed,
});

g.addEdge(resizeImageToScaledSize, 'image', noiseNode, 'image');
g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
g.addEdge(noiseNode, 'image', i2l, 'image');
} else {
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
}

const resizeMaskToScaledSize = g.addNode({
id: getPrefixedId('resize_mask_to_scaled_size'),
type: 'img_resize',
image: { image_name: maskImage.image_name },
...scaledSize,
});
const resizeImageToOriginalSize = g.addNode({
@@ -117,12 +168,8 @@ export const addInpaint = async ({
fade_size_px: params.maskBlur,
});

// Resize initial image and mask to scaled size, feed into to gradient mask
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');

g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
if (!isMainModelWithoutUnet(modelLoader)) {
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
@@ -169,12 +216,23 @@ export const addInpaint = async ({
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
});

const alphaToMask = g.addNode({
id: getPrefixedId('alpha_to_mask'),
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: !canvasSettings.preserveMask,
});
// If we have a noise mask, apply it to the input image before i2l conversion
if (noiseMaskImage) {
// Add noise to the scaled image using the mask
const noiseNode = g.addNode({
type: 'img_noise',
id: getPrefixedId('add_inpaint_noise'),
image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
noise_type: 'gaussian',
amount: 1.0, // the mask controls the actual intensity
noise_color: true,
seed: seed,
mask: { image_name: noiseMaskImage.image_name },
});

g.addEdge(noiseNode, 'image', i2l, 'image');
}

const createGradientMask = g.addNode({
id: getPrefixedId('create_gradient_mask'),
type: 'create_gradient_mask',
@@ -183,9 +241,9 @@ export const addInpaint = async ({
edge_radius: params.canvasCoherenceEdgeSize,
fp32,
image: { image_name: initialImage.image_name },
mask: { image_name: maskImage.image_name },
});

g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
@@ -15,7 +15,7 @@ import type {
VaeSourceNodes,
} from 'features/nodes/util/graph/types';
import { isEqual } from 'lodash-es';
import type { Invocation } from 'services/api/types';
import type { ImageDTO, Invocation } from 'services/api/types';

type AddOutpaintArg = {
state: RootState;
@@ -30,6 +30,7 @@ type AddOutpaintArg = {
scaledSize: Dimensions;
denoising_start: number;
fp32: boolean;
seed: number;
};

export const addOutpaint = async ({
@@ -45,6 +46,7 @@ export const addOutpaint = async ({
scaledSize,
denoising_start,
fp32,
seed,
}: AddOutpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
denoise.denoising_start = denoising_start;

@@ -61,10 +63,38 @@ export const addOutpaint = async ({
});

const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
is_intermediate: true,
silent: true,
});

const { rect } = canvas.bbox;

// Get inpaint mask adapters that have noise settings
const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);

// Create a composite noise mask if we have any adapters with noise settings
let noiseMaskImage: ImageDTO | null = null;
if (noiseMaskAdapters.length > 0) {
noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
noiseMaskAdapters,
rect,
'noiseLevel',
canvasSettings.preserveMask,
{
is_intermediate: true,
silent: true,
}
);
}

// Create a composite denoise limit mask
const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
rect,
'denoiseLimit',
canvasSettings.preserveMask,
{
is_intermediate: true,
silent: true,
}
);

const infill = getInfill(g, params);

@@ -72,14 +102,6 @@ export const addOutpaint = async ({

if (needsScaleBeforeProcessing) {
// Scale before processing requires some resizing

// Combine the inpaint mask and the initial image's alpha channel into a single mask
const maskAlphaToMask = g.addNode({
id: getPrefixedId('alpha_to_mask'),
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: !canvasSettings.preserveMask,
});
const initialImageAlphaToMask = g.addNode({
id: getPrefixedId('image_alpha_to_mask'),
type: 'tomask',
@@ -88,8 +110,8 @@ export const addOutpaint = async ({
const maskCombine = g.addNode({
id: getPrefixedId('mask_combine'),
type: 'mask_combine',
mask1: { image_name: maskImage.image_name },
});
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');

// Resize the combined and initial image to the scaled size
@@ -134,7 +156,32 @@ export const addOutpaint = async ({
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
});

g.addEdge(infill, 'image', i2l, 'image');
// If we have a noise mask, apply it to the input image before i2l conversion
if (noiseMaskImage) {
// Resize the noise mask to match the scaled size
const resizeNoiseMaskToScaledSize = g.addNode({
id: getPrefixedId('resize_noise_mask_to_scaled_size'),
type: 'img_resize',
image: { image_name: noiseMaskImage.image_name },
...scaledSize,
});

// Add noise to the scaled image using the mask
const noiseNode = g.addNode({
type: 'img_noise',
id: getPrefixedId('add_inpaint_noise'),
noise_type: 'gaussian',
amount: 1.0, // the mask controls the actual intensity
noise_color: true,
seed: seed,
});

g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
g.addEdge(infill, 'image', noiseNode, 'image');
g.addEdge(noiseNode, 'image', i2l, 'image');
} else {
g.addEdge(infill, 'image', i2l, 'image');
}
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(i2l, 'latents', denoise, 'latents');

@@ -190,12 +237,6 @@ export const addOutpaint = async ({
type: i2lNodeType,
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
});
const maskAlphaToMask = g.addNode({
id: getPrefixedId('mask_alpha_to_mask'),
type: 'tomask',
image: { image_name: maskImage.image_name },
invert: !canvasSettings.preserveMask,
});
const initialImageAlphaToMask = g.addNode({
id: getPrefixedId('image_alpha_to_mask'),
type: 'tomask',
@@ -204,6 +245,7 @@ export const addOutpaint = async ({
const maskCombine = g.addNode({
id: getPrefixedId('mask_combine'),
type: 'mask_combine',
mask1: { image_name: maskImage.image_name },
});
const createGradientMask = g.addNode({
id: getPrefixedId('create_gradient_mask'),
@@ -214,10 +256,29 @@ export const addOutpaint = async ({
fp32,
image: { image_name: initialImage.image_name },
});
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
g.addEdge(infill, 'image', i2l, 'image');

// If we have a noise mask, apply it to the input image before i2l conversion
if (noiseMaskImage) {
// Add noise to the scaled image using the mask
const noiseNode = g.addNode({
type: 'img_noise',
id: getPrefixedId('add_inpaint_noise'),
image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
noise_type: 'gaussian',
amount: 1.0, // the mask controls the actual intensity
noise_color: true,
seed: seed,
mask: { image_name: noiseMaskImage.image_name },
});

g.addEdge(infill, 'image', noiseNode, 'image');
g.addEdge(noiseNode, 'image', i2l, 'image');
} else {
g.addEdge(infill, 'image', i2l, 'image');
}

g.addEdge(i2l, 'latents', denoise, 'latents');
g.addEdge(vaeSource, 'vae', i2l, 'vae');
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
@@ -137,6 +137,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'cogview4_inpaint' });
} else if (generationMode === 'outpaint') {
@@ -153,6 +154,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'cogview4_outpaint' });
} else {
@@ -212,6 +212,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'flux_inpaint' });
} else if (generationMode === 'outpaint') {
@@ -228,6 +229,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'flux_outpaint' });
} else {
@@ -0,0 +1,92 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import type { ImageField } from 'features/nodes/types/common';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { type GraphBuilderReturn, UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

const log = logger('system');

export const buildFluxKontextGraph = async (state: RootState, manager: CanvasManager): Promise<GraphBuilderReturn> => {
const generationMode = await manager.compositor.getGenerationMode();

if (generationMode !== 'txt2img') {
throw new UnsupportedGenerationModeError(t('toast.fluxKontextIncompatibleGenerationMode'));
}

log.debug({ generationMode }, 'Building Flux Kontext graph');

const model = selectMainModelConfig(state);

const canvas = selectCanvasSlice(state);
const canvasSettings = selectCanvasSettingsSlice(state);

const { bbox } = canvas;
const { positivePrompt } = selectPresetModifiedPrompts(state);

assert(model, 'No model found in state');
assert(model.base === 'flux-kontext', 'Model is not a Flux Kontext model');

const is_intermediate = canvasSettings.sendToCanvas;
const board = canvasSettings.sendToCanvas ? undefined : getBoardField(state);

const validRefImages = canvas.referenceImages.entities
.filter((entity) => entity.isEnabled)
.filter((entity) => isFluxKontextReferenceImageConfig(entity.ipAdapter))
.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);

let input_image: ImageField | undefined = undefined;

if (validRefImages[0]) {
assert(validRefImages.length === 1, 'Flux Kontext can have at most one reference image');

assert(validRefImages[0].ipAdapter.image, 'Image is required for reference image');
input_image = {
image_name: validRefImages[0].ipAdapter.image.image_name,
};
}

if (generationMode === 'txt2img') {
const g = new Graph(getPrefixedId('flux_kontext_txt2img_graph'));
const fluxKontextImage = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: input_image ? 'flux_kontext_edit_image' : 'flux_kontext_generate_image',
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
model: zModelIdentifierField.parse(model),
positive_prompt: positivePrompt,
aspect_ratio: bbox.aspectRatio.id,
use_cache: false,
is_intermediate,
board,
input_image,
prompt_upsampling: true,
});
g.upsertMetadata({
positive_prompt: positivePrompt,
model: Graph.getModelMetadataField(model),
width: bbox.rect.width,
height: bbox.rect.height,
});
return {
g,
positivePromptFieldIdentifier: { nodeId: fluxKontextImage.id, fieldName: 'positive_prompt' },
};
}

assert<Equals<typeof generationMode, never>>(false, 'Invalid generation mode for Flux Kontext');
};
@@ -4,7 +4,7 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isImagen3AspectRatioID } from 'features/controlLayers/store/types';
import { isImagenAspectRatioID } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
@@ -24,7 +24,7 @@ export const buildImagen3Graph = async (state: RootState, manager: CanvasManager
const generationMode = await manager.compositor.getGenerationMode();

if (generationMode !== 'txt2img') {
throw new UnsupportedGenerationModeError(t('toast.imagen3IncompatibleGenerationMode'));
throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen3' }));
}

log.debug({ generationMode }, 'Building Imagen3 graph');
@@ -38,7 +38,7 @@ export const buildImagen3Graph = async (state: RootState, manager: CanvasManager

assert(model, 'No model found for Imagen3 graph');
assert(model.base === 'imagen3', 'Imagen3 graph requires Imagen3 model');
assert(isImagen3AspectRatioID(bbox.aspectRatio.id), 'Imagen3 does not support this aspect ratio');
assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen3 does not support this aspect ratio');
assert(positivePrompt.length > 0, 'Imagen3 requires positive prompt to have at least one character');

const is_intermediate = canvasSettings.sendToCanvas;
@@ -0,0 +1,78 @@
import { logger } from 'app/logging/logger';
import type { RootState } from 'app/store/store';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { isImagenAspectRatioID } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
import {
CANVAS_OUTPUT_PREFIX,
getBoardField,
selectPresetModifiedPrompts,
} from 'features/nodes/util/graph/graphBuilderUtils';
import { type GraphBuilderReturn, UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { t } from 'i18next';
import { selectMainModelConfig } from 'services/api/endpoints/models';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

const log = logger('system');

export const buildImagen4Graph = async (state: RootState, manager: CanvasManager): Promise<GraphBuilderReturn> => {
const generationMode = await manager.compositor.getGenerationMode();

if (generationMode !== 'txt2img') {
throw new UnsupportedGenerationModeError(t('toast.imagenIncompatibleGenerationMode', { model: 'Imagen4' }));
}

log.debug({ generationMode }, 'Building Imagen4 graph');

const canvas = selectCanvasSlice(state);
const canvasSettings = selectCanvasSettingsSlice(state);

const { bbox } = canvas;
const { positivePrompt, negativePrompt } = selectPresetModifiedPrompts(state);
const model = selectMainModelConfig(state);

assert(model, 'No model found for Imagen4 graph');
assert(model.base === 'imagen4', 'Imagen4 graph requires Imagen4 model');
assert(isImagenAspectRatioID(bbox.aspectRatio.id), 'Imagen4 does not support this aspect ratio');
assert(positivePrompt.length > 0, 'Imagen4 requires positive prompt to have at least one character');

const is_intermediate = canvasSettings.sendToCanvas;
const board = canvasSettings.sendToCanvas ? undefined : getBoardField(state);

if (generationMode === 'txt2img') {
const g = new Graph(getPrefixedId('imagen4_txt2img_graph'));
const imagen4 = g.addNode({
// @ts-expect-error: These nodes are not available in the OSS application
type: 'google_imagen4_generate_image',
id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
model: zModelIdentifierField.parse(model),
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
aspect_ratio: bbox.aspectRatio.id,
enhance_prompt: true,
// When enhance_prompt is true, Imagen4 will return a new image every time, ignoring the seed.
use_cache: false,
is_intermediate,
board,
});
g.upsertMetadata({
positive_prompt: positivePrompt,
negative_prompt: negativePrompt,
width: bbox.rect.width,
height: bbox.rect.height,
model: Graph.getModelMetadataField(model),
});
return {
g,
seedFieldIdentifier: { nodeId: imagen4.id, fieldName: 'seed' },
positivePromptFieldIdentifier: { nodeId: imagen4.id, fieldName: 'positive_prompt' },
};
}

assert<Equals<typeof generationMode, never>>(false, 'Invalid generation mode for Imagen4');
};
@@ -198,6 +198,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
scaledSize,
denoising_start,
fp32: vaePrecision === 'fp32',
seed,
});
g.upsertMetadata({ generation_mode: 'inpaint' });
} else if (generationMode === 'outpaint') {
@@ -214,6 +215,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
scaledSize,
denoising_start,
fp32,
seed,
});
g.upsertMetadata({ generation_mode: 'outpaint' });
} else {
@@ -162,6 +162,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'sd3_inpaint' });
} else if (generationMode === 'outpaint') {
@@ -178,6 +179,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
scaledSize,
denoising_start,
fp32: false,
seed,
});
g.upsertMetadata({ generation_mode: 'sd3_outpaint' });
} else {
@@ -205,6 +205,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sdxl_inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -221,6 +222,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sdxl_outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -34,7 +34,9 @@ const FIELD_VALUE_FALLBACK_MAP: Record<StatefulFieldType['name'], FieldValue> =
  SigLipModelField: undefined,
  FluxReduxModelField: undefined,
  Imagen3ModelField: undefined,
  Imagen4ModelField: undefined,
  ChatGPT4oModelField: undefined,
  FluxKontextModelField: undefined,
  FloatGeneratorField: undefined,
  IntegerGeneratorField: undefined,
  StringGeneratorField: undefined,

@@ -16,6 +16,7 @@ import type {
  FloatFieldCollectionInputTemplate,
  FloatFieldInputTemplate,
  FloatGeneratorFieldInputTemplate,
  FluxKontextModelFieldInputTemplate,
  FluxMainModelFieldInputTemplate,
  FluxReduxModelFieldInputTemplate,
  FluxVAEModelFieldInputTemplate,
@@ -23,6 +24,7 @@ import type {
  ImageFieldInputTemplate,
  ImageGeneratorFieldInputTemplate,
  Imagen3ModelFieldInputTemplate,
  Imagen4ModelFieldInputTemplate,
  IntegerFieldCollectionInputTemplate,
  IntegerFieldInputTemplate,
  IntegerGeneratorFieldInputTemplate,
@@ -600,6 +602,32 @@ const buildImagen3ModelFieldInputTemplate: FieldInputTemplateBuilder<Imagen3Mode
  return template;
};

const buildImagen4ModelFieldInputTemplate: FieldInputTemplateBuilder<Imagen4ModelFieldInputTemplate> = ({
  schemaObject,
  baseField,
  fieldType,
}) => {
  const template: Imagen4ModelFieldInputTemplate = {
    ...baseField,
    type: fieldType,
    default: schemaObject.default ?? undefined,
  };
  return template;
};

const buildFluxKontextModelFieldInputTemplate: FieldInputTemplateBuilder<FluxKontextModelFieldInputTemplate> = ({
  schemaObject,
  baseField,
  fieldType,
}) => {
  const template: FluxKontextModelFieldInputTemplate = {
    ...baseField,
    type: fieldType,
    default: schemaObject.default ?? undefined,
  };
  return template;
};

const buildChatGPT4oModelFieldInputTemplate: FieldInputTemplateBuilder<ChatGPT4oModelFieldInputTemplate> = ({
  schemaObject,
  baseField,
@@ -820,7 +848,9 @@ export const TEMPLATE_BUILDER_MAP: Record<StatefulFieldType['name'], FieldInputT
  SigLipModelField: buildSigLipModelFieldInputTemplate,
  FluxReduxModelField: buildFluxReduxModelFieldInputTemplate,
  Imagen3ModelField: buildImagen3ModelFieldInputTemplate,
  Imagen4ModelField: buildImagen4ModelFieldInputTemplate,
  ChatGPT4oModelField: buildChatGPT4oModelFieldInputTemplate,
  FluxKontextModelField: buildFluxKontextModelFieldInputTemplate,
  FloatGeneratorField: buildFloatGeneratorFieldInputTemplate,
  IntegerGeneratorField: buildIntegerGeneratorFieldInputTemplate,
  StringGeneratorField: buildStringGeneratorFieldInputTemplate,

@@ -3,12 +3,18 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { bboxAspectRatioIdChanged } from 'features/controlLayers/store/canvasSlice';
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { selectIsChatGTP4o, selectIsImagen3 } from 'features/controlLayers/store/paramsSlice';
import {
  selectIsChatGTP4o,
  selectIsFluxKontext,
  selectIsImagen3,
  selectIsImagen4,
} from 'features/controlLayers/store/paramsSlice';
import { selectAspectRatioID } from 'features/controlLayers/store/selectors';
import {
  isAspectRatioID,
  zAspectRatioID,
  zChatGPT4oAspectRatioID,
  zFluxKontextAspectRatioID,
  zImagen3AspectRatioID,
} from 'features/controlLayers/store/types';
import type { ChangeEventHandler } from 'react';
@@ -23,18 +29,22 @@ export const BboxAspectRatioSelect = memo(() => {
  const isStaging = useAppSelector(selectIsStaging);
  const isImagen3 = useAppSelector(selectIsImagen3);
  const isChatGPT4o = useAppSelector(selectIsChatGTP4o);

  const isImagen4 = useAppSelector(selectIsImagen4);
  const isFluxKontext = useAppSelector(selectIsFluxKontext);
  const options = useMemo(() => {
    // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes
    if (isImagen3) {
    if (isImagen3 || isImagen4) {
      return zImagen3AspectRatioID.options;
    }
    if (isChatGPT4o) {
      return zChatGPT4oAspectRatioID.options;
    }
    if (isFluxKontext) {
      return zFluxKontextAspectRatioID.options;
    }
    // All other models
    return zAspectRatioID.options;
  }, [isImagen3, isChatGPT4o]);
  }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext]);

  const onChange = useCallback<ChangeEventHandler<HTMLSelectElement>>(
    (e) => {

@@ -1,6 +1,7 @@
import type { AspectRatioID } from 'features/controlLayers/store/types';

export const ASPECT_RATIO_MAP: Record<Exclude<AspectRatioID, 'Free'>, { ratio: number; inverseID: AspectRatioID }> = {
  '21:9': { ratio: 21 / 9, inverseID: '9:21' },
  '16:9': { ratio: 16 / 9, inverseID: '9:16' },
  '3:2': { ratio: 3 / 2, inverseID: '2:3' },
  '4:3': { ratio: 4 / 3, inverseID: '3:4' },
@@ -8,4 +9,5 @@ export const ASPECT_RATIO_MAP: Record<Exclude<AspectRatioID, 'Free'>, { ratio: n
  '3:4': { ratio: 3 / 4, inverseID: '4:3' },
  '2:3': { ratio: 2 / 3, inverseID: '3:2' },
  '9:16': { ratio: 9 / 16, inverseID: '16:9' },
  '9:21': { ratio: 9 / 21, inverseID: '21:9' },
};
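A small usage sketch, not part of the diff, of the inverseID entries defined above: swapping orientation is a single lookup. flipAspectRatio is a hypothetical helper name.

const flipAspectRatio = (id: Exclude<AspectRatioID, 'Free'>): AspectRatioID => ASPECT_RATIO_MAP[id].inverseID;

flipAspectRatio('16:9'); // '9:16'
flipAspectRatio('9:21'); // '21:9'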
@@ -1,10 +1,10 @@
import { useAppSelector } from 'app/store/storeHooks';
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { selectIsChatGTP4o, selectIsImagen3 } from 'features/controlLayers/store/paramsSlice';
import { useIsApiModel } from 'features/parameters/hooks/useIsApiModel';

export const useIsBboxSizeLocked = () => {
  const isStaging = useAppSelector(selectIsStaging);
  const isImagen3 = useAppSelector(selectIsImagen3);
  const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
  return isImagen3 || isChatGPT4o || isStaging;
  const isApiModel = useIsApiModel();

  return isApiModel || isStaging;
};

@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { negativePromptChanged, selectNegativePrompt } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -15,12 +16,20 @@ import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
};

export const ParamNegativePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectNegativePrompt);
  const viewMode = useAppSelector(selectStylePresetViewMode);
  const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);

  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('negative_prompt', textareaRef, persistOptions);

  const { activeStylePreset } = useListStylePresetsQuery(undefined, {
    selectFromResult: ({ data }) => {
      let activeStylePreset = null;
@@ -31,7 +40,6 @@ export const ParamNegativePrompt = memo(() => {
    },
  });

  const textareaRef = useRef<HTMLTextAreaElement>(null);
  const { t } = useTranslation();
  const _onChange = useCallback(
    (v: string) => {
@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { positivePromptChanged, selectBase, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice';
import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
@@ -19,6 +20,12 @@ import type { HotkeyCallback } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
  initialHeight: 120,
};

export const ParamPositivePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectPositivePrompt);
@@ -26,6 +33,9 @@ export const ParamPositivePrompt = memo(() => {
  const viewMode = useAppSelector(selectStylePresetViewMode);
  const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);

  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('positive_prompt', textareaRef, persistOptions);

  const { activeStylePreset } = useListStylePresetsQuery(undefined, {
    selectFromResult: ({ data }) => {
      let activeStylePreset = null;
@@ -36,7 +46,6 @@ export const ParamPositivePrompt = memo(() => {
    },
  });

  const textareaRef = useRef<HTMLTextAreaElement>(null);
  const { t } = useTranslation();
  const handleChange = useCallback(
    (v: string) => {
@@ -75,7 +84,6 @@ export const ParamPositivePrompt = memo(() => {
ref={textareaRef}
value={prompt}
onChange={onChange}
minH={40}
onKeyDown={onKeyDown}
variant="darkFilled"
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
@@ -83,6 +91,8 @@ export const ParamPositivePrompt = memo(() => {
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
resize="vertical"
minH={28}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
@@ -2,23 +2,18 @@ import { Flex, Link, Text } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { useAppSelector } from 'app/store/storeHooks';
import { selectIsChatGTP4o, selectModel } from 'features/controlLayers/store/paramsSlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useMemo } from 'react';
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled';
import { Trans, useTranslation } from 'react-i18next';

export const DisabledModelWarning = () => {
  const { t } = useTranslation();
  const model = useAppSelector(selectModel);
  const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
  const areChatGPT4oModelsEnabled = useFeatureStatus('chatGPT4oModels');

  const accountSettingsLink = useStore($accountSettingsLink);
  const { isChatGPT4oHighModelDisabled } = useIsModelDisabled();

  const isModelDisabled = useMemo(() => {
    return isChatGPT4o && !areChatGPT4oModelsEnabled;
  }, [isChatGPT4o, areChatGPT4oModelsEnabled]);

  if (!isModelDisabled) {
  if (!model || !isChatGPT4oHighModelDisabled(model)) {
    return null;
  }
@@ -12,7 +12,8 @@ import {
  Text,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppDispatch } from 'app/store/storeHooks';
import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import type { Group, PickerContextState } from 'common/components/Picker/Picker';
import { buildGroup, getRegex, Picker, usePickerContext } from 'common/components/Picker/Picker';
import { useDisclosure } from 'common/hooks/useBoolean';
@@ -21,7 +22,8 @@ import { $installModelsTab } from 'features/modelManagerV2/subpanels/InstallMode
import { BASE_COLOR_MAP } from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge';
import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
import { MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import { API_BASE_MODELS, MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { filesize } from 'filesize';
import { memo, useCallback, useMemo, useRef } from 'react';
@@ -32,12 +34,23 @@ import type { AnyModelConfig, BaseModelType } from 'services/api/types';
const getOptionId = (modelConfig: AnyModelConfig) => modelConfig.key;

const ModelManagerLink = memo((props: ButtonProps) => {
  const onClickGoToModelManager = useStore($onClickGoToModelManager);
  const dispatch = useAppDispatch();
  const onClick = useCallback(() => {
    dispatch(setActiveTab('models'));
    $installModelsTab.set(3);
  }, [dispatch]);
  return <Button size="sm" flexGrow={0} variant="link" color="base.200" onClick={onClick} {...props} />;

  return (
    <Button
      size="sm"
      flexGrow={0}
      variant="link"
      color="base.200"
      onClick={onClickGoToModelManager ?? onClick}
      {...props}
    />
  );
});
ModelManagerLink.displayName = 'ModelManagerLink';
@@ -47,40 +60,45 @@ const components = {

const NoOptionsFallback = memo(() => {
  const { t } = useTranslation();
  const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled);
  const onClickGoToModelManager = useStore($onClickGoToModelManager);

  return (
    <Flex flexDir="column" gap={4} alignItems="center">
      <Text color="base.200">{t('modelManager.modelPickerFallbackNoModelsInstalled')}</Text>
      <Text color="base.200">
        <Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
      </Text>
      {(!isModelsTabDisabled || onClickGoToModelManager) && (
        <Text color="base.200">
          <Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
        </Text>
      )}
    </Flex>
  );
});
NoOptionsFallback.displayName = 'NoOptionsFallback';

const getGroupIDFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (modelConfig.base === 'chatgpt-4o' || modelConfig.base === 'imagen3') {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'api';
  }
  return modelConfig.base;
};

const getGroupNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (modelConfig.base === 'chatgpt-4o' || modelConfig.base === 'imagen3') {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'External API';
  }
  return MODEL_TYPE_MAP[modelConfig.base];
};

const getGroupShortNameFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (modelConfig.base === 'chatgpt-4o' || modelConfig.base === 'imagen3') {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'api';
  }
  return MODEL_TYPE_SHORT_MAP[modelConfig.base];
};

const getGroupColorSchemeFromModelConfig = (modelConfig: AnyModelConfig): string => {
  if (modelConfig.base === 'chatgpt-4o' || modelConfig.base === 'imagen3') {
  if (API_BASE_MODELS.includes(modelConfig.base)) {
    return 'pink';
  }
  return BASE_COLOR_MAP[modelConfig.base];
@@ -0,0 +1,16 @@
import { useAppSelector } from 'app/store/storeHooks';
import {
  selectIsChatGTP4o,
  selectIsFluxKontext,
  selectIsImagen3,
  selectIsImagen4,
} from 'features/controlLayers/store/paramsSlice';

export const useIsApiModel = () => {
  const isImagen3 = useAppSelector(selectIsImagen3);
  const isImagen4 = useAppSelector(selectIsImagen4);
  const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
  const isFluxKontext = useAppSelector(selectIsFluxKontext);

  return isImagen3 || isImagen4 || isChatGPT4o || isFluxKontext;
};
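A minimal sketch, not taken from the diff, of how a component might consume the new useIsApiModel hook; SizeControls and WidthAndHeightControls are hypothetical names.

const SizeControls = () => {
  // API-backed models (Imagen, ChatGPT 4o, Flux Kontext) use fixed aspect ratios,
  // so a single hook call replaces per-base checks when hiding freeform size controls.
  const isApiModel = useIsApiModel();
  if (isApiModel) {
    return null;
  }
  return <WidthAndHeightControls />;
};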
@@ -0,0 +1,16 @@
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useCallback } from 'react';

export const useIsModelDisabled = () => {
  const isChatGPT4oHighEnabled = useFeatureStatus('chatGPT4oHigh');

  const isChatGPT4oHighModelDisabled = useCallback(
    (model: ParameterModel) => {
      return model?.base === 'chatgpt-4o' && model.name.toLowerCase().includes('high') && !isChatGPT4oHighEnabled;
    },
    [isChatGPT4oHighEnabled]
  );

  return { isChatGPT4oHighModelDisabled };
};
@@ -14,7 +14,9 @@ export const MODEL_TYPE_MAP: Record<BaseModelType, string> = {
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
};

/**
@@ -30,7 +32,9 @@ export const MODEL_TYPE_SHORT_MAP: Record<BaseModelType, string> = {
  flux: 'FLUX',
  cogview4: 'CogView4',
  imagen3: 'Imagen3',
  imagen4: 'Imagen4',
  'chatgpt-4o': 'ChatGPT 4o',
  'flux-kontext': 'Flux Kontext',
};

/**
@@ -73,10 +77,18 @@ export const CLIP_SKIP_MAP: Record<BaseModelType, { maxClip: number; markers: nu
    maxClip: 0,
    markers: [],
  },
  imagen4: {
    maxClip: 0,
    markers: [],
  },
  'chatgpt-4o': {
    maxClip: 0,
    markers: [],
  },
  'flux-kontext': {
    maxClip: 0,
    markers: [],
  },
};

/**
@@ -114,3 +126,8 @@ export const SCHEDULER_OPTIONS: ComboboxOption[] = [
  { value: 'unipc', label: 'UniPC' },
  { value: 'unipc_k', label: 'UniPC Karras' },
];

/**
 * List of base models that make API requests
 */
export const API_BASE_MODELS = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext'];
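A short sketch, not in the diff, of the pattern this constant enables: the repeated base === 'imagen3' || base === 'chatgpt-4o' checks collapse into one membership test. isApiBase is a hypothetical helper.

const isApiBase = (base: BaseModelType): boolean => API_BASE_MODELS.includes(base);

isApiBase('imagen4'); // true
isApiBase('sdxl'); // false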
@@ -19,12 +19,48 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
    case 'sd-3':
    case 'cogview4':
    case 'imagen3':
    case 'imagen4':
    case 'chatgpt-4o':
    case 'flux-kontext':
    default:
      return 1024;
  }
};

const SDXL_TRAINING_DIMENSIONS: [number, number][] = [
  [512, 2048],
  [512, 1984],
  [512, 1920],
  [512, 1856],
  [576, 1792],
  [576, 1728],
  [576, 1664],
  [640, 1600],
  [640, 1536],
  [704, 1472],
  [704, 1408],
  [704, 1344],
  [768, 1344],
  [768, 1280],
  [832, 1216],
  [832, 1152],
  [896, 1152],
  [896, 1088],
  [960, 1088],
  [960, 1024],
  [1024, 1024],
];

/**
 * Checks if the given width and height are in the SDXL training dimensions.
 * @param width The width to check
 * @param height The height to check
 * @returns Whether the width and height are in the SDXL training dimensions (order agnostic)
 */
export const isInSDXLTrainingDimensions = (width: number, height: number): boolean => {
  return SDXL_TRAINING_DIMENSIONS.some(([w, h]) => (w === width && h === height) || (w === height && h === width));
};
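A brief usage sketch, not part of the diff, of the order-agnostic check above:

isInSDXLTrainingDimensions(832, 1216); // true: [832, 1216] is a listed bucket
isInSDXLTrainingDimensions(1216, 832); // true: the swapped pair matches too
isInSDXLTrainingDimensions(800, 1200); // false: not an SDXL training bucket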
/**
 * Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8.
 * - sd-1, sd-2, sdxl: 8
@@ -46,6 +82,7 @@ export const getGridSize = (base?: BaseModelType | null): number => {
    case 'sdxl':
    case 'imagen3':
    case 'chatgpt-4o':
    case 'flux-kontext':
    default:
      return 8;
  }
@@ -14,7 +14,7 @@ import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildW
import { groupBy } from 'lodash-es';
import { useCallback } from 'react';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Batch, EnqueueBatchArg } from 'services/api/types';
import type { Batch, EnqueueBatchArg, S } from 'services/api/types';
import { assert } from 'tsafe';

const enqueueRequestedWorkflows = createAction('app/enqueueRequestedWorkflows');
@@ -106,12 +106,13 @@ export const useEnqueueWorkflows = () => {
  // Derive the input fields from the builder's selected node field elements
  const fieldIdentifiers = selectFieldIdentifiersWithInvocationTypes(state);
  const inputs = getPublishInputs(fieldIdentifiers, templates);
  const api_input_fields = inputs.publishable.map(({ nodeId, fieldName }) => {
  const api_input_fields = inputs.publishable.map(({ nodeId, fieldName, label }) => {
    return {
      kind: 'input',
      node_id: nodeId,
      field_name: fieldName,
    } as const;
      user_label: label,
    } satisfies S['FieldIdentifier'];
  });

  // Derive the output fields from the builder's selected output node
@@ -126,7 +127,8 @@ export const useEnqueueWorkflows = () => {
      kind: 'output',
      node_id: outputNodeId,
      field_name: fieldName,
    } as const;
      user_label: null,
    } satisfies S['FieldIdentifier'];
  });

  assert(nodesState.id, 'Workflow without ID cannot be used for API validation run');
@@ -31,10 +31,11 @@ import type { WorkflowSettingsState } from 'features/nodes/store/workflowSetting
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isBatchNode, isExecutableNode, isInvocationNode } from 'features/nodes/types/invocation';
import { resolveBatchValue } from 'features/nodes/util/node/resolveBatchValue';
import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled';
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { getGridSize } from 'features/parameters/util/optimalDimension';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
@@ -89,7 +90,7 @@ const debouncedUpdateReasons = debounce(
    config: AppConfig,
    store: AppStore,
    isInPublishFlow: boolean,
    areChatGPT4oModelsEnabled: boolean
    isChatGPT4oHighModelDisabled: (model: ParameterModel) => boolean
  ) => {
    if (tab === 'canvas') {
      const model = selectMainModelConfig(store.getState());
@@ -104,7 +105,7 @@ const debouncedUpdateReasons = debounce(
        canvasIsRasterizing,
        canvasIsCompositing,
        canvasIsSelectingObject,
        areChatGPT4oModelsEnabled,
        isChatGPT4oHighModelDisabled,
      });
      $reasonsWhyCannotEnqueue.set(reasons);
    } else if (tab === 'workflows') {
@@ -152,7 +153,7 @@ export const useReadinessWatcher = () => {
  const canvasIsSelectingObject = useStore(canvasManager?.stateApi.$isSegmenting ?? $true);
  const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);
  const isInPublishFlow = useStore($isInPublishFlow);
  const areChatGPT4oModelsEnabled = useFeatureStatus('chatGPT4oModels');
  const { isChatGPT4oHighModelDisabled } = useIsModelDisabled();

  useEffect(() => {
    debouncedUpdateReasons(
@@ -173,7 +174,7 @@ export const useReadinessWatcher = () => {
      config,
      store,
      isInPublishFlow,
      areChatGPT4oModelsEnabled
      isChatGPT4oHighModelDisabled
    );
  }, [
    store,
@@ -193,7 +194,7 @@ export const useReadinessWatcher = () => {
    upscale,
    workflowSettings,
    isInPublishFlow,
    areChatGPT4oModelsEnabled,
    isChatGPT4oHighModelDisabled,
  ]);
};
@@ -341,7 +342,7 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
  canvasIsRasterizing: boolean;
  canvasIsCompositing: boolean;
  canvasIsSelectingObject: boolean;
  areChatGPT4oModelsEnabled: boolean;
  isChatGPT4oHighModelDisabled: (model: ParameterModel) => boolean;
}) => {
  const {
    isConnected,
@@ -354,7 +355,7 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
    canvasIsRasterizing,
    canvasIsCompositing,
    canvasIsSelectingObject,
    areChatGPT4oModelsEnabled,
    isChatGPT4oHighModelDisabled,
  } = arg;
  const { positivePrompt } = params;
  const reasons: Reason[] = [];
@@ -487,7 +488,7 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
    }
  }

  if (model?.base === 'chatgpt-4o' && !areChatGPT4oModelsEnabled) {
  if (model && isChatGPT4oHighModelDisabled(model)) {
    reasons.push({ content: i18n.t('parameters.invoke.modelDisabledForTrial', { modelName: model.name }) });
  }

@@ -515,6 +516,17 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
    }
  });

  const enabledGlobalReferenceLayers = canvas.referenceImages.entities.filter(
    (referenceImage) => referenceImage.isEnabled
  );

  // Flux Kontext only supports 1x Reference Image at a time.
  const referenceImageCount = enabledGlobalReferenceLayers.length;

  if (model?.base === 'flux-kontext' && referenceImageCount > 1) {
    reasons.push({ content: i18n.t('parameters.invoke.fluxKontextMultipleReferenceImages') });
  }

  canvas.referenceImages.entities
    .filter((entity) => entity.isEnabled)
    .forEach((entity, i) => {
@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { negativePrompt2Changed, selectNegativePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
};

export const ParamSDXLNegativeStylePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectNegativePrompt2);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('negative_style_prompt', textareaRef, persistOptions);

  const { t } = useTranslation();
  const handleChange = useCallback(
    (v: string) => {
@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { positivePrompt2Changed, selectPositivePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
};

export const ParamSDXLPositiveStylePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectPositivePrompt2);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('positive_style_prompt', textareaRef, persistOptions);

  const { t } = useTranslation();
  const handleChange = useCallback(
    (v: string) => {
@@ -4,13 +4,7 @@ import { EMPTY_ARRAY } from 'app/store/constants';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import {
  selectIsChatGTP4o,
  selectIsCogView4,
  selectIsFLUX,
  selectIsImagen3,
  selectIsSD3,
} from 'features/controlLayers/store/paramsSlice';
import { selectIsCogView4, selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { LoRAList } from 'features/lora/components/LoRAList';
import LoRASelect from 'features/lora/components/LoRASelect';
import ParamCFGScale from 'features/parameters/components/Core/ParamCFGScale';
@@ -20,6 +14,8 @@ import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import { DisabledModelWarning } from 'features/parameters/components/MainModel/DisabledModelWarning';
import ParamUpscaleCFGScale from 'features/parameters/components/Upscale/ParamUpscaleCFGScale';
import ParamUpscaleScheduler from 'features/parameters/components/Upscale/ParamUpscaleScheduler';
import { useIsApiModel } from 'features/parameters/hooks/useIsApiModel';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { MainModelPicker } from 'features/settingsAccordions/components/GenerationSettingsAccordion/MainModelPicker';
import { useExpanderToggle } from 'features/settingsAccordions/hooks/useExpanderToggle';
import { useStandaloneAccordionToggle } from 'features/settingsAccordions/hooks/useStandaloneAccordionToggle';
@@ -40,12 +36,8 @@ export const GenerationSettingsAccordion = memo(() => {
  const isFLUX = useAppSelector(selectIsFLUX);
  const isSD3 = useAppSelector(selectIsSD3);
  const isCogView4 = useAppSelector(selectIsCogView4);
  const isImagen3 = useAppSelector(selectIsImagen3);
  const isChatGPT4o = useAppSelector(selectIsChatGTP4o);

  const isApiModel = useMemo(() => {
    return isImagen3 || isChatGPT4o;
  }, [isImagen3, isChatGPT4o]);
  const isApiModel = useIsApiModel();

  const isUpscaling = useMemo(() => {
    return activeTabName === 'upscaling';
@@ -56,7 +48,7 @@ export const GenerationSettingsAccordion = memo(() => {
  const enabledLoRAsCount = loras.loras.filter((l) => l.isEnabled).length;
  const loraTabBadges = enabledLoRAsCount ? [`${enabledLoRAsCount} ${t('models.concepts')}`] : EMPTY_ARRAY;
  const accordionBadges =
    modelConfig?.base === 'imagen3' || modelConfig?.base === 'chatgpt-4o'
    modelConfig && API_BASE_MODELS.includes(modelConfig.base)
      ? [modelConfig.name]
      : modelConfig
        ? [modelConfig.name, modelConfig.base]
Some files were not shown because too many files have changed in this diff.