Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-19 06:48:19 -05:00
Compare commits
123 Commits
SHA1s:
4bfa6439d4, a8d7969a1d, 46bfa24af3, a8cb8e128d, 8cef0f5bf5, 911baeb58b, 312960645b, 50cf285efb, a214f4fff5, 2981591c36,
b08f90c99f, ab8c739cd8, 5c5108c28a, 3df7cfd605, 1ff3d44dba, c80ad90f72, 3b4d1b8786, c66201c7e1, 35c7c59455, 85f98ab3eb,
dac75685be, d7b5a8b298, d3ecaa740f, b5a6765a3d, 3704573ef8, 01fbf2ce4d, 96e7003449, 80197b8856, 0187bc671e, 31584daabe,
a6cb522fed, f70be1e415, a2901f2b46, b61c66c3a9, c77f9ec202, 2c5c35647f, bf0fdbd10e, 731d317a42, e81579f752, 9a10e98c0b,
27fdc139b7, 0a00805afc, 7b38143fbd, 4c5ad1b7d7, d80cc962ad, 7ccabfa200, 936d59cc52, fc16fb6099, c848cbc2e3, 66fd0f0d8a,
c266f39f06, 98a44fa4d7, c1d230f961, 68108435ae, e121bf1f62, 4835c344b3, a589dec122, bc67d5c841, f3d5691c04, b98abc2457,
7e527ccfb7, 0f0c911845, e4818b967b, ce3eede26f, d98725c5e9, 31a96d2945, 845a321a43, 87a44a28ef, d5b9c3ee5a, 91db136cd1,
f351ad4b66, fb6fb9abbd, 675c990486, 6ee5cde4bb, c8077f9430, 6aabe9959e, 0b58d172d2, d7c6e293d7, c600bc867d, f4140dd772,
a2d8261d40, bce88a8873, b37e1a3ad6, 35a088e0a6, b936cab039, 34e4093408, d7f93c3cc0, d4c4926caa, 558c7db055, 2ece59b51b,
7dbe39957c, 6fa46d35a5, b2a2b38ea8, 12934da390, 231bc18188, 530cd180c5, 2a92e7b920, 019e057e29, 9aa26f883e, 3f727e24b1,
9e90bf1b20, db3964797f, 881efbda1b, e9ce2ed5f2, 53ac9eafbf, 9e095006a5, 21b24c3ba6, 139ecc10ce, 78ea143b46, 174249ec15,
2510ad7431, ba5e855a60, 23627cf18d, 5e20c9a1ca, 933cf5f276, 41316de659, 041ccfd68e, ad24c203a4, 3fd28ce600, 32df3bdf6e,
ba69e89e8c, a8e0c48ddc, 66f6571086
.github/CODEOWNERS (vendored): 24 lines changed
@@ -1,5 +1,5 @@
|
||||
# continuous integration
|
||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku
|
||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious
|
||||
|
||||
# documentation
|
||||
/docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
|
||||
@@ -9,13 +9,13 @@
|
||||
/invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku
|
||||
|
||||
# installation and configuration
|
||||
/pyproject.toml @lstein @blessedcoolant @hipsterusername
|
||||
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||
/scripts/ @ebr @lstein @hipsterusername
|
||||
/installer/ @lstein @ebr @hipsterusername
|
||||
/invokeai/assets @lstein @ebr @hipsterusername
|
||||
/invokeai/configs @lstein @hipsterusername
|
||||
/invokeai/version @lstein @blessedcoolant @hipsterusername
|
||||
/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
|
||||
/scripts/ @ebr @lstein @psychedelicious @hipsterusername
|
||||
/installer/ @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/configs @lstein @psychedelicious @hipsterusername
|
||||
/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
|
||||
# web ui
|
||||
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
|
||||
@@ -24,8 +24,8 @@
|
||||
/invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp
|
||||
|
||||
# front ends
|
||||
/invokeai/frontend/CLI @lstein @hipsterusername
|
||||
/invokeai/frontend/install @lstein @ebr @hipsterusername
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
|
||||
|
||||
.github/workflows/python-checks.yml (vendored): 4 lines changed
@@ -67,6 +67,10 @@ jobs:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
|
||||
- name: check pypi classifiers
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
|
||||
|
||||
- name: ruff check
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv tool run ruff@0.11.2 check --output-format=github .
|
||||
|
||||
@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
|
||||
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
|
||||
|
||||
=== "Invoke v5.10.0 and later"
|
||||
=== "Invoke v5.12 and later"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
|
||||
- **In all other cases, do not use an index.**
|
||||
|
||||
=== "Invoke v5.10.0 to v5.11.0"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
|
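The selection rules above reduce to a small lookup. The sketch below is illustrative only (the helper name and the platform/GPU strings are assumptions, not part of Invoke or its installer); it encodes the "v5.12 and later" table:

```python
# Hypothetical helper encoding the v5.12+ index-URL rules listed above.
def choose_torch_index_url(os_name: str, gpu: str | None) -> str | None:
    """Return the extra index URL to pass to the installer, or None to use the default index."""
    if os_name in ("windows", "linux") and gpu == "nvidia":
        return "https://download.pytorch.org/whl/cu128"
    if os_name == "linux" and gpu == "amd":
        return "https://download.pytorch.org/whl/rocm6.2.4"
    if os_name == "linux" and gpu is None:
        return "https://download.pytorch.org/whl/cpu"
    return None  # all other cases: do not use an index


print(choose_torch_index_url("linux", "amd"))  # https://download.pytorch.org/whl/rocm6.2.4
```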
||||
@@ -13,6 +13,7 @@ If you'd prefer, you can also just download the whole node folder from the linke
|
||||
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
|
||||
|
||||
- Community Nodes
|
||||
+ [Anamorphic Tools](#anamorphic-tools)
|
||||
+ [Adapters-Linked](#adapters-linked-nodes)
|
||||
+ [Autostereogram](#autostereogram-nodes)
|
||||
+ [Average Images](#average-images)
|
||||
@@ -20,9 +21,12 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
+ [Close Color Mask](#close-color-mask)
|
||||
+ [Clothing Mask](#clothing-mask)
|
||||
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
|
||||
+ [Curves](#curves)
|
||||
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
|
||||
+ [Enhance Detail](#enhance-detail)
|
||||
+ [Film Grain](#film-grain)
|
||||
+ [Flip Pose](#flip-pose)
|
||||
+ [Flux Ideal Size](#flux-ideal-size)
|
||||
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
|
||||
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
||||
+ [Grid to Gif](#grid-to-gif)
|
||||
@@ -61,6 +65,13 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
- [Help](#help)
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Anamorphic Tools
|
||||
|
||||
**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/anamorphic-tools
|
||||
|
||||
--------------------------------
|
||||
### Adapters Linked Nodes
|
||||
|
||||
@@ -132,6 +143,13 @@ Node Link: https://github.com/VeyDlin/clahe-node
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Curves
|
||||
|
||||
**Description:** Adjust an image's curve based on a user-defined string.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/curves-node
|
||||
|
||||
--------------------------------
|
||||
### Depth Map from Wavefront OBJ
|
||||
|
||||
@@ -162,6 +180,20 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/film-grain-node
|
||||
|
||||
--------------------------------
|
||||
### Flip Pose
|
||||
|
||||
**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flip-pose-node
|
||||
|
||||
--------------------------------
|
||||
### Flux Ideal Size
|
||||
|
||||
**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flux-ideal-size
|
||||
|
||||
--------------------------------
|
||||
### Generative Grammar-Based Prompt Nodes
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import typing
|
||||
from enum import Enum
|
||||
from importlib.metadata import PackageNotFoundError, version
|
||||
from importlib.metadata import distributions
|
||||
from pathlib import Path
|
||||
from platform import python_version
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
|
||||
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
|
||||
|
||||
|
||||
class AppDependencyVersions(BaseModel):
|
||||
"""App depencency Versions Response"""
|
||||
|
||||
accelerate: str = Field(description="accelerate version")
|
||||
compel: str = Field(description="compel version")
|
||||
cuda: Optional[str] = Field(description="CUDA version")
|
||||
diffusers: str = Field(description="diffusers version")
|
||||
numpy: str = Field(description="Numpy version")
|
||||
opencv: str = Field(description="OpenCV version")
|
||||
onnx: str = Field(description="ONNX version")
|
||||
pillow: str = Field(description="Pillow (PIL) version")
|
||||
python: str = Field(description="Python version")
|
||||
torch: str = Field(description="PyTorch version")
|
||||
torchvision: str = Field(description="PyTorch Vision version")
|
||||
transformers: str = Field(description="transformers version")
|
||||
xformers: Optional[str] = Field(description="xformers version")
|
||||
|
||||
|
||||
class AppConfig(BaseModel):
|
||||
"""App Config Response"""
|
||||
|
||||
@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
|
||||
return AppVersion(version=__version__)
|
||||
|
||||
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
|
||||
async def get_app_deps() -> AppDependencyVersions:
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
|
||||
async def get_app_deps() -> dict[str, str]:
|
||||
deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
|
||||
try:
|
||||
xformers = version("xformers")
|
||||
except PackageNotFoundError:
|
||||
xformers = None
|
||||
return AppDependencyVersions(
|
||||
accelerate=version("accelerate"),
|
||||
compel=version("compel"),
|
||||
cuda=torch.version.cuda,
|
||||
diffusers=version("diffusers"),
|
||||
numpy=version("numpy"),
|
||||
opencv=version("opencv-python"),
|
||||
onnx=version("onnx"),
|
||||
pillow=version("pillow"),
|
||||
python=python_version(),
|
||||
torch=torch.version.__version__,
|
||||
torchvision=version("torchvision"),
|
||||
transformers=version("transformers"),
|
||||
xformers=xformers,
|
||||
)
|
||||
cuda = torch.version.cuda or "N/A"
|
||||
except Exception:
|
||||
cuda = "N/A"
|
||||
|
||||
deps["CUDA"] = cuda
|
||||
|
||||
sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
|
||||
|
||||
return sorted_deps
|
||||
|
||||
|
||||
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
|
||||
|
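With this change the endpoint no longer returns a fixed `AppDependencyVersions` model; it reports every installed distribution plus a `CUDA` entry, sorted case-insensitively. A hypothetical client call might look like the following (the base URL, port, and route prefix are assumptions about a default local install, not taken from this diff):

```python
# Hypothetical client sketch: read the new name -> version mapping from /app_deps.
import requests

resp = requests.get("http://127.0.0.1:9090/api/v1/app/app_deps", timeout=10)
resp.raise_for_status()
deps: dict[str, str] = resp.json()

print(deps.get("CUDA"))   # e.g. "12.8", or "N/A" when torch reports no CUDA
print(deps.get("torch"))  # whatever version importlib.metadata found
```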
||||
@@ -146,7 +146,7 @@ async def list_boards(
|
||||
response_model=list[str],
|
||||
)
|
||||
async def list_all_board_image_names(
|
||||
board_id: str = Path(description="The id of the board"),
|
||||
board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
|
||||
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
|
||||
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
|
||||
) -> list[str]:
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import io
|
||||
import json
|
||||
import traceback
|
||||
from typing import Optional
|
||||
from typing import ClassVar, Optional
|
||||
|
||||
from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
|
||||
@@ -19,6 +20,8 @@ from invokeai.app.services.image_records.image_records_common import (
|
||||
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
|
||||
@@ -27,6 +30,19 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
IMAGE_MAX_AGE = 31536000
|
||||
|
||||
|
||||
class ResizeToDimensions(BaseModel):
|
||||
width: int = Field(..., gt=0)
|
||||
height: int = Field(..., gt=0)
|
||||
|
||||
MAX_SIZE: ClassVar[int] = 4096 * 4096
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_total_output_size(self):
|
||||
if self.width * self.height > self.MAX_SIZE:
|
||||
raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
|
||||
return self
|
||||
|
||||
|
||||
@images_router.post(
|
||||
"/upload",
|
||||
operation_id="upload_image",
|
||||
@@ -46,6 +62,11 @@ async def upload_image(
|
||||
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
|
||||
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
|
||||
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
|
||||
resize_to: Optional[str] = Body(
|
||||
default=None,
|
||||
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
|
||||
example='"[1024,1024]"',
|
||||
),
|
||||
metadata: Optional[str] = Body(
|
||||
default=None,
|
||||
description="The metadata to associate with the image, must be a stringified JSON dict",
|
||||
@@ -59,13 +80,33 @@ async def upload_image(
|
||||
contents = await file.read()
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
if crop_visible:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
|
||||
if crop_visible:
|
||||
try:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to crop image")
|
||||
|
||||
if resize_to:
|
||||
try:
|
||||
dims = json.loads(resize_to)
|
||||
resize_dims = ResizeToDimensions(**dims)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
|
||||
|
||||
try:
|
||||
# heuristic_resize_fast expects an RGB or RGBA image
|
||||
pil_rgba = pil_image.convert("RGBA")
|
||||
np_image = pil_to_np(pil_rgba)
|
||||
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
|
||||
pil_image = np_to_pil(np_image)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to resize image")
|
||||
|
||||
extracted_metadata = extract_metadata_from_image(
|
||||
pil_image=pil_image,
|
||||
invokeai_metadata_override=metadata,
|
||||
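A hypothetical client for the new `resize_to` field could send it as a stringified JSON list alongside the multipart upload, matching the documented example string; the query parameters shown here and the local base URL are assumptions, not part of this diff:

```python
# Hypothetical upload sketch exercising the new resize_to field.
import json
import requests

with open("input.png", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:9090/api/v1/images/upload",
        params={"image_category": "user", "is_intermediate": "false"},
        files={"file": ("input.png", f, "image/png")},
        # Stringified 2-integer list, validated server-side against ResizeToDimensions.MAX_SIZE.
        data={"resize_to": json.dumps([1024, 1024])},
    )
resp.raise_for_status()
print(resp.json()["image_name"])
```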
@@ -356,6 +397,29 @@ async def delete_images_from_list(
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
|
||||
@images_router.delete(
|
||||
"/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
|
||||
)
|
||||
async def delete_uncategorized_images() -> DeleteImagesFromListResult:
|
||||
"""Deletes all images that are uncategorized"""
|
||||
|
||||
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id="none", categories=None, is_intermediate=None
|
||||
)
|
||||
|
||||
try:
|
||||
deleted_images: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
ApiDependencies.invoker.services.images.delete(image_name)
|
||||
deleted_images.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return DeleteImagesFromListResult(deleted_images=deleted_images)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
|
||||
class ImagesUpdatedFromListResult(BaseModel):
|
||||
updated_image_names: list[str] = Field(description="The image names that were updated")
|
||||
|
||||
|
||||
@@ -158,7 +158,7 @@ web_root_path = Path(list(web_dir.__path__)[0])
|
||||
try:
|
||||
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||
except RuntimeError:
|
||||
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
app.mount(
|
||||
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
|
||||
) # docs favicon is in here
|
||||
|
||||
@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
|
||||
|
||||
ui_type = field.json_schema_extra.get("ui_type", None)
|
||||
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
|
||||
logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
|
||||
logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
return None
|
||||
|
||||
@@ -613,7 +613,7 @@ def invocation(
|
||||
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
|
||||
uiconfig["version"] = version
|
||||
else:
|
||||
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
uiconfig["version"] = "1.0.0"
|
||||
|
||||
cls.UIConfig = UIConfigBase(**uiconfig)
|
||||
@@ -643,6 +643,16 @@ def invocation(
|
||||
|
||||
fields["type"] = (invocation_type_annotation, invocation_type_field_info)
|
||||
|
||||
# Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
|
||||
# output is registered _after_ this invocation is registered. It depends on module import ordering.
|
||||
#
|
||||
# We can only confirm the output for an invocation is registered after all modules are imported. There's
|
||||
# only really one good time to do that - during application startup, in `run_app.py`, after loading all
|
||||
# custom nodes.
|
||||
#
|
||||
# We can still do some basic validation here - ensure the invoke method is defined and returns an instance
|
||||
# of BaseInvocationOutput.
|
||||
|
||||
# Validate the `invoke()` method is implemented
|
||||
if "invoke" in cls.__abstractmethods__:
|
||||
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from typing import Iterator, List, Optional, Tuple, Union, cast
|
||||
|
||||
import torch
|
||||
from compel import Compel, ReturnedEmbeddingsType
|
||||
from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
|
||||
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
|
||||
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
||||
|
||||
@@ -104,6 +104,7 @@ class CompelInvocation(BaseInvocation):
|
||||
dtype_for_device_getter=TorchDevice.choose_torch_dtype,
|
||||
truncate_long_prompts=False,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(self.prompt)
|
||||
@@ -113,6 +114,13 @@ class CompelInvocation(BaseInvocation):
|
||||
|
||||
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del text_encoder_info
|
||||
|
||||
c = c.detach().to("cpu")
|
||||
|
||||
conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
|
||||
@@ -205,6 +213,7 @@ class SDXLPromptInvocationBase:
|
||||
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, # TODO: clip skip
|
||||
requires_pooled=get_pooled,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(prompt)
|
||||
@@ -220,7 +229,10 @@ class SDXLPromptInvocationBase:
|
||||
else:
|
||||
c_pooled = None
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del text_encoder_info
|
||||
|
||||
|
||||
@@ -22,7 +22,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
|
||||
from invokeai.app.util.controlnet_utils import (
|
||||
CONTROLNET_MODE_VALUES,
|
||||
CONTROLNET_RESIZE_VALUES,
|
||||
heuristic_resize_fast,
|
||||
)
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
|
||||
@@ -109,7 +113,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
title="Heuristic Resize",
|
||||
tags=["image, controlnet"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
version="1.1.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class HeuristicResizeInvocation(BaseInvocation):
|
||||
@@ -122,7 +126,7 @@ class HeuristicResizeInvocation(BaseInvocation):
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name, "RGB")
|
||||
np_img = pil_to_np(image)
|
||||
np_resized = heuristic_resize(np_img, (self.width, self.height))
|
||||
np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
|
||||
resized = np_to_pil(np_resized)
|
||||
image_dto = context.images.save(image=resized)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
from typing import Literal, Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
from PIL import Image, ImageFilter
|
||||
from PIL import Image
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
@@ -42,15 +44,13 @@ class GradientMaskOutput(BaseInvocationOutput):
|
||||
title="Create Gradient Mask",
|
||||
tags=["mask", "denoise"],
|
||||
category="latents",
|
||||
version="1.2.1",
|
||||
version="1.3.0",
|
||||
)
|
||||
class CreateGradientMaskInvocation(BaseInvocation):
|
||||
"""Creates mask for denoising model run."""
|
||||
"""Creates mask for denoising."""
|
||||
|
||||
mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
|
||||
edge_radius: int = InputField(
|
||||
default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
|
||||
)
|
||||
edge_radius: int = InputField(default=16, ge=0, description="How far to expand the edges of the mask", ui_order=2)
|
||||
coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
|
||||
minimum_denoise: float = InputField(
|
||||
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
|
||||
@@ -81,45 +81,110 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> GradientMaskOutput:
|
||||
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
|
||||
|
||||
# Resize the mask_image. Makes the filter 64x faster and doesn't hurt quality in latent scale anyway
|
||||
mask_image = mask_image.resize(
|
||||
(
|
||||
mask_image.width // LATENT_SCALE_FACTOR,
|
||||
mask_image.height // LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.BILINEAR,
|
||||
)
|
||||
|
||||
mask_np_orig = np.array(mask_image, dtype=np.float32)
|
||||
|
||||
self.edge_radius = self.edge_radius // LATENT_SCALE_FACTOR # scale the edge radius to match the mask size
|
||||
|
||||
if self.edge_radius > 0:
|
||||
mask_np = 255 - mask_np_orig # invert so 0 is unmasked (higher values = higher denoise strength)
|
||||
dilated_mask = mask_np.copy()
|
||||
|
||||
# Create kernel based on coherence mode
|
||||
if self.coherence_mode == "Box Blur":
|
||||
blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
|
||||
else: # Gaussian Blur OR Staged
|
||||
# Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
|
||||
blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
|
||||
# Create a circular distance kernel that fades from center outward
|
||||
kernel_size = self.edge_radius * 2 + 1
|
||||
center = self.edge_radius
|
||||
kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
|
||||
for i in range(kernel_size):
|
||||
for j in range(kernel_size):
|
||||
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
|
||||
if dist <= self.edge_radius:
|
||||
kernel[i, j] = 1.0 - (dist / self.edge_radius)
|
||||
else: # Gaussian Blur or Staged
|
||||
# Create a Gaussian kernel
|
||||
kernel_size = self.edge_radius * 2 + 1
|
||||
kernel = cv2.getGaussianKernel(
|
||||
kernel_size, self.edge_radius / 2.5
|
||||
) # 2.5 is a magic number (standard deviation capturing)
|
||||
kernel = kernel * kernel.T # Make 2D gaussian kernel
|
||||
kernel = kernel / np.max(kernel) # Normalize center to 1.0
|
||||
|
||||
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
|
||||
# Ensure values outside radius are 0
|
||||
center = self.edge_radius
|
||||
for i in range(kernel_size):
|
||||
for j in range(kernel_size):
|
||||
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
|
||||
if dist > self.edge_radius:
|
||||
kernel[i, j] = 0
|
||||
|
||||
# redistribute blur so that the original edges are 0 and blur outwards to 1
|
||||
blur_tensor = (blur_tensor - 0.5) * 2
|
||||
blur_tensor[blur_tensor < 0] = 0.0
|
||||
# 2D max filter
|
||||
mask_tensor = torch.tensor(mask_np)
|
||||
kernel_tensor = torch.tensor(kernel)
|
||||
dilated_mask = 255 - self.max_filter2D_torch(mask_tensor, kernel_tensor).cpu()
|
||||
dilated_mask = dilated_mask.numpy()
|
||||
|
||||
threshold = 1 - self.minimum_denoise
|
||||
threshold = (1 - self.minimum_denoise) * 255
|
||||
|
||||
if self.coherence_mode == "Staged":
|
||||
# wherever the blur_tensor is less than fully masked, convert it to threshold
|
||||
blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
|
||||
else:
|
||||
# wherever the blur_tensor is above threshold but less than 1, drop it to threshold
|
||||
blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
|
||||
# wherever expanded mask is darker than the original mask but original was above threshold, set it to the threshold
|
||||
# makes any expansion areas drop to the threshold. Raising the minimum across the image happens outside of this if
|
||||
threshold_mask = (dilated_mask < mask_np_orig) & (mask_np_orig > threshold)
|
||||
dilated_mask = np.where(threshold_mask, threshold, mask_np_orig)
|
||||
|
||||
# wherever expanded mask is less than 255 but greater than threshold, drop it to threshold (minimum denoise)
|
||||
threshold_mask = (dilated_mask > threshold) & (dilated_mask < 255)
|
||||
dilated_mask = np.where(threshold_mask, threshold, dilated_mask)
|
||||
|
||||
else:
|
||||
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
|
||||
dilated_mask = mask_np_orig.copy()
|
||||
|
||||
mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
|
||||
# convert to tensor
|
||||
dilated_mask = np.clip(dilated_mask, 0, 255).astype(np.uint8)
|
||||
mask_tensor = torch.tensor(dilated_mask, device=torch.device("cpu"))
|
||||
|
||||
# compute a [0, 1] mask from the blur_tensor
|
||||
expanded_mask = torch.where((blur_tensor < 1), 0, 1)
|
||||
expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
|
||||
# binary mask for compositing
|
||||
expanded_mask = np.where((dilated_mask < 255), 0, 255)
|
||||
expanded_mask_image = Image.fromarray(expanded_mask.astype(np.uint8), mode="L")
|
||||
expanded_mask_image = expanded_mask_image.resize(
|
||||
(
|
||||
mask_image.width * LATENT_SCALE_FACTOR,
|
||||
mask_image.height * LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.NEAREST,
|
||||
)
|
||||
expanded_image_dto = context.images.save(expanded_mask_image)
|
||||
|
||||
# restore the original mask size
|
||||
dilated_mask = Image.fromarray(dilated_mask.astype(np.uint8))
|
||||
dilated_mask = dilated_mask.resize(
|
||||
(
|
||||
mask_image.width * LATENT_SCALE_FACTOR,
|
||||
mask_image.height * LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.NEAREST,
|
||||
)
|
||||
|
||||
# stack the mask as a tensor, repeating 4 times on dimension 1
|
||||
dilated_mask_tensor = image_resized_to_grid_as_tensor(dilated_mask, normalize=False)
|
||||
mask_name = context.tensors.save(tensor=dilated_mask_tensor.unsqueeze(0))
|
||||
|
||||
masked_latents_name = None
|
||||
if self.unet is not None and self.vae is not None and self.image is not None:
|
||||
# all three fields must be present at the same time
|
||||
main_model_config = context.models.get_config(self.unet.unet.key)
|
||||
assert isinstance(main_model_config, MainConfigBase)
|
||||
if main_model_config.variant is ModelVariantType.Inpaint:
|
||||
mask = blur_tensor
|
||||
mask = dilated_mask_tensor
|
||||
vae_info: LoadedModel = context.models.load(self.vae.vae)
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
||||
@@ -137,3 +202,29 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
|
||||
expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
|
||||
)
|
||||
|
||||
def max_filter2D_torch(self, image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
This morphological operation is much faster in torch than numpy or opencv
|
||||
For reasonable kernel sizes, the overhead of copying the data to the GPU is not worth it.
|
||||
"""
|
||||
h, w = kernel.shape
|
||||
pad_h, pad_w = h // 2, w // 2
|
||||
|
||||
padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
|
||||
result = torch.zeros_like(image)
|
||||
|
||||
# This looks like it's inside out, but it does the same thing and is more efficient
|
||||
for i in range(h):
|
||||
for j in range(w):
|
||||
weight = kernel[i, j]
|
||||
if weight <= 0:
|
||||
continue
|
||||
|
||||
# Extract the region from padded tensor
|
||||
region = padded[i : i + image.shape[0], j : j + image.shape[1]]
|
||||
|
||||
# Apply weight and update max
|
||||
result = torch.maximum(result, region * weight)
|
||||
|
||||
return result
|
||||
|
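To see what the weighted max filter does, the self-contained sketch below repeats the same sliding-window logic outside the invocation class and runs it on a toy mask; the 1x3 fade kernel is an assumption chosen only to keep the output readable.

```python
# Self-contained restatement of the weighted 2-D max filter on a toy input.
import torch

def weighted_max_filter(image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
    h, w = kernel.shape
    pad_h, pad_w = h // 2, w // 2
    padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), value=0.0)
    result = torch.zeros_like(image)
    for i in range(h):
        for j in range(w):
            if kernel[i, j] <= 0:
                continue
            region = padded[i : i + image.shape[0], j : j + image.shape[1]]
            result = torch.maximum(result, region * kernel[i, j])
    return result

mask = torch.zeros(9, 9)
mask[4, 4] = 255.0                        # a single masked pixel
kernel = torch.tensor([[0.5, 1.0, 0.5]])  # toy fade-out kernel
print(weighted_max_filter(mask, kernel)[4, 3:6])  # tensor([127.5000, 255.0000, 127.5000])
```

The masked pixel is spread to its neighbours at reduced strength, which is exactly how the circular fade kernel above expands the denoise region outward from the original mask edge.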
||||
@@ -437,7 +437,7 @@ class WithWorkflow:
|
||||
workflow = None
|
||||
|
||||
def __init_subclass__(cls) -> None:
|
||||
logger.warn(
|
||||
logger.warning(
|
||||
f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
|
||||
)
|
||||
super().__init_subclass__()
|
||||
@@ -578,7 +578,7 @@ def InputField(
|
||||
|
||||
if default_factory is not _Unset and default_factory is not None:
|
||||
default = default_factory()
|
||||
logger.warn('"default_factory" is not supported, calling it now to set "default"')
|
||||
logger.warning('"default_factory" is not supported, calling it now to set "default"')
|
||||
|
||||
# These are the args we may wish pass to the pydantic `Field()` function
|
||||
field_args = {
|
||||
|
||||
@@ -1218,12 +1218,15 @@ class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Add Image Noise",
|
||||
tags=["image", "noise"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
version="1.1.0",
|
||||
)
|
||||
class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add noise to an image"""
|
||||
|
||||
image: ImageField = InputField(description="The image to add noise to")
|
||||
mask: Optional[ImageField] = InputField(
|
||||
default=None, description="Optional mask determining where to apply noise (black=noise, white=no noise)"
|
||||
)
|
||||
seed: int = InputField(
|
||||
default=0,
|
||||
ge=0,
|
||||
@@ -1267,12 +1270,27 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
|
||||
(image.width, image.height), Image.Resampling.NEAREST
|
||||
)
|
||||
|
||||
# Create a noisy version of the input image
|
||||
noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")
|
||||
|
||||
# Paste back the alpha channel
|
||||
noisy_image.putalpha(alpha)
|
||||
# Apply mask if provided
|
||||
if self.mask is not None:
|
||||
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
|
||||
|
||||
image_dto = context.images.save(image=noisy_image)
|
||||
if mask_image.size != image.size:
|
||||
mask_image = mask_image.resize(image.size, Image.Resampling.LANCZOS)
|
||||
|
||||
result_image = image.copy()
|
||||
mask_image = ImageOps.invert(mask_image)
|
||||
result_image.paste(noisy_image, (0, 0), mask=mask_image)
|
||||
else:
|
||||
result_image = noisy_image
|
||||
|
||||
# Paste back the alpha channel from the original image
|
||||
result_image.putalpha(alpha)
|
||||
|
||||
image_dto = context.images.save(image=result_image)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
|
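The mask convention ("black = noise, white = no noise") comes from inverting the mask before the paste. The toy sketch below (assumed behaviour for illustration, not repo code) shows that only the black pixel of the original mask receives the pasted colour:

```python
# Toy illustration of paste-with-inverted-mask: black mask pixels take the pasted image.
from PIL import Image, ImageOps

base = Image.new("RGB", (4, 1), "blue")
noisy = Image.new("RGB", (4, 1), "red")
mask = Image.new("L", (4, 1), 255)   # white: keep the original pixel
mask.putpixel((0, 0), 0)             # black: take the noisy pixel

result = base.copy()
result.paste(noisy, (0, 0), mask=ImageOps.invert(mask))
print(result.getpixel((0, 0)), result.getpixel((1, 0)))  # (255, 0, 0) (0, 0, 255)
```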
||||
@@ -42,7 +42,9 @@ class IPAdapterMetadataField(BaseModel):
|
||||
image: ImageField = Field(description="The IP-Adapter image prompt.")
|
||||
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
|
||||
clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
|
||||
method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
|
||||
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = Field(
|
||||
description="Method to apply IP Weights with"
|
||||
)
|
||||
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
|
||||
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
|
||||
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")
|
||||
|
||||
@@ -1,12 +1,3 @@
|
||||
import uvicorn
|
||||
|
||||
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
|
||||
|
||||
def get_app():
|
||||
"""Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
|
||||
importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
|
||||
@@ -18,9 +9,18 @@ def get_app():
|
||||
|
||||
def run_app() -> None:
|
||||
"""The main entrypoint for the app."""
|
||||
# Parse the CLI arguments.
|
||||
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
|
||||
# Parse the CLI arguments before doing anything else, which ensures CLI args correctly override settings from other
|
||||
# sources like `invokeai.yaml` or env vars.
|
||||
InvokeAIArgs.parse_args()
|
||||
|
||||
import uvicorn
|
||||
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
# Load config.
|
||||
app_config = get_config()
|
||||
|
||||
@@ -32,6 +32,8 @@ def run_app() -> None:
|
||||
configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)
|
||||
|
||||
# This import must happen after configure_torch_cuda_allocator() is called, because the module imports torch.
|
||||
from invokeai.app.invocations.baseinvocation import InvocationRegistry
|
||||
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
torch_device_name = TorchDevice.get_torch_device_name()
|
||||
@@ -66,6 +68,15 @@ def run_app() -> None:
|
||||
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
|
||||
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)
|
||||
|
||||
# Check all invocations and ensure their outputs are registered.
|
||||
for invocation in InvocationRegistry.get_invocation_classes():
|
||||
invocation_type = invocation.get_type()
|
||||
output_annotation = invocation.get_output_annotation()
|
||||
if output_annotation not in InvocationRegistry.get_output_classes():
|
||||
logger.warning(
|
||||
f'Invocation "{invocation_type}" has unregistered output class "{output_annotation.__name__}"'
|
||||
)
|
||||
|
||||
if app_config.dev_reload:
|
||||
# load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
|
||||
# imported.
|
||||
|
||||
@@ -98,9 +98,18 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
# Handle board_id filter
|
||||
if board_id == "none":
|
||||
stmt += """--sql
|
||||
AND board_images.board_id IS NULL
|
||||
"""
|
||||
else:
|
||||
stmt += """--sql
|
||||
AND board_images.board_id = ?
|
||||
"""
|
||||
params.append(board_id)
|
||||
params.append(board_id)
|
||||
|
||||
# Add the category filter
|
||||
if categories is not None:
|
||||
|
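The branch added here gives the literal board id "none" a special meaning: it matches rows with no `board_images` entry instead of binding a parameter. A minimal sketch of the two branches (the helper name and the trimmed-down statement are assumptions for illustration):

```python
# Illustrative only: how the statement and its parameters differ for the two branches.
def board_filter(board_id: str) -> tuple[str, list[str]]:
    stmt = (
        "SELECT images.image_name FROM images "
        "LEFT JOIN board_images ON board_images.image_name = images.image_name "
        "WHERE 1=1"
    )
    params: list[str] = []
    if board_id == "none":
        stmt += " AND board_images.board_id IS NULL"  # uncategorized images
    else:
        stmt += " AND board_images.board_id = ?"
        params.append(board_id)
    return stmt, params

print(board_filter("none")[1])    # []
print(board_filter("abc123")[1])  # ['abc123']
```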
||||
@@ -24,7 +24,6 @@ from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
INIT_FILE = Path("invokeai.yaml")
|
||||
DB_FILE = Path("invokeai.db")
|
||||
LEGACY_INIT_FILE = Path("invokeai.init")
|
||||
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
|
||||
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
|
||||
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
|
||||
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
|
||||
@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
|
||||
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
|
||||
pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
|
||||
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
|
||||
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
|
||||
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
|
||||
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
|
||||
attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
|
||||
@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
|
||||
|
||||
# DEVICE
|
||||
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
|
||||
device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
|
||||
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
|
||||
|
||||
# GENERATION
|
||||
|
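The `device` setting is now a plain string validated by a regular expression, so any CUDA device index is accepted rather than only `cuda` and `cuda:1`. A quick standalone check of the pattern (outside pydantic) behaves as follows:

```python
# Checking the new device pattern against a few candidate values.
import re

DEVICE_PATTERN = re.compile(r"^(auto|cpu|mps|cuda(:\d+)?)$")

for candidate in ("auto", "cuda", "cuda:3", "cuda:12", "rocm"):
    print(candidate, bool(DEVICE_PATTERN.match(candidate)))
# auto True / cuda True / cuda:3 True / cuda:12 True / rocm False
```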
||||
@@ -196,9 +196,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
# Search term condition
|
||||
if search_term:
|
||||
query_conditions += """--sql
|
||||
AND images.metadata LIKE ?
|
||||
AND (
|
||||
images.metadata LIKE ?
|
||||
OR images.created_at LIKE ?
|
||||
)
|
||||
"""
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
|
||||
if starred_first:
|
||||
query_pagination = f"""--sql
|
||||
|
||||
@@ -78,7 +78,7 @@ class ImageService(ImageServiceABC):
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
except Exception as e:
|
||||
self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
|
||||
self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
|
||||
self.__invoker.services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
|
||||
)
|
||||
|
||||
@@ -148,7 +148,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
def _clear_pending_jobs(self) -> None:
|
||||
for job in self.list_jobs():
|
||||
if not job.in_terminal_state:
|
||||
self._logger.warning("Cancelling job {job.id}")
|
||||
self._logger.warning(f"Cancelling job {job.id}")
|
||||
self.cancel_job(job)
|
||||
while True:
|
||||
try:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import gc
|
||||
import traceback
|
||||
from contextlib import suppress
|
||||
from threading import BoundedSemaphore, Thread
|
||||
@@ -439,6 +440,12 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
poll_now_event.wait(self._polling_interval)
|
||||
continue
|
||||
|
||||
# GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
|
||||
# Most queue items take seconds to execute, so the relative cost of a GC is very small.
|
||||
# Python will never cede allocated memory back to the OS, so anything we can do to reduce the peak
|
||||
# allocation is well worth it.
|
||||
gc.collect()
|
||||
|
||||
self._invoker.services.logger.info(
|
||||
f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
|
||||
)
|
||||
|
||||
@@ -104,11 +104,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
return cast(Union[int, None], cursor.fetchone()[0]) or 0
|
||||
|
||||
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
||||
return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
|
||||
|
||||
def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
# TODO: how does this work in a multi-user scenario?
|
||||
current_queue_size = self._get_current_queue_size(queue_id)
|
||||
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
||||
@@ -118,8 +114,12 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
if prepend:
|
||||
priority = self._get_highest_priority(queue_id) + 1
|
||||
|
||||
requested_count = calc_session_count(batch)
|
||||
values_to_insert = prepare_values_to_insert(
|
||||
requested_count = await asyncio.to_thread(
|
||||
calc_session_count,
|
||||
batch=batch,
|
||||
)
|
||||
values_to_insert = await asyncio.to_thread(
|
||||
prepare_values_to_insert,
|
||||
queue_id=queue_id,
|
||||
batch=batch,
|
||||
priority=priority,
|
||||
@@ -127,19 +127,16 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
enqueued_count = len(values_to_insert)
|
||||
|
||||
if requested_count > enqueued_count:
|
||||
values_to_insert = values_to_insert[:max_new_queue_items]
|
||||
|
||||
cursor.executemany(
|
||||
"""--sql
|
||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
values_to_insert,
|
||||
)
|
||||
self._conn.commit()
|
||||
with self._conn:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.executemany(
|
||||
"""--sql
|
||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
values_to_insert,
|
||||
)
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
enqueue_result = EnqueueBatchResult(
|
||||
queue_id=queue_id,
|
||||
|
||||
@@ -230,6 +230,86 @@ def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.
|
||||
return resized
|
||||
|
||||
|
||||
# precompute common kernels
|
||||
_KERNEL3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
|
||||
# directional masks for NMS
|
||||
_DIRS = [
|
||||
np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], np.uint8),
|
||||
np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], np.uint8),
|
||||
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.uint8),
|
||||
np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], np.uint8),
|
||||
]
|
||||
|
||||
|
||||
def heuristic_resize_fast(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
|
||||
h, w = np_img.shape[:2]
|
||||
# early exit
|
||||
if (w, h) == size:
|
||||
return np_img
|
||||
|
||||
# separate alpha channel
|
||||
img = np_img
|
||||
alpha = None
|
||||
if img.ndim == 3 and img.shape[2] == 4:
|
||||
alpha, img = img[:, :, 3], img[:, :, :3]
|
||||
|
||||
# build small sample for unique‐color & binary detection
|
||||
flat = img.reshape(-1, img.shape[-1])
|
||||
N = flat.shape[0]
|
||||
# include four corners to avoid missing extreme values
|
||||
corners = np.vstack([img[0, 0], img[0, w - 1], img[h - 1, 0], img[h - 1, w - 1]])
|
||||
cnt = min(N, 100_000)
|
||||
samp = np.vstack([corners, flat[np.random.choice(N, cnt, replace=False)]])
|
||||
uc = np.unique(samp, axis=0).shape[0]
|
||||
vmin, vmax = samp.min(), samp.max()
|
||||
|
||||
# detect binary edge map & one‐pixel‐edge case
|
||||
is_binary = uc == 2 and vmin < 16 and vmax > 240
|
||||
one_pixel_edge = False
|
||||
if is_binary:
|
||||
# single gray conversion
|
||||
gray0 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
grad = cv2.morphologyEx(gray0, cv2.MORPH_GRADIENT, _KERNEL3)
|
||||
cnt_edge = cv2.countNonZero(grad)
|
||||
cnt_all = cv2.countNonZero((gray0 > 127).astype(np.uint8))
|
||||
one_pixel_edge = (2 * cnt_edge) > cnt_all
|
||||
|
||||
# choose interp for color/seg/grayscale
|
||||
area_new, area_old = size[0] * size[1], w * h
|
||||
if 2 < uc < 200: # segmentation map
|
||||
interp = cv2.INTER_NEAREST
|
||||
elif area_new < area_old:
|
||||
interp = cv2.INTER_AREA
|
||||
else:
|
||||
interp = cv2.INTER_CUBIC
|
||||
|
||||
# single resize pass on RGB
|
||||
resized = cv2.resize(img, size, interpolation=interp)
|
||||
|
||||
if is_binary:
|
||||
# convert to gray & apply NMS via C++ dilate
|
||||
gray_r = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
|
||||
nms = np.zeros_like(gray_r)
|
||||
for K in _DIRS:
|
||||
d = cv2.dilate(gray_r, K)
|
||||
mask = d == gray_r
|
||||
nms[mask] = gray_r[mask]
|
||||
|
||||
# threshold + thinning if needed
|
||||
_, bw = cv2.threshold(nms, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
out_bin = cv2.ximgproc.thinning(bw) if one_pixel_edge else bw
|
||||
# restore 3 channels
|
||||
resized = np.stack([out_bin] * 3, axis=2)
|
||||
|
||||
# restore alpha with same interp as RGB for consistency
|
||||
if alpha is not None:
|
||||
am = cv2.resize(alpha, size, interpolation=interp)
|
||||
am = (am > 127).astype(np.uint8) * 255
|
||||
resized = np.dstack((resized, am))
|
||||
|
||||
return resized
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
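A minimal usage sketch for the new `heuristic_resize_fast` follows; the file names are placeholders, and the function chooses nearest, area, or cubic interpolation and the binary edge-map handling on its own:

```python
# Hypothetical call site: resize a control image through the numpy-based helper.
import numpy as np
from PIL import Image

from invokeai.app.util.controlnet_utils import heuristic_resize_fast

pil = Image.open("control_map.png").convert("RGBA")
np_img = np.asarray(pil)
resized = heuristic_resize_fast(np_img, (768, 768))  # size is (width, height)
Image.fromarray(resized).save("control_map_768.png")
```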
||||
@@ -244,7 +324,7 @@ def np_img_resize(
|
||||
np_img = normalize_image_channel_count(np_img)
|
||||
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = heuristic_resize(np_img, (w, h))
|
||||
np_img = heuristic_resize_fast(np_img, (w, h))
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
@@ -265,7 +345,7 @@ def np_img_resize(
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
@@ -275,7 +355,7 @@ def np_img_resize(
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
|
||||
@@ -12,6 +12,9 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.events.events_common import EventBase
|
||||
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
logger = InvokeAILogger.get_logger()
|
||||
|
||||
|
||||
def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:
|
||||
|
||||
@@ -42,4 +42,5 @@ IP-Adapters:
|
||||
- [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
|
||||
- [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
|
||||
- [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
|
||||
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
|
||||
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
|
||||
- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)
|
||||
@@ -296,7 +296,7 @@ class LoRAConfigBase(ABC, BaseModel):
|
||||
from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict
|
||||
|
||||
sd = mod.load_state_dict(mod.path)
|
||||
value = flux_format_from_state_dict(sd)
|
||||
value = flux_format_from_state_dict(sd, mod.metadata())
|
||||
mod.cache[key] = value
|
||||
return value
|
||||
|
||||
|
||||
@@ -20,6 +20,10 @@ from invokeai.backend.model_manager.taxonomy import (
|
||||
ModelType,
|
||||
SubModelType,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_aitoolkit_format,
|
||||
lora_model_from_flux_aitoolkit_state_dict,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
|
||||
is_state_dict_likely_flux_control,
|
||||
lora_model_from_flux_control_state_dict,
|
||||
@@ -92,6 +96,8 @@ class LoRALoader(ModelLoader):
|
||||
model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
|
||||
elif is_state_dict_likely_flux_control(state_dict=state_dict):
|
||||
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
|
||||
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
|
||||
model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
|
||||
else:
|
||||
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
|
||||
else:
|
||||
|
||||
@@ -62,11 +62,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
|
||||
# If this too fails, raise exception.
|
||||
|
||||
model_info = None
|
||||
|
||||
# Handling for our special syntax - we only want the base HF `org/repo` here.
|
||||
repo_id = id.split("::")[0] or id
|
||||
while not model_info:
|
||||
try:
|
||||
model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant)
|
||||
model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
|
||||
except RepositoryNotFoundError as excp:
|
||||
raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
|
||||
raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
|
||||
except RevisionNotFoundError:
|
||||
if variant is None:
|
||||
raise
|
||||
@@ -75,14 +78,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
|
||||
|
||||
files: list[RemoteModelFile] = []
|
||||
|
||||
_, name = id.split("/")
|
||||
_, name = repo_id.split("/")
|
||||
|
||||
for s in model_info.siblings or []:
|
||||
assert s.rfilename is not None
|
||||
assert s.size is not None
|
||||
files.append(
|
||||
RemoteModelFile(
|
||||
url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
|
||||
url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
|
||||
path=Path(name, s.rfilename),
|
||||
size=s.size,
|
||||
sha256=s.lfs.get("sha256") if s.lfs else None,
|
||||
|
||||
@@ -297,6 +297,15 @@ ip_adapter_sdxl = StarterModel(
|
||||
dependencies=[ip_adapter_sdxl_image_encoder],
|
||||
previous_names=["IP Adapter SDXL"],
|
||||
)
|
||||
ip_adapter_plus_sdxl = StarterModel(
|
||||
name="Precise Reference (IP Adapter Plus ViT-H)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
|
||||
description="References images with a higher degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sdxl_image_encoder],
|
||||
previous_names=["IP Adapter Plus SDXL"],
|
||||
)
|
||||
ip_adapter_flux = StarterModel(
|
||||
name="Standard Reference (XLabs FLUX IP-Adapter v2)",
|
||||
base=BaseModelType.Flux,
|
||||
@@ -672,6 +681,7 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
ip_adapter_plus_sd1,
|
||||
ip_adapter_plus_face_sd1,
|
||||
ip_adapter_sdxl,
|
||||
ip_adapter_plus_sdxl,
|
||||
ip_adapter_flux,
|
||||
qr_code_cnet_sd1,
|
||||
qr_code_cnet_sdxl,
|
||||
@@ -744,6 +754,7 @@ sdxl_bundle: list[StarterModel] = [
|
||||
juggernaut_sdxl,
|
||||
sdxl_fp16_vae_fix,
|
||||
ip_adapter_sdxl,
|
||||
ip_adapter_plus_sdxl,
|
||||
canny_sdxl,
|
||||
depth_sdxl,
|
||||
softedge_sdxl,
|
||||
|
||||
@@ -137,6 +137,7 @@ class FluxLoRAFormat(str, Enum):
|
||||
Kohya = "flux.kohya"
|
||||
OneTrainer = "flux.onetrainer"
|
||||
Control = "flux.control"
|
||||
AIToolkit = "flux.aitoolkit"
|
||||
|
||||
|
||||
AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
|
||||
|
||||
@@ -46,6 +46,10 @@ class ModelPatcher:
|
||||
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
|
||||
ti_list: List[Tuple[str, TextualInversionModelRaw]],
|
||||
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
|
||||
if len(ti_list) == 0:
|
||||
yield tokenizer, TextualInversionManager(tokenizer)
|
||||
return
|
||||
|
||||
init_tokens_count = None
|
||||
new_tokens_added = None
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import re
|
||||
from contextlib import contextmanager
|
||||
from typing import Dict, Iterable, Optional, Tuple
|
||||
|
||||
@@ -7,6 +8,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
|
||||
|
||||
@@ -23,6 +25,7 @@ class LayerPatcher:
|
||||
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
|
||||
force_direct_patching: bool = False,
|
||||
force_sidecar_patching: bool = False,
|
||||
suppress_warning_layers: Optional[re.Pattern] = None,
|
||||
):
|
||||
"""Apply 'smart' model patching that chooses whether to use direct patching or a sidecar wrapper for each
|
||||
module.
|
||||
@@ -44,6 +47,7 @@ class LayerPatcher:
|
||||
dtype=dtype,
|
||||
force_direct_patching=force_direct_patching,
|
||||
force_sidecar_patching=force_sidecar_patching,
|
||||
suppress_warning_layers=suppress_warning_layers,
|
||||
)
|
||||
|
||||
yield
|
||||
@@ -70,6 +74,7 @@ class LayerPatcher:
|
||||
dtype: torch.dtype,
|
||||
force_direct_patching: bool,
|
||||
force_sidecar_patching: bool,
|
||||
suppress_warning_layers: Optional[re.Pattern] = None,
|
||||
):
|
||||
"""Apply a single LoRA patch to a model using the 'smart' patching strategy that chooses whether to use direct
|
||||
patching or a sidecar wrapper for each module.
|
||||
@@ -89,9 +94,17 @@ class LayerPatcher:
|
||||
if not layer_key.startswith(prefix):
|
||||
continue
|
||||
|
||||
module_key, module = LayerPatcher._get_submodule(
|
||||
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
|
||||
)
|
||||
try:
|
||||
module_key, module = LayerPatcher._get_submodule(
|
||||
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
|
||||
)
|
||||
except AttributeError:
|
||||
if suppress_warning_layers and suppress_warning_layers.search(layer_key):
|
||||
pass
|
||||
else:
|
||||
logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
|
||||
logger.warning("Failed to find module for LoRA layer key: %s", layer_key)
|
||||
continue
|
||||
|
||||
# Decide whether to use direct patching or a sidecar patch.
|
||||
# Direct patching is preferred, because it results in better runtime speed.
|
||||
|
||||
@@ -0,0 +1,63 @@
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
|
||||
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
|
||||
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
|
||||
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
|
||||
|
||||
def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] | None = None) -> bool:
|
||||
if metadata:
|
||||
try:
|
||||
software = json.loads(metadata.get("software", "{}"))
|
||||
except json.JSONDecodeError:
|
||||
return False
|
||||
return software.get("name") == "ai-toolkit"
|
||||
# No metadata was provided (or it was stripped); fall back to checking the state dict keys.
|
||||
return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())
|
||||
|
||||
|
||||
@dataclass
|
||||
class GroupedStateDict:
|
||||
transformer: dict[str, Any] = field(default_factory=dict)
|
||||
# might also grow CLIP and T5 submodels
|
||||
|
||||
|
||||
def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
|
||||
logger = InvokeAILogger.get_logger()
|
||||
grouped = GroupedStateDict()
|
||||
for key, value in state_dict.items():
|
||||
submodel_name, param_name = key.split(".", 1)
|
||||
match submodel_name:
|
||||
case "diffusion_model":
|
||||
grouped.transformer[param_name] = value
|
||||
case _:
|
||||
logger.warning(f"Unexpected submodel name: {submodel_name}")
|
||||
return grouped
|
||||
|
||||
|
||||
def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
|
||||
"""Renames keys from the PEFT LoRA format to the InvokeAI format."""
|
||||
renamed_state_dict = {}
|
||||
for key, value in state_dict.items():
|
||||
renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
|
||||
renamed_state_dict[renamed_key] = value
|
||||
return renamed_state_dict
|
||||
|
||||
|
||||
def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
|
||||
state_dict = _rename_peft_lora_keys(state_dict)
|
||||
by_layer = _group_by_layer(state_dict)
|
||||
by_model = _group_state_by_submodel(by_layer)
|
||||
|
||||
layers: dict[str, BaseLayerPatch] = {}
|
||||
for layer_key, layer_state_dict in by_model.transformer.items():
|
||||
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
|
||||
|
||||
return ModelPatchRaw(layers=layers)
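A hedged usage sketch of the new ai-toolkit helpers defined above. The toy metadata values and tensor shapes are made up; only the call pattern follows this file, and it assumes InvokeAI and torch are importable.

```python
import json

import torch

from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    is_state_dict_likely_in_flux_aitoolkit_format,
    lora_model_from_flux_aitoolkit_state_dict,
)

# Toy PEFT-style keys as ai-toolkit writes them (hypothetical layer/shapes).
state_dict = {
    "diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": torch.zeros(4, 3072),
    "diffusion_model.double_blocks.0.img_attn.qkv.lora_B.weight": torch.zeros(9216, 4),
}

# Detection via the safetensors metadata written by ai-toolkit...
metadata = {"software": json.dumps({"name": "ai-toolkit"})}
assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata)

# ...or, if the metadata was stripped, via the "diffusion_model." key prefix.
assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict)

# Conversion renames lora_A/lora_B to lora_down/lora_up and builds a ModelPatchRaw.
patch = lora_model_from_flux_aitoolkit_state_dict(state_dict)
```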
|
||||
@@ -1,4 +1,7 @@
|
||||
from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
|
||||
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_aitoolkit_format,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
|
||||
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_diffusers_format,
|
||||
@@ -11,7 +14,7 @@ from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_u
|
||||
)
|
||||
|
||||
|
||||
def flux_format_from_state_dict(state_dict):
|
||||
def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
|
||||
if is_state_dict_likely_in_flux_kohya_format(state_dict):
|
||||
return FluxLoRAFormat.Kohya
|
||||
elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):
|
||||
@@ -20,5 +23,7 @@ def flux_format_from_state_dict(state_dict):
|
||||
return FluxLoRAFormat.Diffusers
|
||||
elif is_state_dict_likely_flux_control(state_dict):
|
||||
return FluxLoRAFormat.Control
|
||||
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
|
||||
return FluxLoRAFormat.AIToolkit
|
||||
else:
|
||||
return None
|
||||
|
||||
@@ -30,18 +30,13 @@ class RectifiedFlowInpaintExtension:
|
||||
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
|
||||
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
|
||||
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
|
||||
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
|
||||
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
|
||||
# threshold worked well.
|
||||
# 1.0. This helps to produce more coherent seams around the inpainted region.
|
||||
|
||||
# We use a small epsilon to avoid any potential issues with floating point precision.
|
||||
eps = 1e-4
|
||||
mask_gradient_t_cutoff = 0.5
|
||||
if t_prev > mask_gradient_t_cutoff:
|
||||
# Early in the denoising process, use the inpaint mask as-is.
|
||||
return self._inpaint_mask
|
||||
else:
|
||||
# After the cut-off, promote all non-zero mask values to 1.0.
|
||||
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
|
||||
mask = torch.where(self._inpaint_mask >= t_prev + eps, 1.0, 0.0).to(
|
||||
dtype=self._inpaint_mask.dtype, device=self._inpaint_mask.device
|
||||
)
|
||||
|
||||
return mask
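A standalone sketch of the new per-pixel thresholding above: each mask value acts as a denoise limit, and a pixel is promoted to full weight once the current timestep `t_prev` drops below it. Only torch is needed; the mask values are illustrative.

```python
import torch

# Per-pixel mask values act as denoise limits.
inpaint_mask = torch.tensor([0.0, 0.3, 0.7, 1.0])
eps = 1e-4

for t_prev in (0.9, 0.5, 0.2):
    mask = torch.where(inpaint_mask >= t_prev + eps, 1.0, 0.0)
    print(t_prev, mask.tolist())

# 0.9 -> [0.0, 0.0, 0.0, 1.0]
# 0.5 -> [0.0, 0.0, 1.0, 1.0]
# 0.2 -> [0.0, 1.0, 1.0, 1.0]
```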
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@ const config: KnipConfig = {
|
||||
'src/features/controlLayers/konva/util.ts',
|
||||
// TODO(psyche): restore HRF functionality?
|
||||
'src/features/hrf/**',
|
||||
// This feature is (temporarily?) disabled
|
||||
'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
|
||||
],
|
||||
ignoreBinaries: ['only-allow'],
|
||||
paths: {
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
"cmdk": "^1.1.1",
|
||||
"compare-versions": "^6.1.1",
|
||||
"filesize": "^10.1.6",
|
||||
"fracturedjsonjs": "^4.0.2",
|
||||
"fracturedjsonjs": "^4.1.0",
|
||||
"framer-motion": "^11.10.0",
|
||||
"i18next": "^25.0.1",
|
||||
"i18next-http-backend": "^3.0.2",
|
||||
|
||||
invokeai/frontend/web/pnpm-lock.yaml (generated)
@@ -54,8 +54,8 @@ dependencies:
|
||||
specifier: ^10.1.6
|
||||
version: 10.1.6
|
||||
fracturedjsonjs:
|
||||
specifier: ^4.0.2
|
||||
version: 4.0.2
|
||||
specifier: ^4.1.0
|
||||
version: 4.1.0
|
||||
framer-motion:
|
||||
specifier: ^11.10.0
|
||||
version: 11.10.0(react-dom@18.3.1)(react@18.3.1)
|
||||
@@ -5280,8 +5280,8 @@ packages:
|
||||
signal-exit: 4.1.0
|
||||
dev: true
|
||||
|
||||
/fracturedjsonjs@4.0.2:
|
||||
resolution: {integrity: sha512-+vGJH9wK0EEhbbn50V2sOebLRaar1VL3EXr02kxchIwpkhQk0ItrPjIOtYPYuU9hNFpVzxjrPgzjtMJih+ae4A==}
|
||||
/fracturedjsonjs@4.1.0:
|
||||
resolution: {integrity: sha512-qy6LPA8OOiiyRHt5/sNKDayD7h5r3uHmHxSOLbBsgtU/hkt5vOVWOR51MdfDbeCNfj7k/dKCRbXYm8FBAJcgWQ==}
|
||||
dev: false
|
||||
|
||||
/framer-motion@10.18.0(react-dom@18.3.1)(react@18.3.1):
|
||||
|
||||
@@ -24,15 +24,18 @@
|
||||
"autoAddBoard": "Auto-Add Board",
|
||||
"boards": "Boards",
|
||||
"selectedForAutoAdd": "Selected for Auto-Add",
|
||||
"bottomMessage": "Deleting this board and its images will reset any features currently using them.",
|
||||
"bottomMessage": "Deleting images will reset any features currently using them.",
|
||||
"cancel": "Cancel",
|
||||
"changeBoard": "Change Board",
|
||||
"clearSearch": "Clear Search",
|
||||
"deleteBoard": "Delete Board",
|
||||
"deleteBoardAndImages": "Delete Board and Images",
|
||||
"deleteBoardOnly": "Delete Board Only",
|
||||
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
|
||||
"deletedBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
|
||||
"uncategorizedImages": "Uncategorized Images",
|
||||
"deleteAllUncategorizedImages": "Delete All Uncategorized Images",
|
||||
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
|
||||
"hideBoards": "Hide Boards",
|
||||
"loading": "Loading...",
|
||||
"menuItemAutoAdd": "Auto-add to this Board",
|
||||
@@ -46,7 +49,7 @@
|
||||
"searchBoard": "Search Boards...",
|
||||
"selectBoard": "Select a Board",
|
||||
"shared": "Shared Boards",
|
||||
"topMessage": "This board contains images used in the following features:",
|
||||
"topMessage": "This selection contains images used in the following features:",
|
||||
"unarchiveBoard": "Unarchive Board",
|
||||
"uncategorized": "Uncategorized",
|
||||
"viewBoards": "View Boards",
|
||||
@@ -1907,11 +1910,13 @@
|
||||
"addPositivePrompt": "Add $t(controlLayers.prompt)",
|
||||
"addNegativePrompt": "Add $t(controlLayers.negativePrompt)",
|
||||
"addReferenceImage": "Add $t(controlLayers.referenceImage)",
|
||||
"addImageNoise": "Add $t(controlLayers.imageNoise)",
|
||||
"addRasterLayer": "Add $t(controlLayers.rasterLayer)",
|
||||
"addControlLayer": "Add $t(controlLayers.controlLayer)",
|
||||
"addInpaintMask": "Add $t(controlLayers.inpaintMask)",
|
||||
"addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
|
||||
"addGlobalReferenceImage": "Add $t(controlLayers.globalReferenceImage)",
|
||||
"addDenoiseLimit": "Add $t(controlLayers.denoiseLimit)",
|
||||
"rasterLayer": "Raster Layer",
|
||||
"controlLayer": "Control Layer",
|
||||
"inpaintMask": "Inpaint Mask",
|
||||
@@ -2009,8 +2014,10 @@
|
||||
"resetCanvasLayers": "Reset Canvas Layers",
|
||||
"resetGenerationSettings": "Reset Generation Settings",
|
||||
"replaceCurrent": "Replace Current",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
|
||||
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
|
||||
"imageNoise": "Image Noise",
|
||||
"denoiseLimit": "Denoise Limit",
|
||||
"warnings": {
|
||||
"problemsFound": "Problems found",
|
||||
"unsupportedModel": "layer not supported for selected base model",
|
||||
@@ -2419,9 +2426,8 @@
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "What's New in Invoke",
|
||||
"items": [
|
||||
"Nvidia 50xx GPUs: Invoke uses PyTorch 2.7.0, which is required for these GPUs.",
|
||||
"Model Relationships: Link LoRAs to main models, and the LoRAs will show up first in the list.",
|
||||
"IP Adapter: New Style (Strong) and Style (Precise) methods for SDXL and SD1.5 models."
|
||||
"Inpainting: Per-mask noise levels and denoise limits.",
|
||||
"Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
|
||||
],
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchRecentReleaseVideos": "Watch Recent Release Videos",
|
||||
|
||||
@@ -883,7 +883,8 @@
|
||||
"problemUnpublishingWorkflow": "Problema durante l'annullamento della pubblicazione del flusso di lavoro",
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1085,11 +1086,11 @@
|
||||
"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
|
||||
"cancel": "Annulla",
|
||||
"addBoard": "Aggiungi Bacheca",
|
||||
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
|
||||
"bottomMessage": "L'eliminazione delle immagini reimposterà tutte le funzionalità che le stanno utilizzando.",
|
||||
"changeBoard": "Cambia Bacheca",
|
||||
"loading": "Caricamento in corso ...",
|
||||
"clearSearch": "Cancella Ricerca",
|
||||
"topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"topMessage": "Questa selezione contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"move": "Sposta",
|
||||
"myBoard": "Bacheca",
|
||||
"searchBoard": "Cerca bacheche ...",
|
||||
@@ -1100,7 +1101,7 @@
|
||||
"deleteBoardOnly": "solo la Bacheca",
|
||||
"deleteBoard": "Elimina Bacheca",
|
||||
"deleteBoardAndImages": "Bacheca e Immagini",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate nella bacheca \"Non categorizzato\".",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate in uno stato non categorizzato.",
|
||||
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
|
||||
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
|
||||
"movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:",
|
||||
@@ -1122,8 +1123,11 @@
|
||||
"noBoards": "Nessuna bacheca {{boardType}}",
|
||||
"hideBoards": "Nascondi bacheche",
|
||||
"viewBoards": "Visualizza bacheche",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche cancellate non possono essere ripristinate. Selezionando 'Cancella solo bacheca', le immagini verranno spostate nella bacheca \"Non categorizzato\" privata dell'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca"
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\", le immagini verranno spostate in uno stato privato e non categorizzato per l'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca",
|
||||
"uncategorizedImages": "Immagini non categorizzate",
|
||||
"deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
|
||||
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
@@ -2295,7 +2299,7 @@
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"useImage": "Usa immagine",
|
||||
"resetGenerationSettings": "Ripristina impostazioni di generazione",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
|
||||
@@ -2344,7 +2348,11 @@
|
||||
"lowest": "Il più basso",
|
||||
"medium": "Medio",
|
||||
"highest": "La più alta"
|
||||
}
|
||||
},
|
||||
"denoiseLimit": "Limite di riduzione del rumore",
|
||||
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
|
||||
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Rumore dell'immagine"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2444,8 +2452,8 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"GPU Nvidia 50xx: Invoke utilizza PyTorch 2.7.0, necessario per queste GPU.",
|
||||
"Relazioni tra modelli: collega i LoRA ai modelli principali e i LoRA verranno visualizzati per primi nell'elenco."
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
|
||||
@@ -392,7 +392,7 @@
|
||||
"title": "全選択"
|
||||
},
|
||||
"addNode": {
|
||||
"desc": "ノード追加メニューを開く.",
|
||||
"desc": "ノード追加メニューを開く。",
|
||||
"title": "ノードを追加"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
@@ -652,7 +652,9 @@
|
||||
"filterModels": "フィルターモデル",
|
||||
"modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
|
||||
"manageModels": "モデル管理",
|
||||
"hfTokenReset": "ハギングフェイストークンリセット"
|
||||
"hfTokenReset": "ハギングフェイストークンリセット",
|
||||
"relatedModels": "関連のあるモデル",
|
||||
"showOnlyRelatedModels": "関連している"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -872,7 +874,8 @@
|
||||
"problemDeletingWorkflow": "ワークフローが削除された問題",
|
||||
"imageNotLoadedDesc": "画像を見つけられません",
|
||||
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -1153,11 +1156,11 @@
|
||||
"unknownField": "不明なフィールド",
|
||||
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
|
||||
"loadingTemplates": "読み込み中 {{name}}",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします.",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
|
||||
"validateConnections": "接続とグラフを確認する",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"newWorkflowDesc": "新しいワークフローを作りますか?",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型:{type}}",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
|
||||
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
|
||||
"unableToLoadWorkflow": "ワークフローが読み込めません",
|
||||
"unableToValidateWorkflow": "ワークフローを確認できません",
|
||||
@@ -1200,13 +1203,13 @@
|
||||
"downloadBoard": "ボードをダウンロード",
|
||||
"changeBoard": "ボードを変更",
|
||||
"loading": "ロード中...",
|
||||
"topMessage": "このボードには、以下の機能で使用されている画像が含まれています:",
|
||||
"bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。",
|
||||
"topMessage": "この選択には、次の機能で使用される画像が含まれています:",
|
||||
"bottomMessage": "この画像を削除すると、現在利用している機能はリセットされます。",
|
||||
"clearSearch": "検索をクリア",
|
||||
"deleteBoard": "ボードの削除",
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
|
||||
"deletedBoardsCannotbeRestored": "削除したボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は未分類の状態になります。",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
@@ -1221,9 +1224,12 @@
|
||||
"imagesWithCount_other": "{{count}} の画像",
|
||||
"updateBoardError": "ボード更新エラー",
|
||||
"selectedForAutoAdd": "自動追加に選択済み",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は作成者に対して非公開の未分類状態になります。",
|
||||
"noBoards": "{{boardType}} ボードがありません",
|
||||
"viewBoards": "ボードを表示"
|
||||
"viewBoards": "ボードを表示",
|
||||
"uncategorizedImages": "分類されていない画像",
|
||||
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
|
||||
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -1246,7 +1252,8 @@
|
||||
"paramRatio": {
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
"生成された画像の縦横比。",
|
||||
"SD1.5 モデルの場合は 512x512 に相当する画像サイズ (ピクセル数) が推奨され, SDXL モデルの場合は 1024x1024 に相当するサイズが推奨されます."
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
@@ -1288,25 +1295,49 @@
|
||||
]
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
"heading": "アップスケール手法",
|
||||
"paragraphs": [
|
||||
"高解像度修正のために画像を拡大するために使用される方法。"
|
||||
]
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
"heading": "アップスケールモデル",
|
||||
"paragraphs": [
|
||||
"アップスケールモデルは、ディテールを追加する前に画像を出力サイズに合わせて拡大縮小します。サポートされているアップスケールモデルであればどれでも使用できますが、写真や線画など、特定の種類の画像に特化したモデルもあります。"
|
||||
]
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成される画像のアスペクト比。比率を変更すると、幅と高さもそれに応じて更新されます。",
|
||||
"「最適化」は、選択したモデルの幅と高さを最適な寸法に設定します。"
|
||||
]
|
||||
},
|
||||
"refinerSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で実行されるステップの数。",
|
||||
"生成ステップと似ています。"
|
||||
]
|
||||
},
|
||||
"paramVAE": {
|
||||
"heading": "VAE"
|
||||
"heading": "VAE",
|
||||
"paragraphs": [
|
||||
"AI 出力を最終画像に変換するために使用されるモデル。"
|
||||
]
|
||||
},
|
||||
"scale": {
|
||||
"heading": "スケール"
|
||||
"heading": "スケール",
|
||||
"paragraphs": [
|
||||
"スケールは出力画像のサイズを制御し、入力画像の解像度の倍数に基づいて決定されます。例えば、1024x1024の画像を2倍に拡大すると、2048x2048の出力が生成されます。"
|
||||
]
|
||||
},
|
||||
"refinerScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
"heading": "スケジューラー",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で使用されるスケジューラ。",
|
||||
"生成スケジューラに似ています。"
|
||||
]
|
||||
},
|
||||
"compositingCoherenceMode": {
|
||||
"heading": "モード",
|
||||
@@ -1315,13 +1346,23 @@
|
||||
]
|
||||
},
|
||||
"paramModel": {
|
||||
"heading": "モデル"
|
||||
"heading": "モデル",
|
||||
"paragraphs": [
|
||||
"生成に使用されるモデル。異なるモデルは、異なる美的結果とコンテンツを生成するように特化するようにトレーニングされています。"
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
"heading": "高さ"
|
||||
"heading": "高さ",
|
||||
"paragraphs": [
|
||||
"生成される画像の高さ。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"paramSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"各生成で実行されるステップの数.",
|
||||
"通常, ステップ数が多いほど, より高品質な画像が作成されますが生成時間も長くなります."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "モード",
|
||||
@@ -1330,10 +1371,18 @@
|
||||
]
|
||||
},
|
||||
"paramSeed": {
|
||||
"heading": "シード"
|
||||
"heading": "シード",
|
||||
"paragraphs": [
|
||||
"生成に使用する始動ノイズを制御します.",
|
||||
"同じ生成設定で同一の結果を生成するには, 「ランダム」オプションを無効にします."
|
||||
]
|
||||
},
|
||||
"paramIterations": {
|
||||
"heading": "生成回数"
|
||||
"heading": "生成回数",
|
||||
"paragraphs": [
|
||||
"生成する画像の数。",
|
||||
"動的プロンプトが有効になっている場合、各プロンプトはこの回数生成されます。"
|
||||
]
|
||||
},
|
||||
"controlNet": {
|
||||
"heading": "ControlNet",
|
||||
@@ -1342,16 +1391,29 @@
|
||||
]
|
||||
},
|
||||
"paramWidth": {
|
||||
"heading": "幅"
|
||||
"heading": "幅",
|
||||
"paragraphs": [
|
||||
"生成される画像の幅。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"lora": {
|
||||
"heading": "LoRA"
|
||||
"heading": "LoRA",
|
||||
"paragraphs": [
|
||||
"ベースモデルと組み合わせて使用する軽量モデル."
|
||||
]
|
||||
},
|
||||
"loraWeight": {
|
||||
"heading": "重み"
|
||||
"heading": "重み",
|
||||
"paragraphs": [
|
||||
"LoRA の重み. 重みを大きくすると, 最終的な画像への影響が大きくなります."
|
||||
]
|
||||
},
|
||||
"patchmatchDownScaleSize": {
|
||||
"heading": "Downscale"
|
||||
"heading": "Downscale",
|
||||
"paragraphs": [
|
||||
"埋め込む前にどの程度のダウンスケーリングが行われるか。",
|
||||
"ダウンスケーリングを大きくするとパフォーマンスは向上しますが、品質は低下します。"
|
||||
]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "重み",
|
||||
@@ -1437,7 +1499,8 @@
|
||||
"heading": "ダイナミックプロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトは,単一のプロンプトを複数のプロンプトに解析します.",
|
||||
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます."
|
||||
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます.",
|
||||
"1 つのプロンプト内で構文を何度でも使用できますが, 生成されるプロンプトの数を Max Prompts 設定で制限するようにしてください."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
@@ -1457,6 +1520,159 @@
|
||||
"paragraphs": [
|
||||
"プロンプトまたは コントロールネットのいずれかを重視します."
|
||||
]
|
||||
},
|
||||
"noiseUseCPU": {
|
||||
"paragraphs": [
|
||||
"CPU または GPU でノイズを生成するかどうかを制御します.",
|
||||
"CPU ノイズを有効にすると, 特定のシードによってどのマシンでも同じ画像が生成されます.",
|
||||
"CPU ノイズを有効にしてもパフォーマンスに影響はありません."
|
||||
],
|
||||
"heading": "CPUノイズを使用する"
|
||||
},
|
||||
"dynamicPromptsMaxPrompts": {
|
||||
"heading": "最大プロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトによって生成できるプロンプトの数を制限します."
|
||||
]
|
||||
},
|
||||
"dynamicPromptsSeedBehaviour": {
|
||||
"paragraphs": [
|
||||
"プロンプトを生成するときにシードがどのように使用されるかを制御します.",
|
||||
"反復ごとに固有のシードを使用します. 単一のシードでプロンプトのバリエーションを試す場合に使用します.",
|
||||
"たとえば, プロンプトが 5 つある場合, 各画像は同じシードを使用します.",
|
||||
"「画像ごと」では, 画像ごとに固有のシード値が使用されます. これにより、より多くのバリエーションが得られます."
|
||||
],
|
||||
"heading": "シード行動"
|
||||
},
|
||||
"imageFit": {
|
||||
"paragraphs": [
|
||||
"初期画像の幅と高さを出力画像に合わせてサイズ変更します. 有効にすることをお勧めします."
|
||||
],
|
||||
"heading": "初期画像を出力サイズに合わせる"
|
||||
},
|
||||
"infillMethod": {
|
||||
"heading": "充填方法",
|
||||
"paragraphs": [
|
||||
"アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"ガイダンス値が高すぎると過飽和状態になる可能性があり、ガイダンス値が高すぎるか低すぎると生成結果に歪みが生じる可能性があります。ガイダンスはFLUX DEVモデルにのみ適用されます。"
|
||||
],
|
||||
"heading": "ガイダンス"
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"生成されたイメージがラスター レイヤーとどの程度異なるかを制御します。",
|
||||
"強度が低いほど、結合された表示ラスターレイヤーに近くなります。強度が高いほど、グローバルプロンプトに大きく依存します。",
|
||||
"表示されるコンテンツを持つラスター レイヤーがない場合、この設定は無視されます。"
|
||||
],
|
||||
"heading": "ディノイジングストレングス"
|
||||
},
|
||||
"refinerStart": {
|
||||
"heading": "リファイナースタート",
|
||||
"paragraphs": [
|
||||
"生成プロセスのどの時点でリファイナーが使用され始めるか。",
|
||||
"0 はリファイナーが生成プロセス全体で使用されることを意味し、0.8 は、リファイナーが生成プロセスの最後の 20% で使用されることを意味します。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "イメージtoイメージの最適化",
|
||||
"paragraphs": [
|
||||
"「イメージtoイメージを最適化」を有効にすると、Fluxモデルを用いた画像間変換およびインペインティング変換において、より段階的なノイズ除去強度スケールが適用されます。この設定により、画像に適用される変化量を制御する能力が向上しますが、標準のノイズ除去強度スケールを使用したい場合はオフにすることができます。この設定は現在調整中で、ベータ版です。"
|
||||
]
|
||||
},
|
||||
"refinerPositiveAestheticScore": {
|
||||
"heading": "ポジティブ美的スコア",
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美的スコアの高い画像に類似するように生成を重み付けします。"
|
||||
]
|
||||
},
|
||||
"paramCFGScale": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"CFG スケールの値が高すぎると、飽和しすぎて生成結果が歪む可能性があります。 "
|
||||
],
|
||||
"heading": "CFGスケール"
|
||||
},
|
||||
"paramVAEPrecision": {
|
||||
"paragraphs": [
|
||||
"VAE エンコードおよびデコード時に使用される精度。",
|
||||
"Fp16/Half 精度は、画像のわずかな変化を犠牲にして、より効率的です。"
|
||||
],
|
||||
"heading": "VAE精度"
|
||||
},
|
||||
"refinerModel": {
|
||||
"heading": "リファイナーモデル",
|
||||
"paragraphs": [
|
||||
"生成プロセスの精製部分で使用されるモデル。",
|
||||
"世代モデルに似ています。"
|
||||
]
|
||||
},
|
||||
"refinerCfgScale": {
|
||||
"heading": "CFGスケール",
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスに与える影響を制御する。",
|
||||
"生成CFG スケールに似ています。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingYAxis": {
|
||||
"heading": "シームレスタイリングY軸",
|
||||
"paragraphs": [
|
||||
"画像を垂直軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"scaleBeforeProcessing": {
|
||||
"heading": "プロセス前のスケール値",
|
||||
"paragraphs": [
|
||||
"「自動」は、画像生成プロセスの前に、選択した領域をモデルに最適なサイズに拡大縮小します。",
|
||||
"「手動」では、画像生成プロセスの前に、選択した領域を拡大縮小する幅と高さを選択できます。"
|
||||
]
|
||||
},
|
||||
"creativity": {
|
||||
"heading": "クリエイティビティ",
|
||||
"paragraphs": [
|
||||
"クリエイティビティは、ディテールを追加する際のモデルに与えられる自由度を制御します。クリエイティビティが低いと元のイメージに近いままになり、クリエイティビティが高いとより多くの変化を加えることができます。プロンプトを使用する場合、クリエイティビティが高いとプロンプトの影響が増します。"
|
||||
]
|
||||
},
|
||||
"paramHrf": {
|
||||
"heading": "高解像度修正を有効にする",
|
||||
"paragraphs": [
|
||||
"モデルに最適な解像度よりも高い解像度で、高品質な画像を生成します。通常、生成された画像内の重複を防ぐために使用されます。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingXAxis": {
|
||||
"heading": "シームレスタイリングX軸",
|
||||
"paragraphs": [
|
||||
"画像を水平軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"paramCFGRescaleMultiplier": {
|
||||
"paragraphs": [
|
||||
"ゼロ端末 SNR (ztsnr) を使用してトレーニングされたモデルに使用される、CFG ガイダンスのリスケールマルチプライヤー。",
|
||||
"これらのモデルの場合、推奨値は 0.7 です。"
|
||||
],
|
||||
"heading": "CFG リスケールマルチプライヤー"
|
||||
},
|
||||
"structure": {
|
||||
"heading": "ストラクチャ",
|
||||
"paragraphs": [
|
||||
"ストラクチャは、出力画像が元のレイアウトにどれだけ忠実に従うかを制御します。低いストラクチャでは大幅な変更が可能ですが、高いストラクチャでは元の構成とレイアウトが厳密に維持されます。"
|
||||
]
|
||||
},
|
||||
"refinerNegativeAestheticScore": {
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美観スコアが低い画像に類似するように生成に重み付けします。"
|
||||
],
|
||||
"heading": "ネガティブ美的スコア"
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商用ライセンス",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev]モデルは、FLUX [dev]非商用ライセンスに基づいてライセンスされています。Invokeでこのモデルタイプを商用目的で使用する場合は、当社のウェブサイトをご覧ください。"
|
||||
]
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -1629,7 +1845,106 @@
|
||||
"workflows": "ワークフロー",
|
||||
"ascending": "昇順",
|
||||
"name": "名前",
|
||||
"descending": "降順"
|
||||
"descending": "降順",
|
||||
"searchPlaceholder": "名前、説明、タグで検索",
|
||||
"projectWorkflows": "プロジェクトワークフロー",
|
||||
"searchWorkflows": "ワークフローを検索",
|
||||
"updated": "アップデート",
|
||||
"published": "公表",
|
||||
"builder": {
|
||||
"label": "ラベル",
|
||||
"containerPlaceholder": "空のコンテナ",
|
||||
"showDescription": "説明を表示",
|
||||
"emptyRootPlaceholderEditMode": "開始するには、フォーム要素またはノード フィールドをここにドラッグします。",
|
||||
"divider": "仕切り",
|
||||
"deleteAllElements": "すべてのフォーム要素を削除",
|
||||
"heading": "見出し",
|
||||
"nodeField": "ノードフィールド",
|
||||
"zoomToNode": "ノードにズーム",
|
||||
"dropdown": "ドロップダウン",
|
||||
"resetOptions": "オプションをリセット",
|
||||
"both": "両方",
|
||||
"builder": "フォームビルダー",
|
||||
"text": "テキスト",
|
||||
"row": "行",
|
||||
"multiLine": "マルチライン",
|
||||
"resetAllNodeFields": "すべてのノードフィールドをリセット",
|
||||
"slider": "スライダー",
|
||||
"layout": "レイアウト",
|
||||
"addToForm": "フォームに追加",
|
||||
"headingPlaceholder": "空の見出し",
|
||||
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
|
||||
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
|
||||
"component": "コンポーネント",
|
||||
"textPlaceholder": "空のテキスト",
|
||||
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
|
||||
"addOption": "オプションを追加",
|
||||
"singleLine": "単線",
|
||||
"numberInput": "数値入力",
|
||||
"column": "列",
|
||||
"container": "コンテナ",
|
||||
"containerRowLayout": "コンテナ(行レイアウト)",
|
||||
"containerColumnLayout": "コンテナ(列レイアウト)",
|
||||
"maximum": "最大",
|
||||
"published": "公開済み",
|
||||
"publishedWorkflowOutputs": "アウトプット",
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
"download": "ダウンロード",
|
||||
"savingWorkflow": "ワークフローを保存しています...",
|
||||
"problemSavingWorkflow": "ワークフローの保存に関する問題",
|
||||
"convertGraph": "グラフを変換",
|
||||
"downloadWorkflow": "ファイルに保存",
|
||||
"saveWorkflow": "ワークフローを保存",
|
||||
"userWorkflows": "ユーザーワークフロー",
|
||||
"yourWorkflows": "あなたのワークフロー",
|
||||
"edit": "編集",
|
||||
"workflowLibrary": "ワークフローライブラリ",
|
||||
"workflowSaved": "ワークフローが保存されました",
|
||||
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
|
||||
"workflowCleared": "ワークフローが作成されました",
|
||||
"autoLayout": "オートレイアウト",
|
||||
"view": "ビュー",
|
||||
"saveChanges": "変更を保存",
|
||||
"noDescription": "説明なし",
|
||||
"recommended": "あなたへのおすすめ",
|
||||
"noRecentWorkflows": "最近のワークフローがありません",
|
||||
"problemLoading": "ワークフローのローディングに関する問題",
|
||||
"newWorkflowCreated": "新しいワークフローが作成されました",
|
||||
"noWorkflows": "ワークフローがありません",
|
||||
"copyShareLink": "共有リンクをコピー",
|
||||
"copyShareLinkForWorkflow": "ワークフローの共有リンクをコピー",
|
||||
"workflowThumbnail": "ワークフローサムネイル",
|
||||
"loadWorkflow": "$t(common.load) ワークフロー",
|
||||
"shared": "共有",
|
||||
"openWorkflow": "ワークフローを開く",
|
||||
"emptyStringPlaceholder": "<空の文字列>",
|
||||
"browseWorkflows": "ワークフローを閲覧する",
|
||||
"saveWorkflowAs": "ワークフローとして保存",
|
||||
"private": "プライベート",
|
||||
"deselectAll": "すべて選択解除",
|
||||
"delete": "削除",
|
||||
"openLibrary": "ライブラリを開く",
|
||||
"loadMore": "もっと読み込む",
|
||||
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
|
||||
"created": "作成されました",
|
||||
"workflowEditorMenu": "ワークフローエディターメニュー",
|
||||
"defaultWorkflows": "デフォルトワークフロー",
|
||||
"allLoaded": "すべてのワークフローが読み込まれました",
|
||||
"filterByTags": "タグでフィルター",
|
||||
"recentlyOpened": "最近開いた",
|
||||
"opened": "オープン",
|
||||
"deleteWorkflow": "ワークフローを削除",
|
||||
"deleteWorkflow2": "このワークフローを削除してもよろしいですか? 元に戻すことはできません。",
|
||||
"loadFromGraph": "グラフからワークフローをロード",
|
||||
"workflowName": "ワークフロー名",
|
||||
"loading": "ワークフローをロードしています",
|
||||
"uploadWorkflow": "ファイルからロードする"
|
||||
},
|
||||
"system": {
|
||||
"logNamespaces": {
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
"boards": "Bảng",
|
||||
"selectedForAutoAdd": "Đã Chọn Để Tự động thêm",
|
||||
"myBoard": "Bảng Của Tôi",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"changeBoard": "Thay Đổi Bảng",
|
||||
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
|
||||
"updateBoardError": "Lỗi khi cập nhật Bảng",
|
||||
@@ -41,18 +41,21 @@
|
||||
"deleteBoard": "Xoá Bảng",
|
||||
"deleteBoardAndImages": "Xoá Bảng Lẫn Hình ảnh",
|
||||
"deleteBoardOnly": "Chỉ Xoá Bảng",
|
||||
"deletedBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Xoá bảng này lẫn ảnh của nó sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"deletedBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Việc xóa ảnh sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
|
||||
"move": "Di Chuyển",
|
||||
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
|
||||
"topMessage": "Lựa chọn này chứa ảnh được dùng với những tính năng sau:",
|
||||
"uncategorized": "Chưa Sắp Xếp",
|
||||
"archived": "Được Lưu Trữ",
|
||||
"loading": "Đang Tải...",
|
||||
"selectBoard": "Chọn Bảng",
|
||||
"archiveBoard": "Lưu trữ Bảng",
|
||||
"unarchiveBoard": "Ngừng Lưu Trữ Bảng",
|
||||
"assetsWithCount_other": "{{count}} tài nguyên"
|
||||
"assetsWithCount_other": "{{count}} tài nguyên",
|
||||
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
|
||||
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
|
||||
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
|
||||
},
|
||||
"gallery": {
|
||||
"swapImages": "Đổi Hình Ảnh",
|
||||
@@ -2059,7 +2062,7 @@
|
||||
"colorPicker": "Chọn Màu"
|
||||
},
|
||||
"mergingLayers": "Đang gộp layer",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||
"useImage": "Dùng Hình Ảnh",
|
||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||
@@ -2108,7 +2111,11 @@
|
||||
"imageInfluence": "Ảnh Chi Phối",
|
||||
"medium": "Vừa",
|
||||
"highest": "Cao Nhất"
|
||||
}
|
||||
},
|
||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||
},
|
||||
"stylePresets": {
|
||||
"negativePrompt": "Lệnh Tiêu Cực",
|
||||
@@ -2249,7 +2256,8 @@
|
||||
"problemUnpublishingWorkflowDescription": "Có vấn đề khi ngừng đăng tải workflow. Vui lòng thử lại sau.",
|
||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint."
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
|
||||
@@ -0,0 +1,108 @@
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import type { Dimensions } from 'features/controlLayers/store/types';
|
||||
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
|
||||
import { debounce } from 'lodash-es';
|
||||
import { type RefObject, useCallback, useEffect, useMemo } from 'react';
|
||||
|
||||
type Options = {
|
||||
trackWidth: boolean;
|
||||
trackHeight: boolean;
|
||||
initialWidth?: number;
|
||||
initialHeight?: number;
|
||||
};
|
||||
|
||||
/**
|
||||
* Persists the width and/or height of a text area to redux.
|
||||
* @param id The unique id of this textarea, used as key to storage
|
||||
* @param ref A ref to the textarea element
|
||||
* @param options.trackWidth Whether to track width
|
||||
 * @param options.trackHeight Whether to track height
|
||||
* @param options.initialWidth An optional initial width in pixels
|
||||
* @param options.initialHeight An optional initial height in pixels
|
||||
*/
|
||||
export const usePersistedTextAreaSize = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
|
||||
const { dispatch, getState } = useAppStore();
|
||||
|
||||
const onResize = useCallback(
|
||||
(size: Partial<Dimensions>) => {
|
||||
dispatch(textAreaSizesStateChanged({ id, size }));
|
||||
},
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const debouncedOnResize = useMemo(() => debounce(onResize, 300), [onResize]);
|
||||
|
||||
useEffect(() => {
|
||||
const el = ref.current;
|
||||
if (!el) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Nothing to do here if we are not tracking anything.
|
||||
if (!options.trackHeight && !options.trackWidth) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Before registering the observer, grab the stored size from state - we may need to restore the size.
|
||||
const storedSize = selectUiSlice(getState()).textAreaSizes[id];
|
||||
|
||||
// Prefer to restore the stored size, falling back to initial size if it exists
|
||||
if (storedSize?.width !== undefined) {
|
||||
el.style.width = `${storedSize.width}px`;
|
||||
} else if (options.initialWidth !== undefined) {
|
||||
el.style.width = `${options.initialWidth}px`;
|
||||
}
|
||||
|
||||
if (storedSize?.height !== undefined) {
|
||||
el.style.height = `${storedSize.height}px`;
|
||||
} else if (options.initialHeight !== undefined) {
|
||||
el.style.height = `${options.initialHeight}px`;
|
||||
}
|
||||
|
||||
let currentHeight = el.offsetHeight;
|
||||
let currentWidth = el.offsetWidth;
|
||||
|
||||
const resizeObserver = new ResizeObserver(() => {
|
||||
// We only want to push the changes if a tracked dimension changes
|
||||
let didChange = false;
|
||||
const newSize: Partial<Dimensions> = {};
|
||||
|
||||
if (options.trackHeight) {
|
||||
if (el.offsetHeight !== currentHeight) {
|
||||
didChange = true;
|
||||
currentHeight = el.offsetHeight;
|
||||
}
|
||||
newSize.height = currentHeight;
|
||||
}
|
||||
|
||||
if (options.trackWidth) {
|
||||
if (el.offsetWidth !== currentWidth) {
|
||||
didChange = true;
|
||||
currentWidth = el.offsetWidth;
|
||||
}
|
||||
newSize.width = currentWidth;
|
||||
}
|
||||
|
||||
if (didChange) {
|
||||
debouncedOnResize(newSize);
|
||||
}
|
||||
});
|
||||
|
||||
resizeObserver.observe(el);
|
||||
|
||||
return () => {
|
||||
debouncedOnResize.cancel();
|
||||
resizeObserver.disconnect();
|
||||
};
|
||||
}, [
|
||||
debouncedOnResize,
|
||||
dispatch,
|
||||
getState,
|
||||
id,
|
||||
options.initialHeight,
|
||||
options.initialWidth,
|
||||
options.trackHeight,
|
||||
options.trackWidth,
|
||||
ref,
|
||||
]);
|
||||
};
|
||||
@@ -0,0 +1,28 @@
|
||||
import { Spinner } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { useAllEntityAdapters } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { computed } from 'nanostores';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
export const CanvasBusySpinner = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const allEntityAdapters = useAllEntityAdapters();
|
||||
const $isPendingRectCalculation = useMemo(
|
||||
() =>
|
||||
computed(
|
||||
allEntityAdapters.map(({ transformer }) => transformer.$isPendingRectCalculation),
|
||||
(...values) => values.some((v) => v)
|
||||
),
|
||||
[allEntityAdapters]
|
||||
);
|
||||
const isPendingRectCalculation = useStore($isPendingRectCalculation);
|
||||
const isRasterizing = useStore(canvasManager.stateApi.$isRasterizing);
|
||||
const isCompositing = useStore(canvasManager.compositor.$isBusy);
|
||||
|
||||
if (isRasterizing || isCompositing || isPendingRectCalculation) {
|
||||
return <Spinner opacity={0.3} />;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
CanvasBusySpinner.displayName = 'CanvasBusySpinner';
|
||||
@@ -12,6 +12,7 @@ import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
|
||||
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
|
||||
import { CanvasAlertsSelectedEntityStatus } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSelectedEntityStatus';
|
||||
import { CanvasAlertsSendingToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
|
||||
import { CanvasBusySpinner } from 'features/controlLayers/components/CanvasBusySpinner';
|
||||
import { CanvasContextMenuGlobalMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuGlobalMenuItems';
|
||||
import { CanvasContextMenuSelectedEntityMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuSelectedEntityMenuItems';
|
||||
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
|
||||
@@ -106,6 +107,9 @@ export const CanvasMainPanelContent = memo(() => {
|
||||
<MenuContent />
|
||||
</Menu>
|
||||
</Flex>
|
||||
<Flex position="absolute" bottom={4} insetInlineEnd={4}>
|
||||
<CanvasBusySpinner />
|
||||
</Flex>
|
||||
</CanvasManagerProviderGate>
|
||||
</Flex>
|
||||
)}
|
||||
|
||||
@@ -2,10 +2,11 @@ import { Button, Flex, Text } from '@invoke-ai/ui-library';
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { usePullBboxIntoLayer } from 'features/controlLayers/hooks/saveCanvasHooks';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { replaceCanvasEntityObjectsWithImage } from 'features/imageActions/actions';
|
||||
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { Trans } from 'react-i18next';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
@@ -23,27 +24,27 @@ export const ControlLayerSettingsEmptyState = memo(() => {
|
||||
const onClickGalleryButton = useCallback(() => {
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}, [dispatch]);
|
||||
const pullBboxIntoLayer = usePullBboxIntoLayer(entityIdentifier);
|
||||
|
||||
const components = useMemo(
|
||||
() => ({
|
||||
UploadButton: (
|
||||
<Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
|
||||
),
|
||||
GalleryButton: (
|
||||
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
PullBboxButton: (
|
||||
<Button onClick={pullBboxIntoLayer} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
}),
|
||||
[isBusy, onClickGalleryButton, pullBboxIntoLayer, uploadApi]
|
||||
);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
|
||||
<Text textAlign="center" color="base.300">
|
||||
<Trans
|
||||
i18nKey="controlLayers.controlLayerEmptyState"
|
||||
components={{
|
||||
UploadButton: (
|
||||
<Button
|
||||
isDisabled={isBusy}
|
||||
size="sm"
|
||||
variant="link"
|
||||
color="base.300"
|
||||
{...uploadApi.getUploadButtonProps()}
|
||||
/>
|
||||
),
|
||||
GalleryButton: (
|
||||
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
|
||||
),
|
||||
}}
|
||||
/>
|
||||
<Trans i18nKey="controlLayers.controlLayerEmptyState" components={components} />
|
||||
</Text>
|
||||
<input {...uploadApi.getUploadInputProps()} />
|
||||
</Flex>
|
||||
|
||||
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
|
||||
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
|
||||
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
|
||||
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
|
||||
import { InpaintMaskSettings } from 'features/controlLayers/components/InpaintMask/InpaintMaskSettings';
|
||||
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
|
||||
import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
@@ -28,6 +29,7 @@ export const InpaintMask = memo(({ id }: Props) => {
|
||||
<Spacer />
|
||||
<CanvasEntityHeaderCommonActions />
|
||||
</CanvasEntityHeader>
|
||||
<InpaintMaskSettings />
|
||||
</CanvasEntityContainer>
|
||||
</CanvasEntityStateGate>
|
||||
</InpaintMaskAdapterGate>
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
// import { Button, Flex } from '@invoke-ai/ui-library';
|
||||
// import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
// import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
// import { useTranslation } from 'react-i18next';
|
||||
// import { PiPlusBold } from 'react-icons/pi';
|
||||
|
||||
// Removed buttons because denoise limit is not helpful for many architectures.
// Users can access it via the right-click menu instead.
|
||||
// If buttons for noise or new features are deemed important in the future, add them back here.
|
||||
export const InpaintMaskAddButtons = () => {
|
||||
// Buttons are temporarily hidden. To restore, uncomment the code below.
|
||||
return null;
|
||||
// const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
|
||||
// const { t } = useTranslation();
|
||||
// const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
|
||||
// const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
|
||||
// return (
|
||||
// <Flex w="full" p={2} justifyContent="center">
|
||||
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskDenoiseLimit}>
|
||||
// {t('controlLayers.denoiseLimit')}
|
||||
// </Button>
|
||||
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskNoise}>
|
||||
// {t('controlLayers.imageNoise')}
|
||||
// </Button>
|
||||
// </Flex>
|
||||
// );
|
||||
};
|
@@ -0,0 +1,29 @@
import type { IconButtonProps } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';

type Props = Omit<IconButtonProps, 'aria-label'> & {
onDelete: () => void;
};

export const InpaintMaskDeleteModifierButton = memo(({ onDelete, ...rest }: Props) => {
const { t } = useTranslation();
return (
<IconButton
tooltip={t('common.delete')}
variant="link"
aria-label={t('common.delete')}
icon={<PiXBold />}
onClick={onDelete}
flexGrow={0}
size="sm"
p={0}
colorScheme="error"
{...rest}
/>
);
});

InpaintMaskDeleteModifierButton.displayName = 'InpaintMaskDeleteModifierButton';

@@ -0,0 +1,70 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
inpaintMaskDenoiseLimitChanged,
inpaintMaskDenoiseLimitDeleted,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskDenoiseLimitSlider = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const dispatch = useAppDispatch();

const selectDenoiseLimit = useMemo(
() =>
createSelector(
selectCanvasSlice,
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskDenoiseLimitSlider').denoiseLimit
),
[entityIdentifier]
);
const denoiseLimit = useAppSelector(selectDenoiseLimit);

const handleDenoiseLimitChange = useCallback(
(value: number) => {
dispatch(inpaintMaskDenoiseLimitChanged({ entityIdentifier, denoiseLimit: value }));
},
[dispatch, entityIdentifier]
);

const onDeleteDenoiseLimit = useCallback(() => {
dispatch(inpaintMaskDenoiseLimitDeleted({ entityIdentifier }));
}, [dispatch, entityIdentifier]);

if (denoiseLimit === undefined) {
return null;
}

return (
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
<Flex justifyContent="space-between" w="full" alignItems="center">
<Text fontSize="sm">{t('controlLayers.denoiseLimit')}</Text>
<Flex alignItems="center" gap={1}>
<Text fontSize="sm">{denoiseLimit.toFixed(2)}</Text>
<InpaintMaskDeleteModifierButton onDelete={onDeleteDenoiseLimit} />
</Flex>
</Flex>
<Slider
aria-label={t('controlLayers.denoiseLimit')}
value={denoiseLimit}
min={0}
max={1}
step={0.01}
onChange={handleDenoiseLimitChange}
>
<SliderTrack>
<SliderFilledTrack />
</SliderTrack>
<SliderThumb />
</Slider>
</Flex>
);
});

InpaintMaskDenoiseLimitSlider.displayName = 'InpaintMaskDenoiseLimitSlider';

@@ -7,6 +7,7 @@ import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/component
import { CanvasEntityMenuItemsMergeDown } from 'features/controlLayers/components/common/CanvasEntityMenuItemsMergeDown';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { InpaintMaskMenuItemsAddModifiers } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsAddModifiers';
import { InpaintMaskMenuItemsConvertToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsConvertToSubMenu';
import { InpaintMaskMenuItemsCopyToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsCopyToSubMenu';
import { memo } from 'react';
@@ -20,6 +21,8 @@ export const InpaintMaskMenuItems = memo(() => {
<CanvasEntityMenuItemsDelete asIcon />
</IconMenuItemGroup>
<MenuDivider />
<InpaintMaskMenuItemsAddModifiers />
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<MenuDivider />
<CanvasEntityMenuItemsMergeDown />

@@ -0,0 +1,27 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskMenuItemsAddModifiers = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const isBusy = useCanvasIsBusy();
const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);

return (
<>
<MenuItem onClick={addInpaintMaskNoise} isDisabled={isBusy}>
{t('controlLayers.addImageNoise')}
</MenuItem>
<MenuItem onClick={addInpaintMaskDenoiseLimit} isDisabled={isBusy}>
{t('controlLayers.addDenoiseLimit')}
</MenuItem>
</>
);
});

InpaintMaskMenuItemsAddModifiers.displayName = 'InpaintMaskMenuItemsAddModifiers';

@@ -0,0 +1,67 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { inpaintMaskNoiseChanged, inpaintMaskNoiseDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

export const InpaintMaskNoiseSlider = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const dispatch = useAppDispatch();

const selectNoiseLevel = useMemo(
() =>
createSelector(
selectCanvasSlice,
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskNoiseSlider').noiseLevel
),
[entityIdentifier]
);
const noiseLevel = useAppSelector(selectNoiseLevel);

const handleNoiseChange = useCallback(
(value: number) => {
dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: value }));
},
[dispatch, entityIdentifier]
);

const onDeleteNoise = useCallback(() => {
dispatch(inpaintMaskNoiseDeleted({ entityIdentifier }));
}, [dispatch, entityIdentifier]);

if (noiseLevel === undefined) {
return null;
}

return (
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
<Flex justifyContent="space-between" w="full" alignItems="center">
<Text fontSize="sm">{t('controlLayers.imageNoise')}</Text>
<Flex alignItems="center" gap={1}>
<Text fontSize="sm">{Math.round(noiseLevel * 100)}%</Text>
<InpaintMaskDeleteModifierButton onDelete={onDeleteNoise} />
</Flex>
</Flex>
<Slider
aria-label={t('controlLayers.imageNoise')}
value={noiseLevel}
min={0}
max={1}
step={0.01}
onChange={handleNoiseChange}
>
<SliderTrack>
<SliderFilledTrack />
</SliderTrack>
<SliderThumb />
</Slider>
</Flex>
);
});

InpaintMaskNoiseSlider.displayName = 'InpaintMaskNoiseSlider';

@@ -0,0 +1,47 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { InpaintMaskDenoiseLimitSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskDenoiseLimitSlider';
import { InpaintMaskNoiseSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskNoiseSlider';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';

const buildSelectHasDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
return entity.denoiseLimit !== undefined;
});

const buildSelectHasNoiseLevel = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
return entity.noiseLevel !== undefined;
});

export const InpaintMaskSettings = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const selectHasDenoiseLimit = useMemo(() => buildSelectHasDenoiseLimit(entityIdentifier), [entityIdentifier]);
const selectHasNoiseLevel = useMemo(() => buildSelectHasNoiseLevel(entityIdentifier), [entityIdentifier]);

const hasDenoiseLimit = useAppSelector(selectHasDenoiseLimit);
const hasNoiseLevel = useAppSelector(selectHasNoiseLevel);

if (!hasNoiseLevel && !hasDenoiseLimit) {
// If we show the <InpaintMaskAddButtons /> below, we can remove this check.
// Until then, if there are no sliders to show for the mask settings, return null. This prevents rendering an
// empty settings wrapper div, which adds unnecessary space in the UI.
return null;
}

return (
<CanvasEntitySettingsWrapper>
{/* {!hasNoiseLevel && !hasDenoiseLimit && <InpaintMaskAddButtons />} */}
{hasNoiseLevel && <InpaintMaskNoiseSlider />}
{hasDenoiseLimit && <InpaintMaskDenoiseLimitSlider />}
</CanvasEntitySettingsWrapper>
);
});

InpaintMaskSettings.displayName = 'InpaintMaskSettings';

@@ -1,3 +1,4 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import {
$shift,
CompositeSlider,
@@ -16,7 +17,6 @@ import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { snapToNearest } from 'features/controlLayers/konva/util';
import { round } from 'lodash-es';
import { computed } from 'nanostores';
import type { KeyboardEvent } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
import { PiCaretDownBold, PiMagnifyingGlassMinusBold, PiMagnifyingGlassPlusBold } from 'react-icons/pi';
@@ -68,9 +68,16 @@ const sliderDefaultValue = mapRawValueToSliderValue(100);

const snapCandidates = marks.slice(1, marks.length - 1);

const inputFieldSx = {
paddingInlineEnd: 7,
_focusVisible: {
zIndex: 0,
},
} satisfies SystemStyleObject;

export const CanvasToolbarScale = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const [localScale, setLocalScale] = useState(scale * 100);

const onChangeSlider = useCallback(
@@ -115,7 +122,7 @@ export const CanvasToolbarScale = memo(() => {
return (
<Flex alignItems="center">
<ZoomOutButton />
<Popover>
<Popover isLazy lazyBehavior="unmount">
<PopoverAnchor>
<NumberInput
variant="outline"
@@ -132,7 +139,7 @@ export const CanvasToolbarScale = memo(() => {
onKeyDown={onKeyDown}
clampValueOnBlur={false}
>
<NumberInputField paddingInlineEnd={7} title="" _focusVisible={{ zIndex: 0 }} />
<NumberInputField title="" sx={inputFieldSx} />
<PopoverTrigger>
<IconButton
aria-label="open-slider"
@@ -171,16 +178,17 @@ CanvasToolbarScale.displayName = 'CanvasToolbarScale';

const SCALE_SNAPS = [0.1, 0.15, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 5, 7.5, 10, 15, 20];

const ZoomOutButton = () => {
const ZoomOutButton = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const onClick = useCallback(() => {
const scale = canvasManager.stage.$scale.get();
const nextScale =
SCALE_SNAPS.slice()
.reverse()
.find((snap) => snap < scale) ?? canvasManager.stage.config.MIN_SCALE;
canvasManager.stage.setScale(Math.max(nextScale, canvasManager.stage.config.MIN_SCALE));
}, [canvasManager.stage, scale]);
}, [canvasManager.stage]);

return (
<IconButton
@@ -192,15 +200,17 @@ const ZoomOutButton = () => {
isDisabled={scale <= canvasManager.stage.config.MIN_SCALE}
/>
);
};
});
ZoomOutButton.displayName = 'ZoomOutButton';

const ZoomInButton = () => {
const ZoomInButton = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const onClick = useCallback(() => {
const scale = canvasManager.stage.$scale.get();
const nextScale = SCALE_SNAPS.find((snap) => snap > scale) ?? canvasManager.stage.config.MAX_SCALE;
canvasManager.stage.setScale(Math.min(nextScale, canvasManager.stage.config.MAX_SCALE));
}, [canvasManager.stage, scale]);
}, [canvasManager.stage]);

return (
<IconButton
@@ -212,4 +222,5 @@ const ZoomInButton = () => {
isDisabled={scale >= canvasManager.stage.config.MAX_SCALE}
/>
);
};
});
ZoomInButton.displayName = 'ZoomInButton';

@@ -168,3 +168,33 @@ export const useEntityAdapter = (
assert(adapter, 'useEntityAdapter must be used within a EntityAdapterContext');
return adapter;
};

export const useAllEntityAdapters = () => {
const canvasManager = useCanvasManager();
const regionalGuidanceAdapters = useSyncExternalStore(
canvasManager.adapters.regionMasks.subscribe,
canvasManager.adapters.regionMasks.getSnapshot
);
const rasterLayerAdapters = useSyncExternalStore(
canvasManager.adapters.rasterLayers.subscribe,
canvasManager.adapters.rasterLayers.getSnapshot
);
const controlLayerAdapters = useSyncExternalStore(
canvasManager.adapters.controlLayers.subscribe,
canvasManager.adapters.controlLayers.getSnapshot
);
const inpaintMaskAdapters = useSyncExternalStore(
canvasManager.adapters.inpaintMasks.subscribe,
canvasManager.adapters.inpaintMasks.getSnapshot
);
const allEntityAdapters = useMemo(() => {
return [
...Array.from(rasterLayerAdapters.values()),
...Array.from(controlLayerAdapters.values()),
...Array.from(inpaintMaskAdapters.values()),
...Array.from(regionalGuidanceAdapters.values()),
];
}, [controlLayerAdapters, inpaintMaskAdapters, rasterLayerAdapters, regionalGuidanceAdapters]);

return allEntityAdapters;
};

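For context on the `useAllEntityAdapters` hook added above: `useSyncExternalStore` expects each adapter collection to expose `subscribe(listener)` returning an unsubscribe function, and `getSnapshot()` returning a referentially stable value until the collection changes. A minimal standalone sketch of that contract (an illustration only, not InvokeAI's actual `CanvasManager` API):

```ts
import { useSyncExternalStore } from 'react';

// A tiny external store keyed by id. Each mutation stores a *new* Map so that
// getSnapshot returns a new reference and subscribed components re-render.
const createAdapterStore = <T,>() => {
  let snapshot = new Map<string, T>();
  const listeners = new Set<() => void>();
  return {
    subscribe: (listener: () => void) => {
      listeners.add(listener);
      return () => {
        listeners.delete(listener);
      };
    },
    getSnapshot: () => snapshot,
    set: (id: string, adapter: T) => {
      snapshot = new Map(snapshot).set(id, adapter);
      listeners.forEach((l) => l());
    },
  };
};

// In a component, usage mirrors the hook above:
// const adapters = useSyncExternalStore(store.subscribe, store.getSnapshot);
```
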
@@ -6,6 +6,8 @@ import { getPrefixedId } from 'features/controlLayers/konva/util';
import {
controlLayerAdded,
inpaintMaskAdded,
inpaintMaskDenoiseLimitAdded,
inpaintMaskNoiseAdded,
rasterLayerAdded,
referenceImageAdded,
rgAdded,
@@ -222,6 +224,24 @@ export const useAddRegionalGuidanceNegativePrompt = (entityIdentifier: CanvasEnt
return func;
};

export const useAddInpaintMaskNoise = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
const dispatch = useAppDispatch();
const func = useCallback(() => {
dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
}, [dispatch, entityIdentifier]);

return func;
};

export const useAddInpaintMaskDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
const dispatch = useAppDispatch();
const func = useCallback(() => {
dispatch(inpaintMaskDenoiseLimitAdded({ entityIdentifier }));
}, [dispatch, entityIdentifier]);

return func;
};

export const buildSelectValidRegionalGuidanceActions = (
entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>
) => {

@@ -1,5 +1,6 @@
import { withResult, withResultAsync } from 'common/util/result';
import { CanvasCacheModule } from 'features/controlLayers/konva/CanvasCacheModule';
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
import type { CanvasEntityAdapter, CanvasEntityAdapterFromType } from 'features/controlLayers/konva/CanvasEntity/types';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
@@ -426,6 +427,145 @@ export class CanvasCompositorModule extends CanvasModuleBase {
return this.mergeByEntityIdentifiers(entityIdentifiers, false);
};

/**
* Creates and uploads a grayscale representation of the inpaint mask image noise or denoise limit values.
* This produces an image with a white background where the mask is represented by dark values.
*
* @param adapters The adapters for the canvas entities to composite
* @param rect The region to include in the rasterized image
* @param attribute The attribute to use for grayscale values (defaults to 'noiseLevel')
* @param uploadOptions Options for uploading the image
* @param forceUpload If true, the image is always re-uploaded, returning a new image DTO
* @returns A promise that resolves to the image DTO
*/
getGrayscaleMaskCompositeImageDTO = async (
adapters: CanvasEntityAdapterInpaintMask[],
rect: Rect,
attribute: 'noiseLevel' | 'denoiseLimit' = 'noiseLevel',
invertMask: boolean = false,
uploadOptions: SetOptional<Omit<UploadImageArg, 'file'>, 'image_category'> = { is_intermediate: true },
forceUpload?: boolean
): Promise<ImageDTO> => {
assert(rect.width > 0 && rect.height > 0, 'Unable to rasterize empty rect');
// Use a unique hash that includes the attribute name for caching
const hash = this.getCompositeHash(adapters, { rect, attribute, invertMask, grayscale: true });
const cachedImageName = forceUpload ? undefined : this.manager.cache.imageNameCache.get(hash);

let imageDTO: ImageDTO | null = null;

if (cachedImageName) {
imageDTO = await getImageDTOSafe(cachedImageName);
if (imageDTO) {
this.log.debug({ rect, imageName: cachedImageName, imageDTO }, 'Using cached grayscale composite image');
return imageDTO;
}
this.log.warn({ rect, imageName: cachedImageName }, 'Cached grayscale image name not found, recompositing');
}

// Create a white background canvas
const canvas = document.createElement('canvas');
canvas.width = rect.width;
canvas.height = rect.height;

const ctx = canvas.getContext('2d');
assert(ctx !== null, 'Canvas 2D context is null');

// Fill with white first (creates white background)
ctx.fillStyle = 'white';
ctx.fillRect(0, 0, rect.width, rect.height);

// Apply special compositing mode
ctx.globalCompositeOperation = 'darken';

// Draw each adapter's content
for (const adapter of adapters) {
this.log.debug({ entityIdentifier: adapter.entityIdentifier }, 'Drawing entity to grayscale composite canvas');

// Get the canvas from the adapter
const adapterCanvas = adapter.getCanvas(rect);

// Create a temporary canvas for grayscale conversion
const tempCanvas = document.createElement('canvas');
tempCanvas.width = adapterCanvas.width;
tempCanvas.height = adapterCanvas.height;

const tempCtx = tempCanvas.getContext('2d');
assert(tempCtx !== null, 'Temp canvas 2D context is null');

// Draw the original adapter canvas to the temp canvas
tempCtx.drawImage(adapterCanvas, 0, 0);

// Get the image data for processing
const imageData = tempCtx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
const data = imageData.data;

const attributeValue = typeof adapter.state[attribute] === 'number' ? (adapter.state[attribute] as number) : 1.0; // Default to full strength if attribute is undefined

// Process all pixels in the image data
for (let i = 0; i < data.length; i += 4) {
// Make sure we're accessing valid array indices
if (i + 3 < data.length) {
// input has transparency
// Calculate grayscale value: white (255) for no mask, darker for stronger mask
let grayValue = 255; // Default to white for unmasked areas
if (invertMask ? (data[i + 3] ?? 0) < 128 : (data[i + 3] ?? 0) > 127) {
grayValue = Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
}

data[i] = grayValue; // R
data[i + 1] = grayValue; // G
data[i + 2] = grayValue; // B
data[i + 3] = 255; // A (output is fully opaque)
}
}

imageData.data.set(data); // Update the image data with the processed values

// Put the processed image data back to the temp canvas
tempCtx.putImageData(imageData, 0, 0);

// Draw the temp canvas to the main canvas
ctx.drawImage(tempCanvas, 0, 0);
}

// Convert to blob and upload
this.$isProcessing.set(true);
const blobResult = await withResultAsync(() => canvasToBlob(canvas));
this.$isProcessing.set(false);

if (blobResult.isErr()) {
this.log.error(
{ error: serializeError(blobResult.error) },
'Failed to convert grayscale composite canvas to blob'
);
throw blobResult.error;
}

const blob = blobResult.value;

if (this.manager._isDebugging) {
previewBlob(blob, 'Grayscale Composite');
}

this.$isUploading.set(true);
const uploadResult = await withResultAsync(() =>
uploadImage({
file: new File([blob], 'canvas-grayscale-composite.png', { type: 'image/png' }),
image_category: 'general',
...uploadOptions,
})
);
this.$isUploading.set(false);

if (uploadResult.isErr()) {
throw uploadResult.error;
}

imageDTO = uploadResult.value;
this.manager.cache.imageNameCache.set(hash, imageDTO.image_name);
return imageDTO;
};

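The pixel loop in `getGrayscaleMaskCompositeImageDTO` reduces to a simple per-pixel mapping: wherever the mask's alpha passes the threshold, the output gray encodes the entity's `noiseLevel` or `denoiseLimit`; everywhere else stays white. A small standalone sketch of just that mapping, using the same formula as the hunk above:

```ts
// gray = 255 for unmasked pixels, 255 - round(255 * value) for masked pixels.
const toGray = (alpha: number, value: number, invertMask = false): number => {
  const masked = invertMask ? alpha < 128 : alpha > 127;
  if (!masked) {
    return 255; // white background
  }
  return Math.max(0, Math.min(255, 255 - Math.round(255 * value)));
};

// toGray(255, 1.0)  === 0   // denoiseLimit of 1.0 -> black
// toGray(255, 0.15) === 217 // default noiseLevel of 0.15 -> light gray
// toGray(0, 0.15)   === 255 // pixel outside the mask stays white
```
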
/**
* Calculates the transparency of the composite of the given adapters.
* @param adapters The adapters to composite

@@ -24,12 +24,13 @@ import {
selectCanvasSlice,
selectEntity,
} from 'features/controlLayers/store/selectors';
import {
type CanvasEntityIdentifier,
type CanvasRenderableEntityState,
isRasterLayerEntityIdentifier,
type Rect,
import type {
CanvasEntityIdentifier,
CanvasRenderableEntityState,
LifecycleCallback,
Rect,
} from 'features/controlLayers/store/types';
import { isRasterLayerEntityIdentifier } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import Konva from 'konva';
import { atom } from 'nanostores';
@@ -40,11 +41,6 @@ import stableHash from 'stable-hash';
import { assert } from 'tsafe';
import type { Jsonifiable, JsonObject } from 'type-fest';

// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`. We'll need to do a
// type assertion below in the `onInit` method, which calls these callbacks.
type InitCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;

export abstract class CanvasEntityAdapterBase<
T extends CanvasRenderableEntityState,
U extends string,
@@ -118,7 +114,7 @@ export abstract class CanvasEntityAdapterBase<
/**
* Callbacks that are executed when the module is initialized.
*/
private static initCallbacks = new Set<InitCallback>();
private static initCallbacks = new Set<LifecycleCallback>();

/**
* Register a callback to be run when an entity adapter is initialized.
@@ -165,7 +161,7 @@ export abstract class CanvasEntityAdapterBase<
* return false;
* });
*/
static registerInitCallback = (callback: InitCallback) => {
static registerInitCallback = (callback: LifecycleCallback) => {
const wrapped = async (adapter: CanvasEntityAdapter) => {
const result = await callback(adapter);
if (result) {

@@ -13,7 +13,7 @@ import {
roundRect,
} from 'features/controlLayers/konva/util';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
import type { Coordinate, LifecycleCallback, Rect, RectWithRotation } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import Konva from 'konva';
import type { GroupConfig } from 'konva/lib/Group';
@@ -123,7 +123,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
/**
* Whether the transformer is currently calculating the rect of the parent.
*/
$isPendingRectCalculation = atom<boolean>(true);
$isPendingRectCalculation = atom<boolean>(false);

/**
* A set of subscriptions that should be cleaned up when the transformer is destroyed.
@@ -177,6 +177,11 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
*/
transformMutex = new Mutex();

/**
* Callbacks that are executed when the bbox is updated.
*/
private static bboxUpdatedCallbacks = new Set<LifecycleCallback>();

konva: {
transformer: Konva.Transformer;
proxyRect: Konva.Rect;
@@ -908,6 +913,8 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
this.parent.renderer.konva.objectGroup.setAttrs(groupAttrs);
this.parent.bufferRenderer.konva.group.setAttrs(groupAttrs);
}

CanvasEntityTransformer.runBboxUpdatedCallbacks(this.parent);
};

calculateRect = debounce(() => {
@@ -1026,6 +1033,23 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
this.konva.outlineRect.visible(false);
};

static registerBboxUpdatedCallback = (callback: LifecycleCallback) => {
const wrapped = async (adapter: CanvasEntityAdapter) => {
const result = await callback(adapter);
if (result) {
this.bboxUpdatedCallbacks.delete(wrapped);
}
return result;
};
this.bboxUpdatedCallbacks.add(wrapped);
};

private static runBboxUpdatedCallbacks = (adapter: CanvasEntityAdapter) => {
for (const callback of this.bboxUpdatedCallbacks) {
callback(adapter);
}
};

repr = () => {
return {
id: this.id,

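The `registerBboxUpdatedCallback` wrapper above gives a `LifecycleCallback` one-shot semantics: when the callback resolves to `true`, the wrapper deletes it from the set so it never runs again. A hedged usage sketch (the callback body is hypothetical):

```ts
CanvasEntityTransformer.registerBboxUpdatedCallback(async (adapter) => {
  // Keep waiting until the adapter we care about reports a bbox update.
  if (adapter.entityIdentifier.type !== 'inpaint_mask') {
    return false;
  }
  // ...do some one-time work now that the bbox has been recalculated...
  return true; // resolving true unregisters this callback
});
```
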
@@ -1,12 +1,12 @@
import type { Property } from 'csstype';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
import { getKonvaNodeDebugAttrs, getPrefixedId, getRectUnion } from 'features/controlLayers/konva/util';
import type { Coordinate, Dimensions, Rect, StageAttrs } from 'features/controlLayers/store/types';
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import { clamp } from 'lodash-es';
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';

type CanvasStageModuleConfig = {
@@ -26,6 +26,14 @@ type CanvasStageModuleConfig = {
* The padding in pixels to use when fitting the layers to the stage.
*/
FIT_LAYERS_TO_STAGE_PADDING_PX: number;
/**
* The snap points for the scale of the canvas.
*/
SCALE_SNAP_POINTS: number[];
/**
* The tolerance for snapping the scale of the canvas, as a fraction of the scale.
*/
SCALE_SNAP_TOLERANCE: number;
};

const DEFAULT_CONFIG: CanvasStageModuleConfig = {
@@ -33,6 +41,8 @@ const DEFAULT_CONFIG: CanvasStageModuleConfig = {
MAX_SCALE: 20,
SCALE_FACTOR: 0.999,
FIT_LAYERS_TO_STAGE_PADDING_PX: 48,
SCALE_SNAP_POINTS: [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5],
SCALE_SNAP_TOLERANCE: 0.05,
};

export class CanvasStageModule extends CanvasModuleBase {
@@ -43,6 +53,11 @@ export class CanvasStageModule extends CanvasModuleBase {
readonly manager: CanvasManager;
readonly log: Logger;

// State for scale snapping logic
private _intendedScale: number = 1;
private _activeSnapPoint: number | null = null;
private _snapTimeout: number | null = null;

container: HTMLDivElement;
konva: { stage: Konva.Stage };

@@ -55,6 +70,7 @@ export class CanvasStageModule extends CanvasModuleBase {
height: 0,
scale: 0,
});
$scale = computed(this.$stageAttrs, (attrs) => attrs.scale);

subscriptions = new Set<() => void>();
resizeObserver: ResizeObserver | null = null;
@@ -76,6 +92,9 @@ export class CanvasStageModule extends CanvasModuleBase {
container,
}),
};

// Initialize intended scale to the default stage scale
this._intendedScale = this.konva.stage.scaleX();
}

setContainer = (container: HTMLDivElement) => {
@@ -167,6 +186,18 @@ export class CanvasStageModule extends CanvasModuleBase {
}
};

/**
* Fits the bbox and layers to the stage. The union of the bbox and the visible layers will be centered and scaled
* to fit the stage with some padding.
*/
fitBboxAndLayersToStage = (): void => {
const layersRect = this.manager.compositor.getVisibleRectOfType();
const bboxRect = this.manager.stateApi.getBbox().rect;
const unionRect = getRectUnion(layersRect, bboxRect);
this.log.trace({ bboxRect, layersRect, unionRect }, 'Fitting bbox and layers to stage');
this.fitRect(unionRect);
};

/**
* Fits a rectangle to the stage. The rectangle will be centered and scaled to fit the stage with some padding.
*
@@ -195,14 +226,27 @@ export class CanvasStageModule extends CanvasModuleBase {
-rect.y * scale + this.config.FIT_LAYERS_TO_STAGE_PADDING_PX + (availableHeight - rect.height * scale) / 2
);

this.konva.stage.setAttrs({
// When fitting the stage, we update the intended scale and reset any active snap.
this._intendedScale = scale;
this._activeSnapPoint = null;

const tween = new Konva.Tween({
node: this.konva.stage,
duration: 0.15,
x,
y,
scaleX: scale,
scaleY: scale,
easing: Konva.Easings.EaseInOut,
onUpdate: () => {
this.syncStageAttrs();
},
onFinish: () => {
this.syncStageAttrs();
tween.destroy();
},
});

this.syncStageAttrs({ x, y, scale });
tween.play();
};

/**
@@ -230,26 +274,41 @@ export class CanvasStageModule extends CanvasModuleBase {
* Constrains a scale to be within the valid range
*/
constrainScale = (scale: number): number => {
return clamp(Math.round(scale * 100) / 100, this.config.MIN_SCALE, this.config.MAX_SCALE);
return clamp(scale, this.config.MIN_SCALE, this.config.MAX_SCALE);
};

/**
* Sets the scale of the stage. If center is provided, the stage will zoom in/out on that point.
* @param scale The new scale to set
* @param center The center of the stage to zoom in/out on
* Programmatically sets the scale of the stage, overriding any active snapping.
* If a center point is provided, the stage will zoom on that point.
* @param scale The new scale to set.
* @param center The center point for the zoom.
*/
setScale = (scale: number, center: Coordinate = this.getCenter(true)): void => {
this.log.trace('Setting scale');
setScale = (scale: number, center?: Coordinate): void => {
this.log.trace({ scale }, 'Programmatically setting scale');
const newScale = this.constrainScale(scale);

const { x, y } = this.getPosition();
// When scale is set programmatically, update the intended scale and reset any active snap.
this._intendedScale = newScale;
this._activeSnapPoint = null;

this._applyScale(newScale, center);
};

/**
* Applies a scale to the stage, adjusting the position to keep the given center point stationary.
* This internal method does NOT modify snapping state.
*/
private _applyScale = (newScale: number, center?: Coordinate): void => {
const oldScale = this.getScale();

const deltaX = (center.x - x) / oldScale;
const deltaY = (center.y - y) / oldScale;
const _center = center ?? this.getCenter(true);
const { x, y } = this.getPosition();

const newX = Math.floor(center.x - deltaX * newScale);
const newY = Math.floor(center.y - deltaY * newScale);
const deltaX = (_center.x - x) / oldScale;
const deltaY = (_center.y - y) / oldScale;

const newX = _center.x - deltaX * newScale;
const newY = _center.y - deltaY * newScale;

this.konva.stage.setAttrs({
x: newX,
@@ -263,6 +322,7 @@ export class CanvasStageModule extends CanvasModuleBase {

onStageMouseWheel = (e: KonvaEventObject<WheelEvent>) => {
e.evt.preventDefault();
this._snapTimeout && window.clearTimeout(this._snapTimeout);

if (e.evt.ctrlKey || e.evt.metaKey) {
return;
@@ -271,12 +331,59 @@ export class CanvasStageModule extends CanvasModuleBase {
// We need the absolute cursor position - not the scaled position
const cursorPos = this.konva.stage.getPointerPosition();

if (cursorPos) {
// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
const scale = this.manager.stage.getScale() * this.config.SCALE_FACTOR ** delta;
this.manager.stage.setScale(scale, cursorPos);
if (!cursorPos) {
return;
}

// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;

// Update the intended scale based on the last intended scale, creating a continuous zoom feel
const newIntendedScale = this._intendedScale * this.config.SCALE_FACTOR ** delta;
this._intendedScale = this.constrainScale(newIntendedScale);

// Pass control to the snapping logic
this._updateScaleWithSnapping(cursorPos);

this._snapTimeout = window.setTimeout(() => {
// After a short delay, we can reset the intended scale to the current scale
// This allows for continuous zooming without snapping back to the last snapped scale
this._intendedScale = this.getScale();
}, 100);
};

/**
* Implements "sticky" snap logic.
* - If not snapped, checks if the intended scale is close enough to a snap point to engage the snap.
* - If snapped, checks if the intended scale has moved far enough away to break the snap.
* - Applies the resulting scale to the stage.
*/
private _updateScaleWithSnapping = (center: Coordinate) => {
// If we are currently snapped, check if we should break out
if (this._activeSnapPoint !== null) {
const threshold = this._activeSnapPoint * this.config.SCALE_SNAP_TOLERANCE;
if (Math.abs(this._intendedScale - this._activeSnapPoint) > threshold) {
// User has scrolled far enough to break the snap
this._activeSnapPoint = null;
this._applyScale(this._intendedScale, center);
}
// Else, do nothing - we remain snapped at the current scale, creating a "dead zone"
return;
}

// If we are not snapped, check if we should snap to a point
for (const snapPoint of this.config.SCALE_SNAP_POINTS) {
const threshold = snapPoint * this.config.SCALE_SNAP_TOLERANCE;
if (Math.abs(this._intendedScale - snapPoint) < threshold) {
// Engage the snap
this._activeSnapPoint = snapPoint;
this._applyScale(snapPoint, center);
return;
}
}

// If we are not snapping and not breaking a snap, just update to the intended scale
this._applyScale(this._intendedScale, center);
};

onStagePointerDown = (e: KonvaEventObject<PointerEvent>) => {

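The sticky-snap logic above is a small piece of hysteresis: the wheel moves an "intended" scale, and the applied scale only detaches from a snap point once the intended scale drifts more than `SCALE_SNAP_TOLERANCE` (5% of the snap point) away from it. A standalone sketch of that decision using the same defaults as `DEFAULT_CONFIG` (illustrative only, no Konva involved):

```ts
const SNAP_POINTS = [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5];
const TOLERANCE = 0.05; // fraction of the snap point

// Given the intended scale and the currently engaged snap point (if any),
// return the scale to apply and the new snap state.
const resolveScale = (intended: number, activeSnap: number | null): { scale: number; snap: number | null } => {
  if (activeSnap !== null) {
    // Already snapped: stay put until the intended scale escapes the dead zone.
    if (Math.abs(intended - activeSnap) > activeSnap * TOLERANCE) {
      return { scale: intended, snap: null };
    }
    return { scale: activeSnap, snap: activeSnap };
  }
  for (const snap of SNAP_POINTS) {
    if (Math.abs(intended - snap) < snap * TOLERANCE) {
      return { scale: snap, snap };
    }
  }
  return { scale: intended, snap: null };
};

// resolveScale(0.97, null) -> { scale: 1, snap: 1 }       (|0.97 - 1| = 0.03 < 0.05, engage)
// resolveScale(1.04, 1)    -> { scale: 1, snap: 1 }       (still inside the dead zone)
// resolveScale(1.08, 1)    -> { scale: 1.08, snap: null } (0.08 > 0.05, break the snap)
```
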
@@ -1096,6 +1096,30 @@ export const canvasSlice = createSlice({
state.inpaintMasks.entities = [data];
state.selectedEntityIdentifier = { type: 'inpaint_mask', id: data.id };
},
inpaintMaskNoiseAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = 0.15; // Default noise level
}
},
inpaintMaskNoiseChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ noiseLevel: number }, 'inpaint_mask'>>
) => {
const { entityIdentifier, noiseLevel } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = noiseLevel;
}
},
inpaintMaskNoiseDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = undefined;
}
},
inpaintMaskConvertedToRegionalGuidance: {
reducer: (
state,
@@ -1134,6 +1158,30 @@ export const canvasSlice = createSlice({
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
}),
},
inpaintMaskDenoiseLimitAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = 1.0; // Default denoise limit
}
},
inpaintMaskDenoiseLimitChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ denoiseLimit: number }, 'inpaint_mask'>>
) => {
const { entityIdentifier, denoiseLimit } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = denoiseLimit;
}
},
inpaintMaskDenoiseLimitDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = undefined;
}
},
//#region BBox
bboxScaledWidthChanged: (state, action: PayloadAction<number>) => {
const gridSize = getGridSize(state.bbox.modelBase);
@@ -1869,6 +1917,12 @@ export const {
// Inpaint mask
inpaintMaskAdded,
inpaintMaskConvertedToRegionalGuidance,
inpaintMaskNoiseAdded,
inpaintMaskNoiseChanged,
inpaintMaskNoiseDeleted,
inpaintMaskDenoiseLimitAdded,
inpaintMaskDenoiseLimitChanged,
inpaintMaskDenoiseLimitDeleted,
// inpaintMaskRecalled,
} = canvasSlice.actions;

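Tying the new reducers to the UI above: the `useAddInpaintMask*` hooks and the two sliders dispatch these actions with an inpaint-mask entity identifier. An illustrative dispatch sequence (the identifier value here is hypothetical; the zod schema added below constrains both fields to the 0-1 range):

```ts
const entityIdentifier = { type: 'inpaint_mask', id: 'inpaint_mask_1' } as const;

// Attach a noise modifier (the reducer defaults it to 0.15), then tune it.
dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: 0.3 }));

// Attach a denoise limit (defaults to 1.0), tighten it, then remove it again.
dispatch(inpaintMaskDenoiseLimitAdded({ entityIdentifier }));
dispatch(inpaintMaskDenoiseLimitChanged({ entityIdentifier, denoiseLimit: 0.65 }));
dispatch(inpaintMaskDenoiseLimitDeleted({ entityIdentifier }));
```
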
@@ -1,3 +1,4 @@
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import { fetchModelConfigByIdentifier } from 'features/metadata/util/modelFetchingHelpers';
import { zMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import type { ParameterLoRAModel } from 'features/parameters/types/parameterSchemas';
@@ -310,6 +311,8 @@ const zCanvasInpaintMaskState = zCanvasEntityBase.extend({
fill: zFill,
opacity: zOpacity,
objects: z.array(zCanvasObjectState),
noiseLevel: z.number().gte(0).lte(1).optional(),
denoiseLimit: z.number().gte(0).lte(1).optional(),
});
export type CanvasInpaintMaskState = z.infer<typeof zCanvasInpaintMaskState>;

@@ -609,3 +612,7 @@ export const isMaskEntityIdentifier = (
): entityIdentifier is CanvasEntityIdentifier<'inpaint_mask' | 'regional_guidance'> => {
return isInpaintMaskEntityIdentifier(entityIdentifier) || isRegionalGuidanceEntityIdentifier(entityIdentifier);
};

// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`.
export type LifecycleCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;

@@ -199,6 +199,8 @@ export const getInpaintMaskState = (
style: 'diagonal',
color: getInpaintMaskFillColor(),
},
noiseLevel: undefined,
denoiseLimit: undefined,
};
merge(entityState, overrides);
return entityState;

@@ -1,7 +1,11 @@
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { MainModelBase } from 'features/nodes/types/common';
import { getGridSize, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import {
getGridSize,
getOptimalDimension,
isInSDXLTrainingDimensions,
} from 'features/parameters/util/optimalDimension';

/**
* Scales the bounding box dimensions to the optimal dimension. The optimal dimensions should be the trained dimension
@@ -10,6 +14,11 @@ import { getGridSize, getOptimalDimension } from 'features/parameters/util/optim
* @param modelBase The base model
*/
export const getScaledBoundingBoxDimensions = (dimensions: Dimensions, modelBase: MainModelBase): Dimensions => {
// Special cases: Return original if SDXL and in training dimensions
if (modelBase === 'sdxl' && isInSDXLTrainingDimensions(dimensions.width, dimensions.height)) {
return { ...dimensions };
}

const optimalDimension = getOptimalDimension(modelBase);
const gridSize = getGridSize(modelBase);
const width = roundToMultiple(dimensions.width, gridSize);

@@ -26,19 +26,26 @@ import { atom } from 'nanostores';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useListAllImageNamesForBoardQuery } from 'services/api/endpoints/boards';
import { useDeleteBoardAndImagesMutation, useDeleteBoardMutation } from 'services/api/endpoints/images';
import {
useDeleteBoardAndImagesMutation,
useDeleteBoardMutation,
useDeleteUncategorizedImagesMutation,
} from 'services/api/endpoints/images';
import type { BoardDTO } from 'services/api/types';

export const $boardToDelete = atom<BoardDTO | null>(null);
export const $boardToDelete = atom<BoardDTO | 'none' | null>(null);

const DeleteBoardModal = () => {
useAssertSingleton('DeleteBoardModal');
const boardToDelete = useStore($boardToDelete);
const { t } = useTranslation();

const boardId = useMemo(() => (boardToDelete === 'none' ? 'none' : boardToDelete?.board_id), [boardToDelete]);

const { currentData: boardImageNames, isFetching: isFetchingBoardNames } = useListAllImageNamesForBoardQuery(
boardToDelete?.board_id
boardId
? {
board_id: boardToDelete?.board_id,
board_id: boardId,
categories: undefined,
is_intermediate: undefined,
}
@@ -71,10 +78,13 @@ const DeleteBoardModal = () => {

const [deleteBoardAndImages, { isLoading: isDeleteBoardAndImagesLoading }] = useDeleteBoardAndImagesMutation();

const [deleteUncategorizedImages, { isLoading: isDeleteUncategorizedImagesLoading }] =
useDeleteUncategorizedImagesMutation();

const imageUsageSummary = useAppSelector(selectImageUsageSummary);

const handleDeleteBoardOnly = useCallback(() => {
if (!boardToDelete) {
if (!boardToDelete || boardToDelete === 'none') {
return;
}
deleteBoardOnly(boardToDelete.board_id);
@@ -82,13 +92,21 @@ const DeleteBoardModal = () => {
}, [boardToDelete, deleteBoardOnly]);

const handleDeleteBoardAndImages = useCallback(() => {
if (!boardToDelete) {
if (!boardToDelete || boardToDelete === 'none') {
return;
}
deleteBoardAndImages(boardToDelete.board_id);
$boardToDelete.set(null);
}, [boardToDelete, deleteBoardAndImages]);

const handleDeleteUncategorizedImages = useCallback(() => {
if (!boardToDelete || boardToDelete !== 'none') {
return;
}
deleteUncategorizedImages();
$boardToDelete.set(null);
}, [boardToDelete, deleteUncategorizedImages]);

const handleClose = useCallback(() => {
$boardToDelete.set(null);
}, []);
@@ -96,8 +114,12 @@ const DeleteBoardModal = () => {
const cancelRef = useRef<HTMLButtonElement>(null);

const isLoading = useMemo(
() => isDeleteBoardAndImagesLoading || isDeleteBoardOnlyLoading || isFetchingBoardNames,
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames]
() =>
isDeleteBoardAndImagesLoading ||
isDeleteBoardOnlyLoading ||
isFetchingBoardNames ||
isDeleteUncategorizedImagesLoading,
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames, isDeleteUncategorizedImagesLoading]
);

if (!boardToDelete) {
@@ -109,7 +131,7 @@ const DeleteBoardModal = () => {
<AlertDialogOverlay>
<AlertDialogContent>
<AlertDialogHeader fontSize="lg" fontWeight="bold">
{t('common.delete')} {boardToDelete.board_name}
{t('common.delete')} {boardToDelete === 'none' ? t('boards.uncategorizedImages') : boardToDelete.board_name}
</AlertDialogHeader>

<AlertDialogBody>
@@ -125,11 +147,13 @@ const DeleteBoardModal = () => {
bottomMessage={t('boards.bottomMessage')}
/>
)}
<Text>
{boardToDelete.is_private
? t('boards.deletedPrivateBoardsCannotbeRestored')
: t('boards.deletedBoardsCannotbeRestored')}
</Text>
{boardToDelete !== 'none' && (
<Text>
{boardToDelete.is_private
? t('boards.deletedPrivateBoardsCannotbeRestored')
: t('boards.deletedBoardsCannotbeRestored')}
</Text>
)}
<Text>{t('gallery.deleteImagePermanent')}</Text>
</Flex>
</AlertDialogBody>
@@ -138,12 +162,21 @@ const DeleteBoardModal = () => {
<Button ref={cancelRef} onClick={handleClose}>
{t('boards.cancel')}
</Button>
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
{t('boards.deleteBoardOnly')}
</Button>
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
{t('boards.deleteBoardAndImages')}
</Button>
{boardToDelete !== 'none' && (
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
{t('boards.deleteBoardOnly')}
</Button>
)}
{boardToDelete !== 'none' && (
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
{t('boards.deleteBoardAndImages')}
</Button>
)}
{boardToDelete === 'none' && (
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteUncategorizedImages}>
{t('boards.deleteAllUncategorizedImages')}
</Button>
)}
</Flex>
</AlertDialogFooter>
</AlertDialogContent>

@@ -7,9 +7,11 @@ import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadBold, PiPlusBold } from 'react-icons/pi';
import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
import { useBulkDownloadImagesMutation } from 'services/api/endpoints/images';

import { $boardToDelete } from './DeleteBoardModal';

type Props = {
children: ContextMenuProps<HTMLDivElement>['children'];
};
@@ -33,6 +35,10 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
bulkDownload({ image_names: [], board_id: 'none' });
}, [bulkDownload]);

const setUncategorizedImagesAsToBeDeleted = useCallback(() => {
$boardToDelete.set('none');
}, []);

const renderMenuFunc = useCallback(
() => (
<MenuList visibility="visible">
@@ -47,10 +53,26 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
{t('boards.downloadBoard')}
</MenuItem>
)}
<MenuItem
color="error.300"
icon={<PiTrashSimpleBold />}
onClick={setUncategorizedImagesAsToBeDeleted}
isDestructive
>
{t('boards.deleteAllUncategorizedImages')}
</MenuItem>
</MenuGroup>
</MenuList>
),
[autoAssignBoardOnClick, handleBulkDownload, handleSetAutoAdd, isBulkDownloadEnabled, isSelectedForAutoAdd, t]
[
autoAssignBoardOnClick,
handleBulkDownload,
handleSetAutoAdd,
isBulkDownloadEnabled,
isSelectedForAutoAdd,
t,
setUncategorizedImagesAsToBeDeleted,
]
);

return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;

@@ -19,9 +19,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const imageViewer = useImageViewer();
const isBusy = useCanvasIsBusySafe();

const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -31,9 +31,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -43,9 +43,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -55,9 +55,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);

const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({

@@ -2,7 +2,7 @@ import type { FlexProps } from '@invoke-ai/ui-library';
import { Box, chakra, Flex, IconButton, Tooltip, useShiftModifier } from '@invoke-ai/ui-library';
import { getOverlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
import { useClipboard } from 'common/hooks/useClipboard';
import { Formatter } from 'fracturedjsonjs';
import { Formatter, TableCommaPlacement } from 'fracturedjsonjs';
import { isString } from 'lodash-es';
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
import type { CSSProperties } from 'react';
@@ -11,6 +11,8 @@ import { useTranslation } from 'react-i18next';
import { PiCopyBold, PiDownloadSimpleBold } from 'react-icons/pi';

const formatter = new Formatter();
formatter.Options.TableCommaPlacement = TableCommaPlacement.BeforePadding;
formatter.Options.OmitTrailingWhitespace = true;

type Props = {
label: string;
@@ -19,6 +21,7 @@ type Props = {
withDownload?: boolean;
withCopy?: boolean;
extraCopyActions?: { label: string; getData: (data: unknown) => unknown }[];
wrapData?: boolean;
} & FlexProps;

const overlayscrollbarsOptions = getOverlayScrollbarsParams({
@@ -29,7 +32,16 @@ const overlayscrollbarsOptions = getOverlayScrollbarsParams({
const ChakraPre = chakra('pre');

const DataViewer = (props: Props) => {
const { label, data, fileName, withDownload = true, withCopy = true, extraCopyActions, ...rest } = props;
const {
label,
data,
fileName,
withDownload = true,
withCopy = true,
extraCopyActions,
wrapData = true,
...rest
} = props;
const dataString = useMemo(() => (isString(data) ? data : formatter.Serialize(data)) ?? '', [data]);
const shift = useShiftModifier();
const clipboard = useClipboard();
@@ -53,7 +65,7 @@ const DataViewer = (props: Props) => {
<Flex bg="base.800" borderRadius="base" flexGrow={1} w="full" h="full" position="relative" {...rest}>
<Box position="absolute" top={0} left={0} right={0} bottom={0} overflow="auto" p={2} fontSize="sm">
<OverlayScrollbarsComponent defer style={overlayScrollbarsStyles} options={overlayscrollbarsOptions}>
<ChakraPre whiteSpace="pre-wrap">{dataString}</ChakraPre>
<ChakraPre whiteSpace={wrapData ? 'pre-wrap' : undefined}>{dataString}</ChakraPre>
</OverlayScrollbarsComponent>
</Box>
<Flex position="absolute" top={0} insetInlineEnd={0} p={2}>

||||
@@ -1,7 +1,7 @@
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { selectDefaultIPAdapter, selectDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
|
||||
import { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
|
||||
import { getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import { canvasReset } from 'features/controlLayers/store/actions';
|
||||
import {
|
||||
@@ -20,6 +20,7 @@ import type {
|
||||
CanvasControlLayerState,
|
||||
CanvasEntityIdentifier,
|
||||
CanvasEntityType,
|
||||
CanvasImageState,
|
||||
CanvasInpaintMaskState,
|
||||
CanvasRasterLayerState,
|
||||
CanvasRegionalGuidanceState,
|
||||
@@ -34,7 +35,7 @@ import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import type { FieldIdentifier } from 'features/nodes/types/field';
|
||||
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
|
||||
import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
import type { Equals } from 'tsafe';
|
||||
import { assert } from 'tsafe';
|
||||
@@ -142,14 +143,14 @@ export const createNewCanvasEntityFromImage = (arg: {
|
||||
*
|
||||
* Using 'raster_layer' for the type and enabling `withResize` replicates the common img2img flow.
|
||||
*/
|
||||
export const newCanvasFromImage = (arg: {
|
||||
export const newCanvasFromImage = async (arg: {
|
||||
imageDTO: ImageDTO;
|
||||
type: CanvasEntityType | 'regional_guidance_with_reference_image';
|
||||
withResize: boolean;
|
||||
withResize?: boolean;
|
||||
dispatch: AppDispatch;
|
||||
getState: () => RootState;
|
||||
}) => {
|
||||
const { type, imageDTO, withResize, dispatch, getState } = arg;
|
||||
const { type, imageDTO, withResize = false, dispatch, getState } = arg;
|
||||
const state = getState();
|
||||
|
||||
const base = selectBboxModelBase(state);
|
||||
@@ -158,20 +159,29 @@ export const newCanvasFromImage = (arg: {
|
||||
const optimalDimension = getOptimalDimension(base);
|
||||
const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);
|
||||
|
||||
const imageObject = imageDTOToImageObject(imageDTO);
|
||||
const { x, y } = selectBboxRect(state);
|
||||
let imageObject: CanvasImageState;
|
||||
|
||||
const addInitCallback = (id: string) => {
|
||||
CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
|
||||
if (withResize && (width !== imageDTO.width || height !== imageDTO.height)) {
|
||||
const resizedImageDTO = await uploadImage({
|
||||
file: await imageDTOToFile(imageDTO),
|
||||
image_category: 'general',
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
resize_to: { width, height },
|
||||
});
|
||||
imageObject = imageDTOToImageObject(resizedImageDTO);
|
||||
} else {
|
||||
imageObject = imageDTOToImageObject(imageDTO);
|
||||
}
|
||||
|
||||
const addFitOnLayerInitCallback = (adapterId: string) => {
|
||||
CanvasEntityTransformer.registerBboxUpdatedCallback((adapter) => {
|
||||
// Skip the callback if the adapter is not the one we are creating
|
||||
if (adapter.id !== id) {
|
||||
return false;
|
||||
if (adapter.id !== adapterId) {
|
||||
return Promise.resolve(false);
|
||||
}
|
||||
// Fit the layer to the bbox w/ fill strategy
|
||||
await adapter.transformer.startTransform({ silent: true });
|
||||
adapter.transformer.fitToBboxFill();
|
||||
await adapter.transformer.applyTransform();
|
||||
return true;
|
||||
adapter.manager.stage.fitBboxAndLayersToStage();
|
||||
return Promise.resolve(true);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -180,11 +190,8 @@ export const newCanvasFromImage = (arg: {
|
||||
const overrides = {
|
||||
id: getPrefixedId('raster_layer'),
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
} satisfies Partial<CanvasRasterLayerState>;
|
||||
if (withResize) {
|
||||
addInitCallback(overrides.id);
|
||||
}
|
||||
addFitOnLayerInitCallback(overrides.id);
|
||||
dispatch(canvasReset());
|
||||
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
|
||||
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
|
||||
@@ -195,12 +202,9 @@ export const newCanvasFromImage = (arg: {
|
||||
const overrides = {
|
||||
id: getPrefixedId('control_layer'),
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
controlAdapter: deepClone(initialControlNet),
|
||||
} satisfies Partial<CanvasControlLayerState>;
|
||||
if (withResize) {
|
||||
addInitCallback(overrides.id);
|
||||
}
|
||||
addFitOnLayerInitCallback(overrides.id);
|
||||
dispatch(canvasReset());
|
||||
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
|
||||
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
|
||||
@@ -211,11 +215,8 @@ export const newCanvasFromImage = (arg: {
|
||||
const overrides = {
|
||||
id: getPrefixedId('inpaint_mask'),
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
} satisfies Partial<CanvasInpaintMaskState>;
|
||||
if (withResize) {
|
||||
addInitCallback(overrides.id);
|
||||
}
|
||||
addFitOnLayerInitCallback(overrides.id);
|
||||
dispatch(canvasReset());
|
||||
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
|
||||
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
|
||||
@@ -226,11 +227,8 @@ export const newCanvasFromImage = (arg: {
|
||||
const overrides = {
|
||||
id: getPrefixedId('regional_guidance'),
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
} satisfies Partial<CanvasRegionalGuidanceState>;
|
||||
if (withResize) {
|
||||
addInitCallback(overrides.id);
|
||||
}
|
||||
addFitOnLayerInitCallback(overrides.id);
|
||||
dispatch(canvasReset());
|
||||
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
|
||||
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
|
||||
|
||||
@@ -22,6 +22,7 @@ import { NodeFieldElementOverlay } from 'features/nodes/components/sidePanel/bui
|
||||
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
|
||||
import {
|
||||
$isInPublishFlow,
|
||||
$isPublishing,
|
||||
$isReadyToDoValidationRun,
|
||||
$isSelectingOutputNode,
|
||||
$outputNodeId,
|
||||
@@ -183,13 +184,14 @@ SelectOutputNodeButton.displayName = 'SelectOutputNodeButton';
|
||||
|
||||
const CancelPublishButton = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const isPublishing = useStore($isPublishing);
|
||||
const onClick = useCallback(() => {
|
||||
$isInPublishFlow.set(false);
|
||||
$isSelectingOutputNode.set(false);
|
||||
$outputNodeId.set(null);
|
||||
}, []);
|
||||
return (
|
||||
<Button leftIcon={<PiXBold />} onClick={onClick}>
|
||||
<Button leftIcon={<PiXBold />} onClick={onClick} isDisabled={isPublishing}>
|
||||
{t('common.cancel')}
|
||||
</Button>
|
||||
);
|
||||
@@ -198,6 +200,7 @@ CancelPublishButton.displayName = 'CancelDeployButton';
|
||||
|
||||
const PublishWorkflowButton = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const isPublishing = useStore($isPublishing);
|
||||
const isReadyToDoValidationRun = useStore($isReadyToDoValidationRun);
|
||||
const isReadyToEnqueue = useStore($isReadyToEnqueue);
|
||||
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
|
||||
@@ -211,6 +214,7 @@ const PublishWorkflowButton = memo(() => {
|
||||
|
||||
const enqueue = useEnqueueWorkflows();
|
||||
const onClick = useCallback(async () => {
|
||||
$isPublishing.set(true);
|
||||
const result = await withResultAsync(() => enqueue(true, true));
|
||||
if (result.isErr()) {
|
||||
toast({
|
||||
@@ -244,8 +248,30 @@ const PublishWorkflowButton = memo(() => {
|
||||
});
|
||||
log.debug(parseify(result.value), 'Enqueued batch');
|
||||
}
|
||||
$isPublishing.set(false);
|
||||
}, [enqueue, projectUrl, t]);
|
||||
|
||||
const isDisabled = useMemo(() => {
|
||||
return (
|
||||
!allowPublishWorkflows ||
|
||||
!isReadyToEnqueue ||
|
||||
doesWorkflowHaveUnsavedChanges ||
|
||||
hasUnpublishableNodes ||
|
||||
!isReadyToDoValidationRun ||
|
||||
!(outputNodeId !== null && !isSelectingOutputNode) ||
|
||||
isPublishing
|
||||
);
|
||||
}, [
|
||||
allowPublishWorkflows,
|
||||
doesWorkflowHaveUnsavedChanges,
|
||||
hasUnpublishableNodes,
|
||||
isReadyToDoValidationRun,
|
||||
isReadyToEnqueue,
|
||||
isSelectingOutputNode,
|
||||
outputNodeId,
|
||||
isPublishing,
|
||||
]);
|
||||
|
||||
return (
|
||||
<PublishTooltip
|
||||
isWorkflowSaved={!doesWorkflowHaveUnsavedChanges}
|
||||
@@ -255,19 +281,8 @@ const PublishWorkflowButton = memo(() => {
|
||||
hasPublishableInputs={inputs.publishable.length > 0}
|
||||
hasUnpublishableInputs={inputs.unpublishable.length > 0}
|
||||
>
|
||||
<Button
|
||||
leftIcon={<PiLightningFill />}
|
||||
isDisabled={
|
||||
!allowPublishWorkflows ||
|
||||
!isReadyToEnqueue ||
|
||||
doesWorkflowHaveUnsavedChanges ||
|
||||
hasUnpublishableNodes ||
|
||||
!isReadyToDoValidationRun ||
|
||||
!(outputNodeId !== null && !isSelectingOutputNode)
|
||||
}
|
||||
onClick={onClick}
|
||||
>
|
||||
{t('workflows.builder.publish')}
|
||||
<Button leftIcon={<PiLightningFill />} isDisabled={isDisabled} onClick={onClick}>
|
||||
{isPublishing ? t('workflows.builder.publishing') : t('workflows.builder.publish')}
|
||||
</Button>
|
||||
</PublishTooltip>
|
||||
);
|
||||
@@ -337,6 +352,10 @@ export const StartPublishFlowButton = memo(() => {
|
||||
$isInPublishFlow.set(true);
|
||||
}, []);
|
||||
|
||||
const isDisabled = useMemo(() => {
|
||||
return !allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes;
|
||||
}, [allowPublishWorkflows, doesWorkflowHaveUnsavedChanges, hasUnpublishableNodes, isReadyToEnqueue]);
|
||||
|
||||
return (
|
||||
<PublishTooltip
|
||||
isWorkflowSaved={!doesWorkflowHaveUnsavedChanges}
|
||||
@@ -346,15 +365,7 @@ export const StartPublishFlowButton = memo(() => {
|
||||
hasPublishableInputs={inputs.publishable.length > 0}
|
||||
hasUnpublishableInputs={inputs.unpublishable.length > 0}
|
||||
>
|
||||
<Button
|
||||
onClick={onClick}
|
||||
leftIcon={<PiLightningFill />}
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
isDisabled={
|
||||
!allowPublishWorkflows || !isReadyToEnqueue || doesWorkflowHaveUnsavedChanges || hasUnpublishableNodes
|
||||
}
|
||||
>
|
||||
<Button onClick={onClick} leftIcon={<PiLightningFill />} variant="ghost" size="sm" isDisabled={isDisabled}>
|
||||
{t('workflows.builder.publish')}
|
||||
</Button>
|
||||
</PublishTooltip>
|
||||
|
||||
@@ -19,6 +19,7 @@ import { useGetBatchStatusQuery } from 'services/api/endpoints/queue';
import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
import { assert } from 'tsafe';

export const $isPublishing = atom(false);
export const $isInPublishFlow = atom(false);
export const $outputNodeId = atom<string | null>(null);
export const $isSelectingOutputNode = atom(false);

@@ -14,7 +14,7 @@ import type {
|
||||
VaeSourceNodes,
|
||||
} from 'features/nodes/util/graph/types';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import type { Invocation } from 'services/api/types';
|
||||
import type { ImageDTO, Invocation } from 'services/api/types';
|
||||
|
||||
type AddInpaintArg = {
|
||||
state: RootState;
|
||||
@@ -29,6 +29,7 @@ type AddInpaintArg = {
|
||||
scaledSize: Dimensions;
|
||||
denoising_start: number;
|
||||
fp32: boolean;
|
||||
seed: number;
|
||||
};
|
||||
|
||||
export const addInpaint = async ({
|
||||
@@ -44,6 +45,7 @@ export const addInpaint = async ({
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
}: AddInpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
|
||||
denoise.denoising_start = denoising_start;
|
||||
|
||||
@@ -51,19 +53,45 @@ export const addInpaint = async ({
|
||||
const canvasSettings = selectCanvasSettingsSlice(state);
|
||||
const canvas = selectCanvasSlice(state);
|
||||
|
||||
const { bbox } = canvas;
|
||||
const { rect } = canvas.bbox;
|
||||
|
||||
const rasterAdapters = manager.compositor.getVisibleAdaptersOfType('raster_layer');
|
||||
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, bbox.rect, {
|
||||
const initialImage = await manager.compositor.getCompositeImageDTO(rasterAdapters, rect, {
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
});
|
||||
|
||||
const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
|
||||
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
});
|
||||
|
||||
// Get inpaint mask adapters that have noise settings
|
||||
const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);
|
||||
|
||||
// Create a composite noise mask if we have any adapters with noise settings
|
||||
let noiseMaskImage: ImageDTO | null = null;
|
||||
if (noiseMaskAdapters.length > 0) {
|
||||
noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
|
||||
noiseMaskAdapters,
|
||||
rect,
|
||||
'noiseLevel',
|
||||
canvasSettings.preserveMask,
|
||||
{
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// Create a composite denoise limit mask
|
||||
const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
|
||||
inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
|
||||
rect,
|
||||
'denoiseLimit',
|
||||
canvasSettings.preserveMask,
|
||||
{
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
}
|
||||
);
|
||||
|
||||
const needsScaleBeforeProcessing = !isEqual(scaledSize, originalSize);
|
||||
|
||||
@@ -82,15 +110,38 @@ export const addInpaint = async ({
|
||||
image: { image_name: initialImage.image_name },
|
||||
...scaledSize,
|
||||
});
|
||||
const alphaToMask = g.addNode({
|
||||
id: getPrefixedId('alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
image: { image_name: maskImage.image_name },
|
||||
invert: !canvasSettings.preserveMask,
|
||||
});
|
||||
|
||||
// If we have a noise mask, apply it to the input image before i2l conversion
|
||||
if (noiseMaskImage) {
|
||||
// Resize the noise mask to match the scaled size
|
||||
const resizeNoiseMaskToScaledSize = g.addNode({
|
||||
id: getPrefixedId('resize_noise_mask_to_scaled_size'),
|
||||
type: 'img_resize',
|
||||
image: { image_name: noiseMaskImage.image_name },
|
||||
...scaledSize,
|
||||
});
|
||||
|
||||
// Add noise to the scaled image using the mask
|
||||
const noiseNode = g.addNode({
|
||||
type: 'img_noise',
|
||||
id: getPrefixedId('add_inpaint_noise'),
|
||||
noise_type: 'gaussian',
|
||||
amount: 1.0, // the mask controls the actual intensity
|
||||
noise_color: true,
|
||||
seed: seed,
|
||||
});
|
||||
|
||||
g.addEdge(resizeImageToScaledSize, 'image', noiseNode, 'image');
|
||||
g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
|
||||
g.addEdge(noiseNode, 'image', i2l, 'image');
|
||||
} else {
|
||||
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
|
||||
}
|
||||
|
||||
const resizeMaskToScaledSize = g.addNode({
|
||||
id: getPrefixedId('resize_mask_to_scaled_size'),
|
||||
type: 'img_resize',
|
||||
image: { image_name: maskImage.image_name },
|
||||
...scaledSize,
|
||||
});
|
||||
const resizeImageToOriginalSize = g.addNode({
|
||||
@@ -117,12 +168,8 @@ export const addInpaint = async ({
|
||||
fade_size_px: params.maskBlur,
|
||||
});
|
||||
|
||||
// Resize initial image and mask to scaled size, feed into to gradient mask
|
||||
g.addEdge(alphaToMask, 'image', resizeMaskToScaledSize, 'image');
|
||||
g.addEdge(resizeImageToScaledSize, 'image', i2l, 'image');
|
||||
g.addEdge(i2l, 'latents', denoise, 'latents');
|
||||
g.addEdge(vaeSource, 'vae', i2l, 'vae');
|
||||
|
||||
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
|
||||
if (!isMainModelWithoutUnet(modelLoader)) {
|
||||
g.addEdge(modelLoader, 'unet', createGradientMask, 'unet');
|
||||
@@ -169,12 +216,23 @@ export const addInpaint = async ({
|
||||
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
|
||||
});
|
||||
|
||||
const alphaToMask = g.addNode({
|
||||
id: getPrefixedId('alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
image: { image_name: maskImage.image_name },
|
||||
invert: !canvasSettings.preserveMask,
|
||||
});
|
||||
// If we have a noise mask, apply it to the input image before i2l conversion
|
||||
if (noiseMaskImage) {
|
||||
// Add noise to the scaled image using the mask
|
||||
const noiseNode = g.addNode({
|
||||
type: 'img_noise',
|
||||
id: getPrefixedId('add_inpaint_noise'),
|
||||
image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
|
||||
noise_type: 'gaussian',
|
||||
amount: 1.0, // the mask controls the actual intensity
|
||||
noise_color: true,
|
||||
seed: seed,
|
||||
mask: { image_name: noiseMaskImage.image_name },
|
||||
});
|
||||
|
||||
g.addEdge(noiseNode, 'image', i2l, 'image');
|
||||
}
|
||||
|
||||
const createGradientMask = g.addNode({
|
||||
id: getPrefixedId('create_gradient_mask'),
|
||||
type: 'create_gradient_mask',
|
||||
@@ -183,9 +241,9 @@ export const addInpaint = async ({
|
||||
edge_radius: params.canvasCoherenceEdgeSize,
|
||||
fp32,
|
||||
image: { image_name: initialImage.image_name },
|
||||
mask: { image_name: maskImage.image_name },
|
||||
});
|
||||
|
||||
g.addEdge(alphaToMask, 'image', createGradientMask, 'mask');
|
||||
g.addEdge(i2l, 'latents', denoise, 'latents');
|
||||
g.addEdge(vaeSource, 'vae', i2l, 'vae');
|
||||
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
|
||||
|
||||
@@ -15,7 +15,7 @@ import type {
|
||||
VaeSourceNodes,
|
||||
} from 'features/nodes/util/graph/types';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import type { Invocation } from 'services/api/types';
|
||||
import type { ImageDTO, Invocation } from 'services/api/types';
|
||||
|
||||
type AddOutpaintArg = {
|
||||
state: RootState;
|
||||
@@ -30,6 +30,7 @@ type AddOutpaintArg = {
|
||||
scaledSize: Dimensions;
|
||||
denoising_start: number;
|
||||
fp32: boolean;
|
||||
seed: number;
|
||||
};
|
||||
|
||||
export const addOutpaint = async ({
|
||||
@@ -45,6 +46,7 @@ export const addOutpaint = async ({
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
}: AddOutpaintArg): Promise<Invocation<'invokeai_img_blend' | 'apply_mask_to_image'>> => {
|
||||
denoise.denoising_start = denoising_start;
|
||||
|
||||
@@ -61,10 +63,38 @@ export const addOutpaint = async ({
|
||||
});
|
||||
|
||||
const inpaintMaskAdapters = manager.compositor.getVisibleAdaptersOfType('inpaint_mask');
|
||||
const maskImage = await manager.compositor.getCompositeImageDTO(inpaintMaskAdapters, bbox.rect, {
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
});
|
||||
|
||||
const { rect } = canvas.bbox;
|
||||
|
||||
// Get inpaint mask adapters that have noise settings
|
||||
const noiseMaskAdapters = inpaintMaskAdapters.filter((adapter) => adapter.state.noiseLevel !== undefined);
|
||||
|
||||
// Create a composite noise mask if we have any adapters with noise settings
|
||||
let noiseMaskImage: ImageDTO | null = null;
|
||||
if (noiseMaskAdapters.length > 0) {
|
||||
noiseMaskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
|
||||
noiseMaskAdapters,
|
||||
rect,
|
||||
'noiseLevel',
|
||||
canvasSettings.preserveMask,
|
||||
{
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// Create a composite denoise limit mask
|
||||
const maskImage = await manager.compositor.getGrayscaleMaskCompositeImageDTO(
|
||||
inpaintMaskAdapters, // denoise limit defaults to 1 for masks that don't have it
|
||||
rect,
|
||||
'denoiseLimit',
|
||||
canvasSettings.preserveMask,
|
||||
{
|
||||
is_intermediate: true,
|
||||
silent: true,
|
||||
}
|
||||
);
|
||||
|
||||
const infill = getInfill(g, params);
|
||||
|
||||
@@ -72,14 +102,6 @@ export const addOutpaint = async ({
|
||||
|
||||
if (needsScaleBeforeProcessing) {
|
||||
// Scale before processing requires some resizing
|
||||
|
||||
// Combine the inpaint mask and the initial image's alpha channel into a single mask
|
||||
const maskAlphaToMask = g.addNode({
|
||||
id: getPrefixedId('alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
image: { image_name: maskImage.image_name },
|
||||
invert: !canvasSettings.preserveMask,
|
||||
});
|
||||
const initialImageAlphaToMask = g.addNode({
|
||||
id: getPrefixedId('image_alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
@@ -88,8 +110,8 @@ export const addOutpaint = async ({
|
||||
const maskCombine = g.addNode({
|
||||
id: getPrefixedId('mask_combine'),
|
||||
type: 'mask_combine',
|
||||
mask1: { image_name: maskImage.image_name },
|
||||
});
|
||||
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
|
||||
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
|
||||
|
||||
// Resize the combined and initial image to the scaled size
|
||||
@@ -134,7 +156,32 @@ export const addOutpaint = async ({
|
||||
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
|
||||
});
|
||||
|
||||
g.addEdge(infill, 'image', i2l, 'image');
|
||||
// If we have a noise mask, apply it to the input image before i2l conversion
|
||||
if (noiseMaskImage) {
|
||||
// Resize the noise mask to match the scaled size
|
||||
const resizeNoiseMaskToScaledSize = g.addNode({
|
||||
id: getPrefixedId('resize_noise_mask_to_scaled_size'),
|
||||
type: 'img_resize',
|
||||
image: { image_name: noiseMaskImage.image_name },
|
||||
...scaledSize,
|
||||
});
|
||||
|
||||
// Add noise to the scaled image using the mask
|
||||
const noiseNode = g.addNode({
|
||||
type: 'img_noise',
|
||||
id: getPrefixedId('add_inpaint_noise'),
|
||||
noise_type: 'gaussian',
|
||||
amount: 1.0, // the mask controls the actual intensity
|
||||
noise_color: true,
|
||||
seed: seed,
|
||||
});
|
||||
|
||||
g.addEdge(resizeNoiseMaskToScaledSize, 'image', noiseNode, 'mask');
|
||||
g.addEdge(infill, 'image', noiseNode, 'image');
|
||||
g.addEdge(noiseNode, 'image', i2l, 'image');
|
||||
} else {
|
||||
g.addEdge(infill, 'image', i2l, 'image');
|
||||
}
|
||||
g.addEdge(vaeSource, 'vae', i2l, 'vae');
|
||||
g.addEdge(i2l, 'latents', denoise, 'latents');
|
||||
|
||||
@@ -190,12 +237,6 @@ export const addOutpaint = async ({
|
||||
type: i2lNodeType,
|
||||
...(i2lNodeType === 'i2l' ? { fp32 } : {}),
|
||||
});
|
||||
const maskAlphaToMask = g.addNode({
|
||||
id: getPrefixedId('mask_alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
image: { image_name: maskImage.image_name },
|
||||
invert: !canvasSettings.preserveMask,
|
||||
});
|
||||
const initialImageAlphaToMask = g.addNode({
|
||||
id: getPrefixedId('image_alpha_to_mask'),
|
||||
type: 'tomask',
|
||||
@@ -204,6 +245,7 @@ export const addOutpaint = async ({
|
||||
const maskCombine = g.addNode({
|
||||
id: getPrefixedId('mask_combine'),
|
||||
type: 'mask_combine',
|
||||
mask1: { image_name: maskImage.image_name },
|
||||
});
|
||||
const createGradientMask = g.addNode({
|
||||
id: getPrefixedId('create_gradient_mask'),
|
||||
@@ -214,10 +256,29 @@ export const addOutpaint = async ({
|
||||
fp32,
|
||||
image: { image_name: initialImage.image_name },
|
||||
});
|
||||
g.addEdge(maskAlphaToMask, 'image', maskCombine, 'mask1');
|
||||
g.addEdge(initialImageAlphaToMask, 'image', maskCombine, 'mask2');
|
||||
g.addEdge(maskCombine, 'image', createGradientMask, 'mask');
|
||||
g.addEdge(infill, 'image', i2l, 'image');
|
||||
|
||||
// If we have a noise mask, apply it to the input image before i2l conversion
|
||||
if (noiseMaskImage) {
|
||||
// Add noise to the scaled image using the mask
|
||||
const noiseNode = g.addNode({
|
||||
type: 'img_noise',
|
||||
id: getPrefixedId('add_inpaint_noise'),
|
||||
image: initialImage.image_name ? { image_name: initialImage.image_name } : undefined,
|
||||
noise_type: 'gaussian',
|
||||
amount: 1.0, // the mask controls the actual intensity
|
||||
noise_color: true,
|
||||
seed: seed,
|
||||
mask: { image_name: noiseMaskImage.image_name },
|
||||
});
|
||||
|
||||
g.addEdge(infill, 'image', noiseNode, 'image');
|
||||
g.addEdge(noiseNode, 'image', i2l, 'image');
|
||||
} else {
|
||||
g.addEdge(infill, 'image', i2l, 'image');
|
||||
}
|
||||
|
||||
g.addEdge(i2l, 'latents', denoise, 'latents');
|
||||
g.addEdge(vaeSource, 'vae', i2l, 'vae');
|
||||
g.addEdge(vaeSource, 'vae', createGradientMask, 'vae');
|
||||
|
||||
@@ -137,6 +137,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'cogview4_inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -153,6 +154,7 @@ export const buildCogView4Graph = async (state: RootState, manager: CanvasManage
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'cogview4_outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -212,6 +212,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'flux_inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -228,6 +229,7 @@ export const buildFLUXGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'flux_outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -198,6 +198,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: vaePrecision === 'fp32',
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -214,6 +215,7 @@ export const buildSD1Graph = async (state: RootState, manager: CanvasManager): P
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -162,6 +162,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sd3_inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -178,6 +179,7 @@ export const buildSD3Graph = async (state: RootState, manager: CanvasManager): P
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32: false,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sd3_outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -205,6 +205,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sdxl_inpaint' });
|
||||
} else if (generationMode === 'outpaint') {
|
||||
@@ -221,6 +222,7 @@ export const buildSDXLGraph = async (state: RootState, manager: CanvasManager):
|
||||
scaledSize,
|
||||
denoising_start,
|
||||
fp32,
|
||||
seed,
|
||||
});
|
||||
g.upsertMetadata({ generation_mode: 'sdxl_outpaint' });
|
||||
} else {
|
||||
|
||||
@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { negativePromptChanged, selectNegativePrompt } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -15,12 +16,20 @@ import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
trackWidth: false,
trackHeight: true,
};

export const ParamNegativePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector(selectNegativePrompt);
const viewMode = useAppSelector(selectStylePresetViewMode);
const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);

const textareaRef = useRef<HTMLTextAreaElement>(null);
usePersistedTextAreaSize('negative_prompt', textareaRef, persistOptions);

const { activeStylePreset } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
let activeStylePreset = null;
@@ -31,7 +40,6 @@ export const ParamNegativePrompt = memo(() => {
},
});

const textareaRef = useRef<HTMLTextAreaElement>(null);
const { t } = useTranslation();
const _onChange = useCallback(
(v: string) => {

@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { positivePromptChanged, selectBase, selectPositivePrompt } from 'features/controlLayers/store/paramsSlice';
import { ShowDynamicPromptsPreviewButton } from 'features/dynamicPrompts/components/ShowDynamicPromptsPreviewButton';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
@@ -19,6 +20,12 @@ import type { HotkeyCallback } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { useListStylePresetsQuery } from 'services/api/endpoints/stylePresets';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
trackWidth: false,
trackHeight: true,
initialHeight: 120,
};

export const ParamPositivePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector(selectPositivePrompt);
@@ -26,6 +33,9 @@ export const ParamPositivePrompt = memo(() => {
const viewMode = useAppSelector(selectStylePresetViewMode);
const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);

const textareaRef = useRef<HTMLTextAreaElement>(null);
usePersistedTextAreaSize('positive_prompt', textareaRef, persistOptions);

const { activeStylePreset } = useListStylePresetsQuery(undefined, {
selectFromResult: ({ data }) => {
let activeStylePreset = null;
@@ -36,7 +46,6 @@ export const ParamPositivePrompt = memo(() => {
},
});

const textareaRef = useRef<HTMLTextAreaElement>(null);
const { t } = useTranslation();
const handleChange = useCallback(
(v: string) => {
@@ -75,7 +84,6 @@ export const ParamPositivePrompt = memo(() => {
ref={textareaRef}
value={prompt}
onChange={onChange}
minH={40}
onKeyDown={onKeyDown}
variant="darkFilled"
borderTopWidth={24} // This prevents the prompt from being hidden behind the header
@@ -83,6 +91,8 @@ export const ParamPositivePrompt = memo(() => {
paddingInlineStart={3}
paddingTop={0}
paddingBottom={3}
resize="vertical"
minH={28}
/>
<PromptOverlayButtonWrapper>
<AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />

@@ -12,7 +12,8 @@ import {
|
||||
Text,
|
||||
} from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import type { Group, PickerContextState } from 'common/components/Picker/Picker';
|
||||
import { buildGroup, getRegex, Picker, usePickerContext } from 'common/components/Picker/Picker';
|
||||
import { useDisclosure } from 'common/hooks/useBoolean';
|
||||
@@ -22,6 +23,7 @@ import { BASE_COLOR_MAP } from 'features/modelManagerV2/subpanels/ModelManagerPa
|
||||
import ModelImage from 'features/modelManagerV2/subpanels/ModelManagerPanel/ModelImage';
|
||||
import { NavigateToModelManagerButton } from 'features/parameters/components/MainModel/NavigateToModelManagerButton';
|
||||
import { API_BASE_MODELS, MODEL_TYPE_MAP, MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants';
|
||||
import { selectIsModelsTabDisabled } from 'features/system/store/configSlice';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { filesize } from 'filesize';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
@@ -32,12 +34,23 @@ import type { AnyModelConfig, BaseModelType } from 'services/api/types';
|
||||
const getOptionId = (modelConfig: AnyModelConfig) => modelConfig.key;
|
||||
|
||||
const ModelManagerLink = memo((props: ButtonProps) => {
|
||||
const onClickGoToModelManager = useStore($onClickGoToModelManager);
|
||||
const dispatch = useAppDispatch();
|
||||
const onClick = useCallback(() => {
|
||||
dispatch(setActiveTab('models'));
|
||||
$installModelsTab.set(3);
|
||||
}, [dispatch]);
|
||||
return <Button size="sm" flexGrow={0} variant="link" color="base.200" onClick={onClick} {...props} />;
|
||||
|
||||
return (
|
||||
<Button
|
||||
size="sm"
|
||||
flexGrow={0}
|
||||
variant="link"
|
||||
color="base.200"
|
||||
onClick={onClickGoToModelManager ?? onClick}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
});
|
||||
ModelManagerLink.displayName = 'ModelManagerLink';
|
||||
|
||||
@@ -47,12 +60,17 @@ const components = {
|
||||
|
||||
const NoOptionsFallback = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const isModelsTabDisabled = useAppSelector(selectIsModelsTabDisabled);
|
||||
const onClickGoToModelManager = useStore($onClickGoToModelManager);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" gap={4} alignItems="center">
|
||||
<Text color="base.200">{t('modelManager.modelPickerFallbackNoModelsInstalled')}</Text>
|
||||
<Text color="base.200">
|
||||
<Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
|
||||
</Text>
|
||||
{(!isModelsTabDisabled || onClickGoToModelManager) && (
|
||||
<Text color="base.200">
|
||||
<Trans i18nKey="modelManager.modelPickerFallbackNoModelsInstalled2" components={components} />
|
||||
</Text>
|
||||
)}
|
||||
</Flex>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -26,6 +26,40 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
}
};

const SDXL_TRAINING_DIMENSIONS: [number, number][] = [
[512, 2048],
[512, 1984],
[512, 1920],
[512, 1856],
[576, 1792],
[576, 1728],
[576, 1664],
[640, 1600],
[640, 1536],
[704, 1472],
[704, 1408],
[704, 1344],
[768, 1344],
[768, 1280],
[832, 1216],
[832, 1152],
[896, 1152],
[896, 1088],
[960, 1088],
[960, 1024],
[1024, 1024],
];

/**
* Checks if the given width and height are in the SDXL training dimensions.
* @param width The width to check
* @param height The height to check
* @returns Whether the width and height are in the SDXL training dimensions (order agnostic)
*/
export const isInSDXLTrainingDimensions = (width: number, height: number): boolean => {
return SDXL_TRAINING_DIMENSIONS.some(([w, h]) => (w === width && h === height) || (w === height && h === width));
};

/**
* Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8.
* - sd-1, sd-2, sdxl: 8

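A minimal usage sketch for the `isInSDXLTrainingDimensions` helper added in the hunk above. The import path matches the `features/parameters/util/optimalDimension` module seen elsewhere in this compare; the wrapper function and the warning-message use case are hypothetical, not part of the diff.

// Sketch only: a possible consumer that warns when a bbox is not a native SDXL training resolution.
import { isInSDXLTrainingDimensions } from 'features/parameters/util/optimalDimension';

export const getSdxlDimensionWarning = (width: number, height: number): string | null => {
  // Order-agnostic check against the SDXL_TRAINING_DIMENSIONS table defined above.
  if (isInSDXLTrainingDimensions(width, height)) {
    return null;
  }
  return `${width}x${height} is not an SDXL training resolution; results may be degraded.`;
};
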
@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { negativePrompt2Changed, selectNegativePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
trackWidth: false,
trackHeight: true,
};

export const ParamSDXLNegativeStylePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector(selectNegativePrompt2);
const textareaRef = useRef<HTMLTextAreaElement>(null);
usePersistedTextAreaSize('negative_style_prompt', textareaRef, persistOptions);

const { t } = useTranslation();
const handleChange = useCallback(
(v: string) => {

@@ -1,5 +1,6 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { positivePrompt2Changed, selectPositivePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
@@ -9,10 +10,17 @@ import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
trackWidth: false,
trackHeight: true,
};

export const ParamSDXLPositiveStylePrompt = memo(() => {
const dispatch = useAppDispatch();
const prompt = useAppSelector(selectPositivePrompt2);
const textareaRef = useRef<HTMLTextAreaElement>(null);
usePersistedTextAreaSize('positive_style_prompt', textareaRef, persistOptions);

const { t } = useTranslation();
const handleChange = useCallback(
(v: string) => {

@@ -58,7 +58,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
{cloneElement(children, {
onClick: onOpen,
})}
<Modal isOpen={isOpen} onClose={onClose} isCentered size="2xl" useInert={false}>
<Modal isOpen={isOpen} onClose={onClose} isCentered size="5xl" useInert={false}>
<ModalOverlay />
<ModalContent maxH="80vh" h="34rem">
<ModalHeader>{t('accessibility.about')}</ModalHeader>
@@ -66,7 +66,7 @@ const AboutModal = ({ children }: AboutModalProps) => {
<ModalBody display="flex" flexDir="column" gap={4}>
<Grid templateColumns="repeat(2, 1fr)" h="full">
<GridItem backgroundColor="base.750" borderRadius="base" p="4" h="full">
<DataViewer label={t('common.systemInformation')} data={localData} />
<DataViewer label={t('common.systemInformation')} data={localData} wrapData={false} />
</GridItem>
<GridItem>
<Flex flexDir="column" gap={3} justifyContent="center" alignItems="center" h="full">

@@ -2,6 +2,7 @@ import type { PayloadAction } from '@reduxjs/toolkit';
import { createSelector, createSlice } from '@reduxjs/toolkit';
import type { PersistConfig, RootState } from 'app/store/store';
import { newSessionRequested } from 'features/controlLayers/store/actions';
import type { Dimensions } from 'features/controlLayers/store/types';
import { workflowLoaded } from 'features/nodes/store/nodesSlice';
import { atom } from 'nanostores';

@@ -15,6 +16,7 @@ const initialUIState: UIState = {
shouldShowProgressInViewer: true,
accordions: {},
expanders: {},
textAreaSizes: {},
shouldShowNotificationV2: true,
};

@@ -42,6 +44,10 @@ export const uiSlice = createSlice({
const { id, isOpen } = action.payload;
state.expanders[id] = isOpen;
},
textAreaSizesStateChanged: (state, action: PayloadAction<{ id: string; size: Partial<Dimensions> }>) => {
const { id, size } = action.payload;
state.textAreaSizes[id] = size;
},
shouldShowNotificationChanged: (state, action: PayloadAction<boolean>) => {
state.shouldShowNotificationV2 = action.payload;
},
@@ -64,6 +70,7 @@ export const {
accordionStateChanged,
expanderStateChanged,
shouldShowNotificationChanged,
textAreaSizesStateChanged,
} = uiSlice.actions;

export const selectUiSlice = (state: RootState) => state.ui;

@@ -1,3 +1,5 @@
import type { Dimensions } from 'features/controlLayers/store/types';

export type TabName = 'canvas' | 'upscaling' | 'workflows' | 'models' | 'queue';
export type CanvasRightPanelTabName = 'layers' | 'gallery';

@@ -30,6 +32,10 @@ export interface UIState {
* The state of expanders. The key is the id of the expander, and the value is a boolean representing the open state.
*/
expanders: Record<string, boolean>;
/**
* The size of textareas. The key is the id of the text area, and the value is an object representing its width and/or height.
*/
textAreaSizes: Record<string, Partial<Dimensions>>;
/**
* Whether or not to show the user the open notification. Bump version to reset users who may have closed previous version.
*/

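A minimal sketch of how the new `textAreaSizes` state could be populated by the `usePersistedTextAreaSize` hook used in the prompt components above. Only the action name, option names, and call signature come from the diff; the ResizeObserver wiring and the hook body shown here are assumptions.

// Sketch only: the real common/hooks/usePersistedTextareaSize implementation may differ.
import { useEffect } from 'react';
import type { RefObject } from 'react';
import { useAppDispatch } from 'app/store/storeHooks';
import { textAreaSizesStateChanged } from 'features/ui/store/uiSlice';

type Options = { trackWidth: boolean; trackHeight: boolean; initialHeight?: number };

export const usePersistedTextAreaSizeSketch = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
  const dispatch = useAppDispatch();
  useEffect(() => {
    const el = ref.current;
    if (!el) {
      return;
    }
    // Persist only the dimensions the caller asked to track (e.g. trackHeight for prompt boxes).
    const observer = new ResizeObserver(() => {
      dispatch(
        textAreaSizesStateChanged({
          id,
          size: {
            ...(options.trackWidth ? { width: el.offsetWidth } : {}),
            ...(options.trackHeight ? { height: el.offsetHeight } : {}),
          },
        })
      );
    });
    observer.observe(el);
    return () => observer.disconnect();
  }, [dispatch, id, ref, options]);
};
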
@@ -1,7 +1,7 @@
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import type { OpenAPIV3_1 } from 'openapi-types';
import type { paths } from 'services/api/schema';
import type { AppConfig, AppDependencyVersions, AppVersion } from 'services/api/types';
import type { AppConfig, AppVersion } from 'services/api/types';

import { api, buildV1Url } from '..';

@@ -22,7 +22,10 @@ export const appInfoApi = api.injectEndpoints({
}),
providesTags: ['FetchOnReconnect'],
}),
getAppDeps: build.query<AppDependencyVersions, void>({
getAppDeps: build.query<
paths['/api/v1/app/app_deps']['get']['responses']['200']['content']['application/json'],
void
>({
query: () => ({
url: buildAppInfoUrl('app_deps'),
method: 'GET',

@@ -160,6 +160,42 @@ export const imagesApi = api.injectEndpoints({
|
||||
return [];
|
||||
},
|
||||
}),
|
||||
deleteUncategorizedImages: build.mutation<components['schemas']['DeleteImagesFromListResult'], void>({
|
||||
query: () => ({ url: buildImagesUrl('uncategorized'), method: 'DELETE' }),
|
||||
invalidatesTags: (result) => {
|
||||
if (result && result.deleted_images.length > 0) {
|
||||
const boardId = 'none';
|
||||
|
||||
const tags: ApiTagDescription[] = [
|
||||
{
|
||||
type: 'ImageList',
|
||||
id: getListImagesUrl({
|
||||
board_id: boardId,
|
||||
categories: IMAGE_CATEGORIES,
|
||||
}),
|
||||
},
|
||||
{
|
||||
type: 'ImageList',
|
||||
id: getListImagesUrl({
|
||||
board_id: boardId,
|
||||
categories: ASSETS_CATEGORIES,
|
||||
}),
|
||||
},
|
||||
{
|
||||
type: 'Board',
|
||||
id: boardId,
|
||||
},
|
||||
{
|
||||
type: 'BoardImagesTotal',
|
||||
id: boardId,
|
||||
},
|
||||
];
|
||||
|
||||
return tags;
|
||||
}
|
||||
return [];
|
||||
},
|
||||
}),
|
||||
/**
|
||||
* Change an image's `is_intermediate` property.
|
||||
*/
|
||||
@@ -270,12 +306,15 @@ export const imagesApi = api.injectEndpoints({
|
||||
},
|
||||
}),
|
||||
uploadImage: build.mutation<ImageDTO, UploadImageArg>({
|
||||
query: ({ file, image_category, is_intermediate, session_id, board_id, crop_visible, metadata }) => {
|
||||
query: ({ file, image_category, is_intermediate, session_id, board_id, crop_visible, metadata, resize_to }) => {
|
||||
const formData = new FormData();
|
||||
formData.append('file', file);
|
||||
if (metadata) {
|
||||
formData.append('metadata', JSON.stringify(metadata));
|
||||
}
|
||||
if (resize_to) {
|
||||
formData.append('resize_to', JSON.stringify(resize_to));
|
||||
}
|
||||
return {
|
||||
url: buildImagesUrl('upload'),
|
||||
method: 'POST',
|
||||
@@ -563,6 +602,7 @@ export const {
|
||||
useAddImagesToBoardMutation,
|
||||
useRemoveImagesFromBoardMutation,
|
||||
useDeleteBoardAndImagesMutation,
|
||||
useDeleteUncategorizedImagesMutation,
|
||||
useDeleteBoardMutation,
|
||||
useStarImagesMutation,
|
||||
useUnstarImagesMutation,
|
||||
|
||||
@@ -661,6 +661,26 @@ export type paths = {
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/uncategorized": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
header?: never;
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
get?: never;
|
||||
put?: never;
|
||||
post?: never;
|
||||
/**
|
||||
* Delete Uncategorized Images
|
||||
* @description Deletes all images that are uncategorized
|
||||
*/
|
||||
delete: operations["delete_uncategorized_images"];
|
||||
options?: never;
|
||||
head?: never;
|
||||
patch?: never;
|
||||
trace?: never;
|
||||
};
|
||||
"/api/v1/images/star": {
|
||||
parameters: {
|
||||
query?: never;
|
||||
@@ -1905,77 +1925,6 @@ export type components = {
|
||||
*/
|
||||
watermarking_methods: string[];
|
||||
};
|
||||
/**
|
||||
* AppDependencyVersions
|
||||
* @description App depencency Versions Response
|
||||
*/
|
||||
AppDependencyVersions: {
|
||||
/**
|
||||
* Accelerate
|
||||
* @description accelerate version
|
||||
*/
|
||||
accelerate: string;
|
||||
/**
|
||||
* Compel
|
||||
* @description compel version
|
||||
*/
|
||||
compel: string;
|
||||
/**
|
||||
* Cuda
|
||||
* @description CUDA version
|
||||
*/
|
||||
cuda: string | null;
|
||||
/**
|
||||
* Diffusers
|
||||
* @description diffusers version
|
||||
*/
|
||||
diffusers: string;
|
||||
/**
|
||||
* Numpy
|
||||
* @description Numpy version
|
||||
*/
|
||||
numpy: string;
|
||||
/**
|
||||
* Opencv
|
||||
* @description OpenCV version
|
||||
*/
|
||||
opencv: string;
|
||||
/**
|
||||
* Onnx
|
||||
* @description ONNX version
|
||||
*/
|
||||
onnx: string;
|
||||
/**
|
||||
* Pillow
|
||||
* @description Pillow (PIL) version
|
||||
*/
|
||||
pillow: string;
|
||||
/**
|
||||
* Python
|
||||
* @description Python version
|
||||
*/
|
||||
python: string;
|
||||
/**
|
||||
* Torch
|
||||
* @description PyTorch version
|
||||
*/
|
||||
torch: string;
|
||||
/**
|
||||
* Torchvision
|
||||
* @description PyTorch Vision version
|
||||
*/
|
||||
torchvision: string;
|
||||
/**
|
||||
* Transformers
|
||||
* @description transformers version
|
||||
*/
|
||||
transformers: string;
|
||||
/**
|
||||
* Xformers
|
||||
* @description xformers version
|
||||
*/
|
||||
xformers: string | null;
|
||||
};
|
||||
/**
|
||||
* AppVersion
|
||||
* @description App Version Response
|
||||
@@ -2723,6 +2672,11 @@ export type components = {
|
||||
* Format: binary
|
||||
*/
|
||||
file: Blob;
|
||||
/**
|
||||
* Resize To
|
||||
* @description Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: 16777216
|
||||
*/
|
||||
resize_to?: string | null;
|
||||
/**
|
||||
* Metadata
|
||||
* @description The metadata to associate with the image, must be a stringified JSON dict
|
||||
@@ -5573,7 +5527,7 @@ export type components = {
|
||||
};
|
||||
/**
|
||||
* Create Gradient Mask
|
||||
* @description Creates mask for denoising model run.
|
||||
* @description Creates mask for denoising.
|
||||
*/
|
||||
CreateGradientMaskInvocation: {
|
||||
/**
|
||||
@@ -5600,7 +5554,7 @@ export type components = {
|
||||
mask?: components["schemas"]["ImageField"] | null;
|
||||
/**
|
||||
* Edge Radius
|
||||
* @description How far to blur/expand the edges of the mask
|
||||
* @description How far to expand the edges of the mask
|
||||
* @default 16
|
||||
*/
|
||||
edge_radius?: number;
|
||||
@@ -9366,7 +9320,7 @@ export type components = {
|
||||
* @description Method to apply IP Weights with
|
||||
* @enum {string}
|
||||
*/
|
||||
method: "full" | "style" | "composition";
|
||||
method: "full" | "style" | "composition" | "style_strong" | "style_precise";
|
||||
/**
|
||||
* Weight
|
||||
* @description The weight given to the IP-Adapter
|
||||
@@ -10443,6 +10397,11 @@ export type components = {
|
||||
* @default null
|
||||
*/
|
||||
image?: components["schemas"]["ImageField"] | null;
|
||||
/**
|
||||
* @description Optional mask determining where to apply noise (black=noise, white=no noise)
|
||||
* @default null
|
||||
*/
|
||||
mask?: components["schemas"]["ImageField"] | null;
|
||||
/**
|
||||
* Seed
|
||||
* @description Seed for random number generation
|
||||
@@ -12032,7 +11991,7 @@ export type components = {
|
||||
* vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
|
||||
* lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
|
||||
* pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
|
||||
* device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
|
||||
* device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
|
||||
* precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
|
||||
* sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
|
||||
* attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
|
||||
@@ -12307,11 +12266,10 @@ export type components = {
|
||||
pytorch_cuda_alloc_conf?: string | null;
|
||||
/**
|
||||
* Device
|
||||
* @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
|
||||
* @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
|
||||
* @default auto
|
||||
* @enum {string}
|
||||
*/
|
||||
device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
|
||||
device?: string;
|
||||
/**
|
||||
* Precision
|
||||
* @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
|
||||
@@ -23480,6 +23438,26 @@ export interface operations {
|
||||
};
|
||||
};
|
||||
};
|
||||
delete_uncategorized_images: {
|
||||
parameters: {
|
||||
query?: never;
|
||||
header?: never;
|
||||
path?: never;
|
||||
cookie?: never;
|
||||
};
|
||||
requestBody?: never;
|
||||
responses: {
|
||||
/** @description Successful Response */
|
||||
200: {
|
||||
headers: {
|
||||
[name: string]: unknown;
|
||||
};
|
||||
content: {
|
||||
"application/json": components["schemas"]["DeleteImagesFromListResult"];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
star_images_in_list: {
|
||||
parameters: {
|
||||
query?: never;
|
||||
@@ -23807,7 +23785,7 @@ export interface operations {
|
||||
};
|
||||
header?: never;
|
||||
path: {
|
||||
/** @description The id of the board */
|
||||
/** @description The id of the board or 'none' for uncategorized images */
|
||||
board_id: string;
|
||||
};
|
||||
cookie?: never;
|
||||
@@ -24176,7 +24154,9 @@ export interface operations {
|
||||
[name: string]: unknown;
|
||||
};
|
||||
content: {
|
||||
"application/json": components["schemas"]["AppDependencyVersions"];
|
||||
"application/json": {
|
||||
[key: string]: string;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import type { Dimensions } from 'features/controlLayers/store/types';
|
||||
import type { components, paths } from 'services/api/schema';
|
||||
import type { JsonObject, SetRequired } from 'type-fest';
|
||||
|
||||
@@ -30,7 +31,6 @@ export type InvocationJSONSchemaExtra = S['UIConfigBase'];
|
||||
// App Info
|
||||
export type AppVersion = S['AppVersion'];
|
||||
export type AppConfig = S['AppConfig'];
|
||||
export type AppDependencyVersions = S['AppDependencyVersions'];
|
||||
|
||||
// Images
|
||||
export type ImageDTO = S['ImageDTO'];
|
||||
@@ -373,6 +373,10 @@ export type UploadImageArg = {
|
||||
* Whether this is the first upload of a batch (used when displaying user feedback with toasts - ignored if the upload is silent)
|
||||
*/
|
||||
isFirstUploadOfBatch?: boolean;
|
||||
/**
|
||||
* If provided, the uploaded image will be resized to the given dimensions.
|
||||
*/
|
||||
resize_to?: Dimensions;
|
||||
};
|
||||
|
||||
export type ImageUploadEntryResponse = S['ImageUploadEntry'];
|
||||
|
||||
@@ -1 +1 @@
|
||||
__version__ = "5.12.0"
|
||||
__version__ = "5.15.0"
|
||||
|
||||
@@ -10,20 +10,20 @@ readme = { content-type = "text/markdown", file = "README.md" }
|
||||
keywords = ["stable-diffusion", "AI"]
|
||||
dynamic = ["version"]
|
||||
license = { file = "LICENSE" }
|
||||
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
|
||||
authors = [{ name = "Invoke", email = "support@invoke.ai" }]
|
||||
classifiers = [
|
||||
'Development Status :: 4 - Beta',
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Environment :: GPU',
|
||||
'Environment :: GPU :: NVIDIA CUDA',
|
||||
'Environment :: MacOS X',
|
||||
'Intended Audience :: End Users/Desktop',
|
||||
'Intended Audience :: Developers',
|
||||
'License :: OSI Approved :: MIT License',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Operating System :: MacOS',
|
||||
'Operating System :: Microsoft :: Windows',
|
||||
'Programming Language :: Python :: 3 :: Only',
|
||||
'Programming Language :: Python :: 3.10',
|
||||
'Programming Language :: Python :: 3.12',
|
||||
'Topic :: Artistic Software',
|
||||
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
|
||||
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
|
||||
@@ -35,7 +35,7 @@ dependencies = [
|
||||
# Core generation dependencies, pinned for reproducible builds.
|
||||
"accelerate",
|
||||
"bitsandbytes; sys_platform!='darwin'",
|
||||
"compel==2.0.2",
|
||||
"compel==2.1.1",
|
||||
"diffusers[torch]==0.33.0",
|
||||
"gguf",
|
||||
"invisible-watermark==0.2.0", # needed to install SDXL base and refiner using their repo_ids
|
||||
@@ -43,7 +43,7 @@ dependencies = [
|
||||
"numpy<2.0.0",
|
||||
"onnx==1.16.1",
|
||||
"onnxruntime==1.19.2",
|
||||
"opencv-python==4.9.0.80",
|
||||
"opencv-contrib-python",
|
||||
"safetensors",
|
||||
"sentencepiece",
|
||||
"spandrel",
|
||||
@@ -109,6 +109,12 @@ dependencies = [
|
||||
"humanize==4.12.1",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
# Prevent opencv-python from ever being chosen during dependency resolution.
|
||||
# This prevents conflicts with opencv-contrib-python, which Invoke requires.
|
||||
override-dependencies = ["opencv-python; sys_platform=='never'"]
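The `sys_platform=='never'` marker is what makes this override work: no platform ever reports `sys_platform == "never"`, so the overridden `opencv-python` requirement applies nowhere and is excluded from resolution entirely. A standalone sketch to confirm the marker never matches (uses the `packaging` library, which is not part of this diff and is assumed to be installed):

    from packaging.markers import Marker

    # The override's environment marker can never evaluate to True,
    # so any dependency guarded by it is effectively dropped.
    marker = Marker("sys_platform == 'never'")
    print(marker.evaluate())  # False on Linux, macOS, and Windows alike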
|
||||
|
||||
|
||||
[project.scripts]
|
||||
"invokeai-web" = "invokeai.app.run_app:run_app"
|
||||
|
||||
|
||||
@@ -38,6 +38,12 @@ echo -e "${BGREEN}HEAD${RESET}:"
git_show HEAD
echo

# If the classifiers are invalid, publishing to PyPI will fail but the build will succeed.
# It's a fast check, do it early.
echo "Checking pyproject classifiers..."
python3 ./check_classifiers.py ../pyproject.toml
echo

# ---------------------- FRONTEND ----------------------

pushd ../invokeai/frontend/web >/dev/null

scripts/check_classifiers.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import re
import sys
import urllib.request
from pathlib import Path

# This script checks the classifiers in a pyproject.toml file against the official Trove classifier list.
# If the classifiers are invalid, PyPI will reject the package upload.

# Step 1: Get pyproject.toml path from args
if len(sys.argv) != 2:
    print(f"Usage: {sys.argv[0]} path/to/pyproject.toml", file=sys.stderr)
    sys.exit(1)

pyproject_path = Path(sys.argv[1])
if not pyproject_path.is_file():
    print(f"File not found: {pyproject_path}", file=sys.stderr)
    sys.exit(1)

# Step 2: Download the official Trove classifier list
url = "https://pypi.org/pypi?%3Aaction=list_classifiers"
with urllib.request.urlopen(url) as response:
    trove_classifiers = {line.decode("utf-8").strip() for line in response}

# Step 3: Extract classifiers from pyproject.toml
with open(pyproject_path) as f:
    content = f.read()

match = re.search(r"classifiers\s*=\s*\[([^\]]*)\]", content, re.MULTILINE | re.DOTALL)
if not match:
    print("No 'classifiers' block found in pyproject.toml", file=sys.stderr)
    sys.exit(1)

raw_block = match.group(1)
classifiers = [c.strip(" \"'\n") for c in raw_block.split(",") if c.strip()]

# Step 4: Check for invalid classifiers
invalid = [c for c in classifiers if c not in trove_classifiers]

if invalid:
    print("❌ Invalid classifiers:")
    for c in invalid:
        print(f" - {c}")
    print("Valid classifiers:")
    for c in sorted(trove_classifiers):
        print(f" - {c}")
    sys.exit(1)
else:
    print("✅ All classifiers are valid.")
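The regex extraction above assumes the `classifiers = [...]` block contains no nested brackets, which holds for this pyproject.toml. A stricter alternative (a sketch only, not part of this change) would be to parse the file with `tomllib`, available in the standard library from Python 3.11:

    import tomllib
    from pathlib import Path

    # Read the classifiers list via a real TOML parser instead of a regex.
    with Path("pyproject.toml").open("rb") as f:
        classifiers = tomllib.load(f)["project"]["classifiers"]
    print(classifiers)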
@@ -0,0 +1,458 @@
|
||||
state_dict_keys = {
|
||||
"diffusion_model.double_blocks.0.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.0.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.0.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.0.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.0.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.0.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.1.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.1.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.1.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.1.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.1.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.10.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.10.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.10.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.10.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.10.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.11.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.11.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.11.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.11.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.11.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.12.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.12.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.12.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.12.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.12.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.13.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.13.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.13.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.13.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.13.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.14.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.14.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.14.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.14.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.14.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.15.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.15.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.15.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.15.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.15.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.16.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.16.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.16.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.16.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.16.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.17.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.17.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.17.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.17.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.17.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.18.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.18.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.18.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.18.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.18.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.2.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.2.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.2.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.2.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.2.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.3.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.3.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.3.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.3.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.3.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.4.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.4.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.4.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.4.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.4.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.5.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.5.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.5.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.5.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.5.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.6.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.6.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.6.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.6.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.6.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.7.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.7.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.7.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.7.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.7.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.8.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.8.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.8.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.8.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.8.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.img_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.img_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.9.img_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.img_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.9.img_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.9.img_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.txt_attn.proj.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_attn.proj.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.double_blocks.9.txt_attn.qkv.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_attn.qkv.lora_B.weight": [9216, 16],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.0.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.0.lora_B.weight": [12288, 16],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.2.lora_A.weight": [16, 12288],
|
||||
"diffusion_model.double_blocks.9.txt_mlp.2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.0.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.0.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.0.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.0.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.1.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.1.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.1.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.1.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.10.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.10.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.10.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.10.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.11.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.11.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.11.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.11.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.12.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.12.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.12.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.12.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.13.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.13.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.13.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.13.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.14.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.14.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.14.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.14.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.15.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.15.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.15.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.15.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.16.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.16.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.16.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.16.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.17.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.17.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.17.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.17.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.18.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.18.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.18.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.18.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.19.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.19.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.19.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.19.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.2.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.2.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.2.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.2.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.20.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.20.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.20.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.20.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.21.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.21.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.21.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.21.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.22.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.22.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.22.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.22.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.23.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.23.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.23.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.23.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.24.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.24.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.24.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.24.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.25.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.25.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.25.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.25.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.26.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.26.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.26.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.26.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.27.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.27.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.27.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.27.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.28.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.28.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.28.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.28.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.29.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.29.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.29.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.29.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.3.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.3.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.3.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.3.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.30.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.30.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.30.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.30.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.31.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.31.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.31.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.31.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.32.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.32.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.32.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.32.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.33.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.33.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.33.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.33.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.34.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.34.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.34.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.34.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.35.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.35.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.35.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.35.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.36.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.36.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.36.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.36.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.37.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.37.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.37.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.37.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.4.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.4.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.4.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.4.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.5.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.5.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.5.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.5.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.6.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.6.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.6.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.6.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.7.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.7.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.7.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.7.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.8.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.8.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.8.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.8.linear2.lora_B.weight": [3072, 16],
|
||||
"diffusion_model.single_blocks.9.linear1.lora_A.weight": [16, 3072],
|
||||
"diffusion_model.single_blocks.9.linear1.lora_B.weight": [21504, 16],
|
||||
"diffusion_model.single_blocks.9.linear2.lora_A.weight": [16, 15360],
|
||||
"diffusion_model.single_blocks.9.linear2.lora_B.weight": [3072, 16],
|
||||
}
|
||||
@@ -0,0 +1,59 @@
import accelerate
import pytest

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
    _group_state_by_submodel,
    is_state_dict_likely_in_flux_aitoolkit_format,
    lora_model_from_flux_aitoolkit_state_dict,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_dora_onetrainer_format import (
    state_dict_keys as flux_onetrainer_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_aitoolkit_format import (
    state_dict_keys as flux_aitoolkit_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.flux_lora_diffusers_format import (
    state_dict_keys as flux_diffusers_state_dict_keys,
)
from tests.backend.patches.lora_conversions.lora_state_dicts.utils import keys_to_mock_state_dict


def test_is_state_dict_likely_in_flux_aitoolkit_format():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
    assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict)


@pytest.mark.parametrize("sd_keys", [flux_diffusers_state_dict_keys, flux_onetrainer_state_dict_keys])
def test_is_state_dict_likely_in_flux_kohya_format_false(sd_keys: dict[str, list[int]]):
    state_dict = keys_to_mock_state_dict(sd_keys)
    assert not is_state_dict_likely_in_flux_aitoolkit_format(state_dict)


def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)
    converted_state_dict = _group_state_by_submodel(state_dict).transformer

    # Extract the prefixes from the converted state dict (without the lora suffixes)
    converted_key_prefixes: list[str] = []
    for k in converted_state_dict.keys():
        k = k.replace(".lora_A.weight", "")
        k = k.replace(".lora_B.weight", "")
        converted_key_prefixes.append(k)

    # Initialize a FLUX model on the meta device.
    with accelerate.init_empty_weights():
        model = Flux(params["flux-schnell"])
    model_keys = set(model.state_dict().keys())

    for converted_key_prefix in converted_key_prefixes:
        assert any(model_key.startswith(converted_key_prefix) for model_key in model_keys), (
            f"'{converted_key_prefix}' did not match any model keys."
        )


def test_lora_model_from_flux_aitoolkit_state_dict():
    state_dict = keys_to_mock_state_dict(flux_aitoolkit_state_dict_keys)

    assert lora_model_from_flux_aitoolkit_state_dict(state_dict)
@@ -10,7 +10,7 @@ import torch
from invokeai.app.services.config import get_config
from invokeai.backend.util.devices import TorchDevice, choose_precision, choose_torch_device, torch_dtype

devices = ["cpu", "cuda:0", "cuda:1", "mps"]
devices = ["cpu", "cuda:0", "cuda:1", "cuda:2", "mps"]
device_types_cpu = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float32)]
device_types_cuda = [("cpu", torch.float32), ("cuda:0", torch.float16), ("mps", torch.float32)]
device_types_mps = [("cpu", torch.float32), ("cuda:0", torch.float32), ("mps", torch.float16)]

Some files were not shown because too many files have changed in this diff.