Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 13:17:55 -05:00)

Compare commits: v5.1.1 ... ryan/flux- (271 commits)
Commits in this compare (SHA1 only):

ffa89126d1 d1bb4c2c70 bbd89d54b4 ee61006a49 0b43f5fd64 6c61266990 2d5afe8094 2430137d19 5440c03767 358dbdbf84
5ec2d71be0 8f28903c81 a071f2788a d9a257ef8a 23fada3eea 2917e59c38 c691855a67 a00347379b ad1a8fbb8d f03b77e882
2b000cb006 af636f08b8 f8150f46a5 b613be0f5d a833d74913 02df055e8a add31ce596 7d7ad3052e 3b16dbffb2 d8b0648766
ae64ee224f 1251dfd7f6 804ee3a7fb fc5f9047c2 0b208220e5 916b9f7741 0947a006cc 2c2df6423e c3df9d38c0 3790c254f5
abf46eaacd 166548246d 985dcd9862 b1df592506 a09a0eff69 e73bd09d93 6f5477a3f0 f78a542401 8613efb03a d8347d856d
336e6e0c19 5bd87ca89b fe87c198eb 69a4a88925 6e7491b086 3da8076a2b 80360a8abb acfeb4a276 b33dbfc95f f9bc29203b
cbe7717409 d6add93901 ea45dce9dc 8d44363d49 9933cdb6b7 e3e9d1f27c bb59ad438a e38f5b1576 1bb49b698f fa1fbd89fe
190ef6732c 947cd4694b ee32d0666d bc8ad9ccbf e96b290fa9 b9f83eae6a 9868e23235 0060cae17c 56f0845552 da3f85dd8b
7185363f17 ac08c31fbc ea54a2655a cc83dede9f 8464fd2ced c3316368d9 8b2d5ab28a 3f6acdc2d3 4aa20a95b2 2d82e69a33
683f9a70e7 bb6d073828 7f7d8e5177 f37c5011f4 bb947c6162 a654dad20f 2bd44662f3 e7f9086006 5141be8009 eacdfc660b
5fd3c39431 7daf3b7d4a 908f65698d 63c4ac58e9 8c125681ea 118f0ba3bf b3b7d084d0 812940eb95 0559480dd6 d99e7dd4e4
e854181417 de414c09fd ce4624f72b 47c7df3476 4289b5e6c3 c8d1d14662 44c588d778 d75ac56d00 714dd5f0be 2f4d3cb5e6
b76555bda9 1cdd501a0a 1125218bc5 683504bfb5 03cf953398 24c115663d a9e7ecad49 76f4766324 3dfc242f77 1e43389cb4
cb33de34f7 7562ea48dc 83f4700f5a 704e7479b2 5f44559f30 7a22819100 70495665c5 ca30acc5b4 8121843d86 bc0ded0a23
30f6034f88 7d56a8ce54 e7dc439006 bce5a93eb1 93e98a1f63 0f93deab3b 3f3aba8b10 0b84f567f1 69c0d7dcc9 5307248fcf
2efaea8f79 c1dfd9b7d9 c594ef89d2 563db67b80 236c065edd 1f5d744d01 b36c6af0ae 4e431a9d5f 48a8232285 94007fef5b
9e6fb3bd3f 8522129639 15033b1a9d 743d78f82b 06a434b0a2 7f2fdae870 00be03b5b9 0f98806a25 0f1541d091 c49bbb22e5
7bd4b586a6 754f049f54 883beb90eb ad76399702 69773a791d 99e88e601d 4050f7deae 0399b04f29 3b349b2686 aa34dbe1e1
ac2476c63c f16489f1ce 3b38b69192 2c601438eb 5d6a2a3709 1d7a264050 c494e0642a 849b9e8d86 4a66b7ac83 751eb59afa
f537cf1916 0cc6f67bb1 b2bf03fd37 14bc06ab66 9c82cc7fcb c60cab97a7 eda979341a b6c7949bb7 d691f672a2 8deeac1372
4aace24f1f b1567fe0e4 3953e60a4f 3c46522595 63a2e17f6b 8b1ef4b902 5f2279c984 e82d67849c 3977ffaa3e 9a8a858fe4
859944f848 8d1a45863c 6798bbab26 2c92e8a495 216b36c75d 8bf8742984 c78eeb1645 cd88723a80 dea6cbd599 0dd9f1f772
5d11c30ce6 a783539cd2 2f8f30b497 f878e5e74e bfc460a5c6 a24581ede2 56731766ca 80bc4ebee3 745b6dbd5d c7628945c4
728927ecff 1a7eece695 2cd14dd066 5872f05342 4ad135c6ae c72c2770fe e733a1f30e 4be3a33744 1751c380db 16cda33025
8308e7d186 c0aab56d08 1795f4f8a2 5bfd2ec6b7 a35b229a9d e93da5d4b2 a17ea9bfad 3578010ba4 459cf52043 9bcb93f575
d1a0e99701 92b1515d9d 36515e1e2a c81bb761ed 1d4a58e52b 62d12e6468 9541156ce5 eb5b6625ea 9758e5a622 58eba8bdbd
2821ba8967 2cc72b19bc 8544ba3798 65fe79fa0e c99852657e ed54b89e9e d56c80af8e 0a65a01db8 5f416ee4fa 115c82231b
ccc1d4417e
@@ -40,7 +40,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
     else \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
     fi &&\

     # xformers + triton fails to install on arm64
@@ -12,7 +12,7 @@ MINIMUM_PYTHON_VERSION=3.10.0
 MAXIMUM_PYTHON_VERSION=3.11.100
 PYTHON=""
 for candidate in python3.11 python3.10 python3 python ; do
-    if ppath=`which $candidate`; then
+    if ppath=`which $candidate 2>/dev/null`; then
         # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
         # we check that this found executable can actually run
         if [ $($candidate --version &>/dev/null; echo ${PIPESTATUS}) -gt 0 ]; then continue; fi
@@ -30,10 +30,11 @@ done
 if [ -z "$PYTHON" ]; then
     echo "A suitable Python interpreter could not be found"
     echo "Please install Python $MINIMUM_PYTHON_VERSION or higher (maximum $MAXIMUM_PYTHON_VERSION) before running this script. See instructions at $INSTRUCTIONS for help."
+    echo "For the best user experience we suggest enlarging or maximizing this window now."
     read -p "Press any key to exit"
     exit -1
 fi

-echo "For the best user experience we suggest enlarging or maximizing this window now."
-
 exec $PYTHON ./lib/main.py ${@}
+read -p "Press any key to exit"
@@ -282,12 +282,6 @@ class InvokeAiInstance:
         shutil.copy(src, dest)
         os.chmod(dest, 0o0755)

-    def update(self):
-        pass
-
-    def remove(self):
-        pass
-

 ### Utility functions ###
@@ -402,7 +396,7 @@ def get_torch_source() -> Tuple[str | None, str | None]:
     :rtype: list
     """

-    from messages import select_gpu
+    from messages import GpuType, select_gpu

     # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
     device = select_gpu()
@@ -412,15 +406,21 @@ def get_torch_source() -> Tuple[str | None, str | None]:
     url = None
     optional_modules: str | None = None
     if OS == "Linux":
-        if device.value == "rocm":
+        if device == GpuType.ROCM:
             url = "https://download.pytorch.org/whl/rocm5.6"
-        elif device.value == "cpu":
+        elif device == GpuType.CPU:
             url = "https://download.pytorch.org/whl/cpu"
-        elif device.value == "cuda":
-            # CUDA uses the default PyPi index
+        elif device == GpuType.CUDA:
+            url = "https://download.pytorch.org/whl/cu124"
+            optional_modules = "[onnx-cuda]"
+        elif device == GpuType.CUDA_WITH_XFORMERS:
+            url = "https://download.pytorch.org/whl/cu124"
+            optional_modules = "[xformers,onnx-cuda]"
     elif OS == "Windows":
-        if device.value == "cuda":
+        if device == GpuType.CUDA:
             url = "https://download.pytorch.org/whl/cu124"
             optional_modules = "[onnx-cuda]"
+        elif device == GpuType.CUDA_WITH_XFORMERS:
+            url = "https://download.pytorch.org/whl/cu124"
+            optional_modules = "[xformers,onnx-cuda]"
         elif device.value == "cpu":
@@ -206,6 +206,7 @@ def dest_path(dest: Optional[str | Path] = None) -> Path | None:


 class GpuType(Enum):
+    CUDA_WITH_XFORMERS = "xformers"
     CUDA = "cuda"
     ROCM = "rocm"
     CPU = "cpu"
@@ -221,11 +222,15 @@ def select_gpu() -> GpuType:
         return GpuType.CPU

     nvidia = (
-        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
+        "an [gold1 b]NVIDIA[/] RTX 3060 or newer GPU using CUDA",
         GpuType.CUDA,
     )
+    vintage_nvidia = (
+        "an [gold1 b]NVIDIA[/] RTX 20xx or older GPU using CUDA+xFormers",
+        GpuType.CUDA_WITH_XFORMERS,
+    )
     amd = (
-        "an [gold1 b]AMD[/] GPU (using ROCm™)",
+        "an [gold1 b]AMD[/] GPU using ROCm",
         GpuType.ROCM,
     )
     cpu = (
@@ -235,14 +240,13 @@ def select_gpu() -> GpuType:

     options = []
     if OS == "Windows":
-        options = [nvidia, cpu]
+        options = [nvidia, vintage_nvidia, cpu]
     if OS == "Linux":
-        options = [nvidia, amd, cpu]
+        options = [nvidia, vintage_nvidia, amd, cpu]
     elif OS == "Darwin":
         options = [cpu]

     if len(options) == 1:
         print(f'Your platform [gold1]{OS}-{ARCH}[/] only supports the "{options[0][1]}" driver. Proceeding with that.')
         return options[0][1]

     options = {str(i): opt for i, opt in enumerate(options, 1)}
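The enum comparisons above replace the brittle `device.value == "..."` string checks. A minimal standalone sketch of the resulting dispatch (URLs taken from the hunk above; this is an illustration, not the installer itself):

from enum import Enum

class GpuType(Enum):
    CUDA_WITH_XFORMERS = "xformers"
    CUDA = "cuda"
    ROCM = "rocm"
    CPU = "cpu"

def torch_index_url(device: GpuType) -> str:
    # Mirrors the Linux branch of get_torch_source(): ROCm and CPU use
    # dedicated PyTorch indexes; both CUDA flavors use the cu124 index.
    if device is GpuType.ROCM:
        return "https://download.pytorch.org/whl/rocm5.6"
    if device is GpuType.CPU:
        return "https://download.pytorch.org/whl/cpu"
    return "https://download.pytorch.org/whl/cu124"

assert torch_index_url(GpuType.CPU).endswith("/cpu")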
@@ -5,9 +5,10 @@ from fastapi.routing import APIRouter
 from pydantic import BaseModel, Field

 from invokeai.app.api.dependencies import ApiDependencies
-from invokeai.app.services.board_records.board_records_common import BoardChanges
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
 from invokeai.app.services.boards.boards_common import BoardDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection

 boards_router = APIRouter(prefix="/v1/boards", tags=["boards"])
@@ -115,6 +116,8 @@ async def delete_board(
     response_model=Union[OffsetPaginatedResults[BoardDTO], list[BoardDTO]],
 )
 async def list_boards(
+    order_by: BoardRecordOrderBy = Query(default=BoardRecordOrderBy.CreatedAt, description="The attribute to order by"),
+    direction: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The direction to order by"),
     all: Optional[bool] = Query(default=None, description="Whether to list all boards"),
     offset: Optional[int] = Query(default=None, description="The page offset"),
     limit: Optional[int] = Query(default=None, description="The number of boards per page"),
@@ -122,9 +125,9 @@ async def list_boards(
 ) -> Union[OffsetPaginatedResults[BoardDTO], list[BoardDTO]]:
     """Gets a list of boards"""
     if all:
-        return ApiDependencies.invoker.services.boards.get_all(include_archived)
+        return ApiDependencies.invoker.services.boards.get_all(order_by, direction, include_archived)
     elif offset is not None and limit is not None:
-        return ApiDependencies.invoker.services.boards.get_many(offset, limit, include_archived)
+        return ApiDependencies.invoker.services.boards.get_many(order_by, direction, offset, limit, include_archived)
     else:
         raise HTTPException(
             status_code=400,
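For illustration, a client call exercising the new ordering parameters might look like the following. The host, port, and `/api` mount prefix are assumptions; `order_by` values come from `BoardRecordOrderBy`, and `direction` is assumed to serialize as "ASC"/"DESC":

import requests

# Hypothetical local server; list all boards ordered by name, ascending.
resp = requests.get(
    "http://localhost:9090/api/v1/boards/",
    params={"all": True, "order_by": "board_name", "direction": "ASC"},
)
resp.raise_for_status()
for board in resp.json():
    print(board["board_name"])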
@@ -38,7 +38,12 @@ from invokeai.backend.model_manager.load.model_cache.model_cache_base import Cac
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
 from invokeai.backend.model_manager.search import ModelSearch
-from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel, StarterModelWithoutDependencies
+from invokeai.backend.model_manager.starter_models import (
+    STARTER_BUNDLES,
+    STARTER_MODELS,
+    StarterModel,
+    StarterModelWithoutDependencies,
+)

 model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])
@@ -792,22 +797,48 @@ async def convert_model(
     return new_config


-@model_manager_router.get("/starter_models", operation_id="get_starter_models", response_model=list[StarterModel])
-async def get_starter_models() -> list[StarterModel]:
+class StarterModelResponse(BaseModel):
+    starter_models: list[StarterModel]
+    starter_bundles: dict[str, list[StarterModel]]
+
+
+def get_is_installed(
+    starter_model: StarterModel | StarterModelWithoutDependencies, installed_models: list[AnyModelConfig]
+) -> bool:
+    for model in installed_models:
+        if model.source == starter_model.source:
+            return True
+        if model.name == starter_model.name and model.base == starter_model.base and model.type == starter_model.type:
+            return True
+    return False
+
+
+@model_manager_router.get("/starter_models", operation_id="get_starter_models", response_model=StarterModelResponse)
+async def get_starter_models() -> StarterModelResponse:
     installed_models = ApiDependencies.invoker.services.model_manager.store.search_by_attr()
-    installed_model_sources = {m.source for m in installed_models}
     starter_models = deepcopy(STARTER_MODELS)
+    starter_bundles = deepcopy(STARTER_BUNDLES)
     for model in starter_models:
-        if model.source in installed_model_sources:
-            model.is_installed = True
+        model.is_installed = get_is_installed(model, installed_models)
         # Remove already-installed dependencies
         missing_deps: list[StarterModelWithoutDependencies] = []
         for dep in model.dependencies or []:
-            if dep.source not in installed_model_sources:
+            if not get_is_installed(dep, installed_models):
                 missing_deps.append(dep)
         model.dependencies = missing_deps

-    return starter_models
+    for bundle in starter_bundles.values():
+        for model in bundle:
+            model.is_installed = get_is_installed(model, installed_models)
+            # Remove already-installed dependencies
+            missing_deps: list[StarterModelWithoutDependencies] = []
+            for dep in model.dependencies or []:
+                if not get_is_installed(dep, installed_models):
+                    missing_deps.append(dep)
+            model.dependencies = missing_deps
+
+    return StarterModelResponse(starter_models=starter_models, starter_bundles=starter_bundles)


 @model_manager_router.get(
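A quick check of the matching rule in `get_is_installed` (a starter model counts as installed if either its source matches, or its name/base/type triple matches), using lightweight stand-in objects rather than the real config classes:

from dataclasses import dataclass

@dataclass
class FakeModel:
    # Stand-in for the AnyModelConfig / StarterModel fields the function reads.
    source: str
    name: str
    base: str
    type: str

def get_is_installed(starter, installed_models) -> bool:
    for model in installed_models:
        if model.source == starter.source:
            return True
        if model.name == starter.name and model.base == starter.base and model.type == starter.type:
            return True
    return False

installed = [FakeModel("hf://org/model-a", "Model A", "flux", "controlnet")]
# Same model fetched from a different source: still detected via name/base/type.
candidate = FakeModel("https://mirror.example/model-a", "Model A", "flux", "controlnet")
assert get_is_installed(candidate, installed)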
@@ -88,7 +88,7 @@ async def list_workflows(
         default=WorkflowRecordOrderBy.Name, description="The attribute to order by"
     ),
     direction: SQLiteDirection = Query(default=SQLiteDirection.Ascending, description="The direction to order by"),
-    category: Optional[WorkflowCategory] = Query(default=None, description="The category of workflow to get"),
+    category: WorkflowCategory = Query(default=WorkflowCategory.User, description="The category of workflow to get"),
     query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
 ) -> PaginatedResults[WorkflowRecordListItemDTO]:
     """Gets a page of workflows"""
@@ -192,6 +192,7 @@ class FieldDescriptions:
     freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
     freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
     freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
+    instantx_control_mode = "The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'."


 class ImageField(BaseModel):
invokeai/app/invocations/flux_controlnet.py (new file, 99 lines)
@@ -0,0 +1,99 @@
from pydantic import BaseModel, Field, field_validator, model_validator

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES


class FluxControlNetField(BaseModel):
    image: ImageField = Field(description="The control image")
    control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
    control_weight: float | list[float] = Field(default=1, description="The weight given to the ControlNet")
    begin_step_percent: float = Field(
        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
    )
    end_step_percent: float = Field(
        default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
    )
    resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
    instantx_control_mode: int | None = Field(default=-1, description=FieldDescriptions.instantx_control_mode)

    @field_validator("control_weight")
    @classmethod
    def validate_control_weight(cls, v: float | list[float]) -> float | list[float]:
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self):
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self


@invocation_output("flux_controlnet_output")
class FluxControlNetOutput(BaseInvocationOutput):
    """FLUX ControlNet info"""

    control: FluxControlNetField = OutputField(description=FieldDescriptions.control)


@invocation(
    "flux_controlnet",
    title="FLUX ControlNet",
    tags=["controlnet", "flux"],
    category="controlnet",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxControlNetInvocation(BaseInvocation):
    """Collect FLUX ControlNet info to pass to other nodes."""

    image: ImageField = InputField(description="The control image")
    control_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
    )
    control_weight: float | list[float] = InputField(
        default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
    )
    begin_step_percent: float = InputField(
        default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
    )
    end_step_percent: float = InputField(
        default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
    )
    resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
    # Note: We default to -1 instead of None, because in the workflow editor UI None is not currently supported.
    instantx_control_mode: int | None = InputField(default=-1, description=FieldDescriptions.instantx_control_mode)

    @field_validator("control_weight")
    @classmethod
    def validate_control_weight(cls, v: float | list[float]) -> float | list[float]:
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self):
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self

    def invoke(self, context: InvocationContext) -> FluxControlNetOutput:
        return FluxControlNetOutput(
            control=FluxControlNetField(
                image=self.image,
                control_model=self.control_model,
                control_weight=self.control_weight,
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
                resize_mode=self.resize_mode,
                instantx_control_mode=self.instantx_control_mode,
            ),
        )
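The two validators are shared between the field model and the invocation. A standalone sketch of what they enforce (hypothetical reimplementations; the real helpers live in `invokeai.app.invocations.util`):

def validate_weights(weights: float | list[float]) -> None:
    # Assumed behavior: every weight must fall in the UI range [-1, 2].
    values = weights if isinstance(weights, list) else [weights]
    if any(w < -1 or w > 2 for w in values):
        raise ValueError("Control weights must be in the range [-1, 2]")

def validate_begin_end_step(begin: float, end: float) -> None:
    # Assumed behavior: the control window cannot start after it ends.
    if begin > end:
        raise ValueError("Begin step percent must be less than or equal to end step percent")

validate_weights([0.5, 1.5])       # ok
validate_begin_end_step(0.0, 1.0)  # ok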
@@ -16,11 +16,16 @@ from invokeai.app.invocations.fields import (
     WithBoard,
     WithMetadata,
 )
-from invokeai.app.invocations.model import TransformerField
+from invokeai.app.invocations.flux_controlnet import FluxControlNetField
+from invokeai.app.invocations.model import TransformerField, VAEField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXControlNetFlux
+from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux
 from invokeai.backend.flux.denoise import denoise
-from invokeai.backend.flux.inpaint_extension import InpaintExtension
+from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
+from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.model import Flux
 from invokeai.backend.flux.sampling_utils import (
     clip_timestep_schedule_fractional,
@@ -44,7 +49,7 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="3.0.0",
+    version="3.1.0",
     classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
@@ -87,6 +92,14 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
     )
     seed: int = InputField(default=0, description="Randomness seed for reproducibility.")
+    control: FluxControlNetField | list[FluxControlNetField] | None = InputField(
+        default=None, input=Input.Connection, description="ControlNet models."
+    )
+    controlnet_vae: VAEField | None = InputField(
+        default=None,
+        description=FieldDescriptions.vae,
+        input=Input.Connection,
+    )

     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
@@ -167,8 +180,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

         inpaint_mask = self._prep_inpaint_mask(context, x)

-        b, _c, h, w = x.shape
-        img_ids = generate_img_ids(h=h, w=w, batch_size=b, device=x.device, dtype=x.dtype)
+        b, _c, latent_h, latent_w = x.shape
+        img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)

         bs, t5_seq_len, _ = t5_embeddings.shape
         txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())
@@ -192,12 +205,21 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             noise=noise,
         )

-        with (
-            transformer_info.model_on_device() as (cached_weights, transformer),
-            ExitStack() as exit_stack,
-        ):
-            assert isinstance(transformer, Flux)
+        with ExitStack() as exit_stack:
+            # Prepare ControlNet extensions.
+            # Note: We do this before loading the transformer model to minimize peak memory (see implementation).
+            controlnet_extensions = self._prep_controlnet_extensions(
+                context=context,
+                exit_stack=exit_stack,
+                latent_height=latent_h,
+                latent_width=latent_w,
+                dtype=inference_dtype,
+                device=x.device,
+            )
+
+            # Load the transformer model.
+            (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device())
+            assert isinstance(transformer, Flux)
             config = transformer_info.config
             assert config is not None
@@ -242,6 +264,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
                 step_callback=self._build_step_callback(context),
                 guidance=self.guidance,
                 inpaint_extension=inpaint_extension,
+                controlnet_extensions=controlnet_extensions,
             )

         x = unpack(x.float(), self.height, self.width)
@@ -288,6 +311,104 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         # `latents`.
         return mask.expand_as(latents)

+    def _prep_controlnet_extensions(
+        self,
+        context: InvocationContext,
+        exit_stack: ExitStack,
+        latent_height: int,
+        latent_width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+    ) -> list[XLabsControlNetExtension | InstantXControlNetExtension]:
+        # Normalize the controlnet input to list[ControlField].
+        controlnets: list[FluxControlNetField]
+        if self.control is None:
+            controlnets = []
+        elif isinstance(self.control, FluxControlNetField):
+            controlnets = [self.control]
+        elif isinstance(self.control, list):
+            controlnets = self.control
+        else:
+            raise ValueError(f"Unsupported controlnet type: {type(self.control)}")
+
+        # TODO(ryand): Add a field to the model config so that we can distinguish between XLabs and InstantX ControlNets
+        # before loading the models. Then make sure that all VAE encoding is done before loading the ControlNets to
+        # minimize peak memory.
+
+        # First, load the ControlNet models so that we can determine the ControlNet types.
+        controlnet_models = [context.models.load(controlnet.control_model) for controlnet in controlnets]
+
+        # Calculate the controlnet conditioning tensors.
+        # We do this before loading the ControlNet models because it may require running the VAE, and we are trying to
+        # keep peak memory down.
+        controlnet_conds: list[torch.Tensor] = []
+        for controlnet, controlnet_model in zip(controlnets, controlnet_models, strict=True):
+            image = context.images.get_pil(controlnet.image.image_name)
+            if isinstance(controlnet_model.model, InstantXControlNetFlux):
+                if self.controlnet_vae is None:
+                    raise ValueError("A ControlNet VAE is required when using an InstantX FLUX ControlNet.")
+                vae_info = context.models.load(self.controlnet_vae.vae)
+                controlnet_conds.append(
+                    InstantXControlNetExtension.prepare_controlnet_cond(
+                        controlnet_image=image,
+                        vae_info=vae_info,
+                        latent_height=latent_height,
+                        latent_width=latent_width,
+                        dtype=dtype,
+                        device=device,
+                        resize_mode=controlnet.resize_mode,
+                    )
+                )
+            elif isinstance(controlnet_model.model, XLabsControlNetFlux):
+                controlnet_conds.append(
+                    XLabsControlNetExtension.prepare_controlnet_cond(
+                        controlnet_image=image,
+                        latent_height=latent_height,
+                        latent_width=latent_width,
+                        dtype=dtype,
+                        device=device,
+                        resize_mode=controlnet.resize_mode,
+                    )
+                )
+
+        # Finally, load the ControlNet models and initialize the ControlNet extensions.
+        controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension] = []
+        for controlnet, controlnet_cond, controlnet_model in zip(
+            controlnets, controlnet_conds, controlnet_models, strict=True
+        ):
+            model = exit_stack.enter_context(controlnet_model)
+
+            if isinstance(model, XLabsControlNetFlux):
+                controlnet_extensions.append(
+                    XLabsControlNetExtension(
+                        model=model,
+                        controlnet_cond=controlnet_cond,
+                        weight=controlnet.control_weight,
+                        begin_step_percent=controlnet.begin_step_percent,
+                        end_step_percent=controlnet.end_step_percent,
+                    )
+                )
+            elif isinstance(model, InstantXControlNetFlux):
+                instantx_control_mode: torch.Tensor | None = None
+                if controlnet.instantx_control_mode is not None and controlnet.instantx_control_mode >= 0:
+                    instantx_control_mode = torch.tensor(controlnet.instantx_control_mode, dtype=torch.long)
+                    instantx_control_mode = instantx_control_mode.reshape([-1, 1])
+
+                controlnet_extensions.append(
+                    InstantXControlNetExtension(
+                        model=model,
+                        controlnet_cond=controlnet_cond,
+                        instantx_control_mode=instantx_control_mode,
+                        weight=controlnet.control_weight,
+                        begin_step_percent=controlnet.begin_step_percent,
+                        end_step_percent=controlnet.end_step_percent,
+                    )
+                )
+            else:
+                raise ValueError(f"Unsupported ControlNet model type: {type(model)}")
+
+        return controlnet_extensions
+
     def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
         for lora in self.transformer.loras:
             lora_info = context.models.load(lora.lora)
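The normalization at the top of `_prep_controlnet_extensions` is a common pattern for fields that accept one item, a list, or nothing; a self-contained sketch:

from typing import TypeVar

T = TypeVar("T")

def normalize_to_list(value: T | list[T] | None) -> list[T]:
    # None -> [], single item -> [item], list -> unchanged.
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]

assert normalize_to_list(None) == []
assert normalize_to_list("a") == ["a"]
assert normalize_to_list(["a", "b"]) == ["a", "b"]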
@@ -1,7 +1,8 @@
 from abc import ABC, abstractmethod

-from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecord
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecord, BoardRecordOrderBy
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection


 class BoardRecordStorageBase(ABC):
@@ -39,12 +40,19 @@ class BoardRecordStorageBase(ABC):

     @abstractmethod
     def get_many(
-        self, offset: int = 0, limit: int = 10, include_archived: bool = False
+        self,
+        order_by: BoardRecordOrderBy,
+        direction: SQLiteDirection,
+        offset: int = 0,
+        limit: int = 10,
+        include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardRecord]:
         """Gets many board records."""
         pass

     @abstractmethod
-    def get_all(self, include_archived: bool = False) -> list[BoardRecord]:
+    def get_all(
+        self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
+    ) -> list[BoardRecord]:
         """Gets all board records."""
         pass
@@ -1,8 +1,10 @@
 from datetime import datetime
+from enum import Enum
 from typing import Optional, Union

 from pydantic import BaseModel, Field

+from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import get_iso_timestamp
 from invokeai.app.util.model_exclude_null import BaseModelExcludeNull

@@ -60,6 +62,13 @@ class BoardChanges(BaseModel, extra="forbid"):
     archived: Optional[bool] = Field(default=None, description="Whether or not the board is archived")


+class BoardRecordOrderBy(str, Enum, metaclass=MetaEnum):
+    """The order by options for board records"""
+
+    CreatedAt = "created_at"
+    Name = "board_name"
+
+
 class BoardRecordNotFoundException(Exception):
     """Raised when an board record is not found."""
@@ -8,10 +8,12 @@ from invokeai.app.services.board_records.board_records_common import (
     BoardRecord,
     BoardRecordDeleteException,
     BoardRecordNotFoundException,
+    BoardRecordOrderBy,
     BoardRecordSaveException,
     deserialize_board_record,
 )
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 from invokeai.app.util.misc import uuid_string

@@ -144,7 +146,12 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         return self.get(board_id)

     def get_many(
-        self, offset: int = 0, limit: int = 10, include_archived: bool = False
+        self,
+        order_by: BoardRecordOrderBy,
+        direction: SQLiteDirection,
+        offset: int = 0,
+        limit: int = 10,
+        include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardRecord]:
         try:
             self._lock.acquire()
@@ -154,17 +161,16 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
                 SELECT *
                 FROM boards
                 {archived_filter}
-                ORDER BY created_at DESC
+                ORDER BY {order_by} {direction}
                 LIMIT ? OFFSET ?;
                 """

             # Determine archived filter condition
-            if include_archived:
-                archived_filter = ""
-            else:
-                archived_filter = "WHERE archived = 0"
+            archived_filter = "" if include_archived else "WHERE archived = 0"

-            final_query = base_query.format(archived_filter=archived_filter)
+            final_query = base_query.format(
+                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
+            )

             # Execute query to fetch boards
             self._cursor.execute(final_query, (limit, offset))
@@ -198,23 +204,32 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         finally:
             self._lock.release()

-    def get_all(self, include_archived: bool = False) -> list[BoardRecord]:
+    def get_all(
+        self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
+    ) -> list[BoardRecord]:
         try:
             self._lock.acquire()

-            base_query = """
-                SELECT *
-                FROM boards
-                {archived_filter}
-                ORDER BY created_at DESC
-                """
-
-            if include_archived:
-                archived_filter = ""
+            if order_by == BoardRecordOrderBy.Name:
+                base_query = """
+                    SELECT *
+                    FROM boards
+                    {archived_filter}
+                    ORDER BY LOWER(board_name) {direction}
+                    """
             else:
-                archived_filter = "WHERE archived = 0"
+                base_query = """
+                    SELECT *
+                    FROM boards
+                    {archived_filter}
+                    ORDER BY {order_by} {direction}
+                    """

-            final_query = base_query.format(archived_filter=archived_filter)
+            archived_filter = "" if include_archived else "WHERE archived = 0"
+
+            final_query = base_query.format(
+                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
+            )

             self._cursor.execute(final_query)
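Because `order_by` and `direction` are enums validated upstream, interpolating their `.value` into the query via `str.format` is injection-safe, and the `Name` branch sorts case-insensitively. A condensed, in-memory illustration (hypothetical minimal schema):

import sqlite3
from enum import Enum

class BoardRecordOrderBy(str, Enum):
    CreatedAt = "created_at"
    Name = "board_name"

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE boards (board_name TEXT, created_at TEXT)")
conn.executemany(
    "INSERT INTO boards VALUES (?, ?)",
    [("beta", "2024-01-02"), ("Alpha", "2024-01-01")],
)

# Enum members (not raw user input) are interpolated, so format() is safe here.
sql = f"SELECT board_name FROM boards ORDER BY LOWER({BoardRecordOrderBy.Name.value}) ASC"
print(conn.execute(sql).fetchall())  # [('Alpha',), ('beta',)]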
@@ -1,8 +1,9 @@
 from abc import ABC, abstractmethod

-from invokeai.app.services.board_records.board_records_common import BoardChanges
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
 from invokeai.app.services.boards.boards_common import BoardDTO
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection


 class BoardServiceABC(ABC):
@@ -43,12 +44,19 @@ class BoardServiceABC(ABC):

     @abstractmethod
     def get_many(
-        self, offset: int = 0, limit: int = 10, include_archived: bool = False
+        self,
+        order_by: BoardRecordOrderBy,
+        direction: SQLiteDirection,
+        offset: int = 0,
+        limit: int = 10,
+        include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardDTO]:
         """Gets many boards."""
         pass

     @abstractmethod
-    def get_all(self, include_archived: bool = False) -> list[BoardDTO]:
+    def get_all(
+        self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
+    ) -> list[BoardDTO]:
         """Gets all boards."""
         pass
@@ -1,8 +1,9 @@
-from invokeai.app.services.board_records.board_records_common import BoardChanges
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
 from invokeai.app.services.boards.boards_base import BoardServiceABC
 from invokeai.app.services.boards.boards_common import BoardDTO, board_record_to_dto
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
+from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection


 class BoardService(BoardServiceABC):
@@ -47,9 +48,16 @@ class BoardService(BoardServiceABC):
         self.__invoker.services.board_records.delete(board_id)

     def get_many(
-        self, offset: int = 0, limit: int = 10, include_archived: bool = False
+        self,
+        order_by: BoardRecordOrderBy,
+        direction: SQLiteDirection,
+        offset: int = 0,
+        limit: int = 10,
+        include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardDTO]:
-        board_records = self.__invoker.services.board_records.get_many(offset, limit, include_archived)
+        board_records = self.__invoker.services.board_records.get_many(
+            order_by, direction, offset, limit, include_archived
+        )
         board_dtos = []
         for r in board_records.items:
             cover_image = self.__invoker.services.image_records.get_most_recent_image_for_board(r.board_id)
@@ -63,8 +71,10 @@ class BoardService(BoardServiceABC):

         return OffsetPaginatedResults[BoardDTO](items=board_dtos, offset=offset, limit=limit, total=len(board_dtos))

-    def get_all(self, include_archived: bool = False) -> list[BoardDTO]:
-        board_records = self.__invoker.services.board_records.get_all(include_archived)
+    def get_all(
+        self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
+    ) -> list[BoardDTO]:
+        board_records = self.__invoker.services.board_records.get_all(order_by, direction, include_archived)
         board_dtos = []
         for r in board_records:
             cover_image = self.__invoker.services.image_records.get_most_recent_image_for_board(r.board_id)
@@ -110,15 +110,26 @@ class DiskImageFileStorage(ImageFileStorageBase):
         except Exception as e:
             raise ImageFileDeleteException from e

     # TODO: make this a bit more flexible for e.g. cloud storage
     def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
-        path = self.__output_folder / image_name
+        base_folder = self.__thumbnails_folder if thumbnail else self.__output_folder
+        filename = get_thumbnail_name(image_name) if thumbnail else image_name

-        if thumbnail:
-            thumbnail_name = get_thumbnail_name(image_name)
-            path = self.__thumbnails_folder / thumbnail_name
+        # Strip any path information from the filename
+        basename = Path(filename).name

-        return path
+        if basename != filename:
+            raise ValueError("Invalid image name, potential directory traversal detected")
+
+        image_path = base_folder / basename
+
+        # Ensure the image path is within the base folder to prevent directory traversal
+        resolved_base = base_folder.resolve()
+        resolved_image_path = image_path.resolve()
+
+        if not resolved_image_path.is_relative_to(resolved_base):
+            raise ValueError("Image path outside outputs folder, potential directory traversal detected")
+
+        return resolved_image_path

     def validate_path(self, path: Union[str, Path]) -> bool:
         """Validates the path given for an image or thumbnail."""
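The rewritten `get_path` layers two standard defenses: reject names that carry directory components, then confirm the resolved path still lives under the base folder. The same pattern in isolation (illustrative helper, not part of the diff; `Path.is_relative_to` needs Python 3.9+):

from pathlib import Path

def safe_join(base: Path, name: str) -> Path:
    # Reject names with path components, e.g. "../../etc/passwd".
    if Path(name).name != name:
        raise ValueError("Invalid image name, potential directory traversal detected")
    candidate = (base / name).resolve()
    # Belt and braces: the resolved path must remain inside the base folder.
    if not candidate.is_relative_to(base.resolve()):
        raise ValueError("Image path outside outputs folder, potential directory traversal detected")
    return candidate

safe_join(Path("outputs"), "cat.png")          # ok
# safe_join(Path("outputs"), "../secret.txt")  # raises ValueError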
@@ -41,9 +41,9 @@ class WorkflowRecordsStorageBase(ABC):
         self,
         order_by: WorkflowRecordOrderBy,
         direction: SQLiteDirection,
+        category: WorkflowCategory,
         page: int,
         per_page: Optional[int],
-        category: Optional[WorkflowCategory],
         query: Optional[str],
     ) -> PaginatedResults[WorkflowRecordListItemDTO]:
         """Gets many workflows."""

@@ -127,9 +127,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         self,
         order_by: WorkflowRecordOrderBy,
         direction: SQLiteDirection,
+        category: WorkflowCategory,
         page: int = 0,
         per_page: Optional[int] = None,
-        category: Optional[WorkflowCategory] = None,
         query: Optional[str] = None,
     ) -> PaginatedResults[WorkflowRecordListItemDTO]:
         try:
@@ -137,7 +137,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
             # sanitize!
             assert order_by in WorkflowRecordOrderBy
             assert direction in SQLiteDirection
-            count_query = "SELECT COUNT(*) FROM workflow_library"
+            assert category in WorkflowCategory
+            count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
             main_query = """
                 SELECT
                     workflow_id,
@@ -148,26 +149,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                     updated_at,
                     opened_at
                 FROM workflow_library
+                WHERE category = ?
                 """
-            main_params: list[int | str] = []
-            count_params: list[int | str] = []
-
-            if category:
-                assert category in WorkflowCategory
-                main_query += " WHERE category = ?"
-                count_query += " WHERE category = ?"
-                main_params.append(category.value)
-                count_params.append(category.value)
+            main_params: list[int | str] = [category.value]
+            count_params: list[int | str] = [category.value]

             stripped_query = query.strip() if query else None
             if stripped_query:
                 wildcard_query = "%" + stripped_query + "%"
-                if "WHERE" in main_query:
-                    main_query += " AND (name LIKE ? OR description LIKE ?)"
-                    count_query += " AND (name LIKE ? OR description LIKE ?)"
-                else:
-                    main_query += " WHERE name LIKE ? OR description LIKE ?"
-                    count_query += " WHERE name LIKE ? OR description LIKE ?"
+                main_query += " AND name LIKE ? OR description LIKE ? "
+                count_query += " AND name LIKE ? OR description LIKE ?;"
                 main_params.extend([wildcard_query, wildcard_query])
                 count_params.extend([wildcard_query, wildcard_query])
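One thing worth flagging in the new text-search branch: `AND name LIKE ? OR description LIKE ?` is appended without parentheses, and since `AND` binds tighter than `OR` in SQL, a description match can escape the category filter. A sketch of the parenthesized form that keeps the category constraint intact (in-memory SQLite, hypothetical minimal schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_library (name TEXT, description TEXT, category TEXT)")
conn.execute("INSERT INTO workflow_library VALUES ('Upscale', 'ESRGAN upscale', 'user')")
conn.execute("INSERT INTO workflow_library VALUES ('Other', 'upscale-ish', 'default')")

main_query = "SELECT name FROM workflow_library WHERE category = ?"
params: list[str] = ["user"]

text = "upscale"
if text.strip():
    # Parenthesize the OR so it cannot escape the category filter.
    main_query += " AND (name LIKE ? OR description LIKE ?)"
    wildcard = f"%{text.strip()}%"
    params.extend([wildcard, wildcard])

print(conn.execute(main_query, params).fetchall())  # [('Upscale',)]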
invokeai/backend/flux/controlnet/__init__.py (new file, 0 lines)

invokeai/backend/flux/controlnet/controlnet_flux_output.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from dataclasses import dataclass

import torch


@dataclass
class ControlNetFluxOutput:
    single_block_residuals: list[torch.Tensor] | None
    double_block_residuals: list[torch.Tensor] | None

    def apply_weight(self, weight: float):
        if self.single_block_residuals is not None:
            for i in range(len(self.single_block_residuals)):
                self.single_block_residuals[i] = self.single_block_residuals[i] * weight
        if self.double_block_residuals is not None:
            for i in range(len(self.double_block_residuals)):
                self.double_block_residuals[i] = self.double_block_residuals[i] * weight


def add_tensor_lists_elementwise(
    list1: list[torch.Tensor] | None, list2: list[torch.Tensor] | None
) -> list[torch.Tensor] | None:
    """Add two tensor lists elementwise that could be None."""
    if list1 is None and list2 is None:
        return None
    if list1 is None:
        return list2
    if list2 is None:
        return list1

    new_list: list[torch.Tensor] = []
    for list1_tensor, list2_tensor in zip(list1, list2, strict=True):
        new_list.append(list1_tensor + list2_tensor)
    return new_list


def add_controlnet_flux_outputs(
    controlnet_output_1: ControlNetFluxOutput, controlnet_output_2: ControlNetFluxOutput
) -> ControlNetFluxOutput:
    return ControlNetFluxOutput(
        single_block_residuals=add_tensor_lists_elementwise(
            controlnet_output_1.single_block_residuals, controlnet_output_2.single_block_residuals
        ),
        double_block_residuals=add_tensor_lists_elementwise(
            controlnet_output_1.double_block_residuals, controlnet_output_2.double_block_residuals
        ),
    )


def sum_controlnet_flux_outputs(
    controlnet_outputs: list[ControlNetFluxOutput],
) -> ControlNetFluxOutput:
    controlnet_output_sum = ControlNetFluxOutput(single_block_residuals=None, double_block_residuals=None)

    for controlnet_output in controlnet_outputs:
        controlnet_output_sum = add_controlnet_flux_outputs(controlnet_output_sum, controlnet_output)

    return controlnet_output_sum
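A quick usage sketch (made-up 1x4 tensors) showing how per-ControlNet outputs are weighted and then merged by `sum_controlnet_flux_outputs`:

import torch

from invokeai.backend.flux.controlnet.controlnet_flux_output import (
    ControlNetFluxOutput,
    sum_controlnet_flux_outputs,
)

out_a = ControlNetFluxOutput(
    single_block_residuals=None,
    double_block_residuals=[torch.ones(1, 4), torch.ones(1, 4)],
)
out_b = ControlNetFluxOutput(
    single_block_residuals=None,
    double_block_residuals=[torch.ones(1, 4), torch.ones(1, 4)],
)
out_a.apply_weight(0.5)  # scale the first ControlNet's residuals by its weight
merged = sum_controlnet_flux_outputs([out_a, out_b])
assert merged.single_block_residuals is None
assert torch.allclose(merged.double_block_residuals[0], torch.full((1, 4), 1.5))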
invokeai/backend/flux/controlnet/instantx_controlnet_flux.py (new file, 180 lines)
@@ -0,0 +1,180 @@
# This file was initially copied from:
# https://github.com/huggingface/diffusers/blob/99f608218caa069a2f16dcf9efab46959b15aec0/src/diffusers/models/controlnet_flux.py


from dataclasses import dataclass

import torch
import torch.nn as nn

from invokeai.backend.flux.controlnet.zero_module import zero_module
from invokeai.backend.flux.model import FluxParams
from invokeai.backend.flux.modules.layers import (
    DoubleStreamBlock,
    EmbedND,
    MLPEmbedder,
    SingleStreamBlock,
    timestep_embedding,
)


@dataclass
class InstantXControlNetFluxOutput:
    controlnet_block_samples: list[torch.Tensor] | None
    controlnet_single_block_samples: list[torch.Tensor] | None


# NOTE(ryand): Mapping between diffusers FLUX transformer params and BFL FLUX transformer params:
# - Diffusers: BFL
# - in_channels: in_channels
# - num_layers: depth
# - num_single_layers: depth_single_blocks
# - attention_head_dim: hidden_size // num_heads
# - num_attention_heads: num_heads
# - joint_attention_dim: context_in_dim
# - pooled_projection_dim: vec_in_dim
# - guidance_embeds: guidance_embed
# - axes_dims_rope: axes_dim


class InstantXControlNetFlux(torch.nn.Module):
    def __init__(self, params: FluxParams, num_control_modes: int | None = None):
        """
        Args:
            params (FluxParams): The parameters for the FLUX model.
            num_control_modes (int | None, optional): The number of controlnet modes. If non-None, then the model is a
                'union controlnet' model and expects a mode conditioning input at runtime.
        """
        super().__init__()

        # The following modules mirror the base FLUX transformer model.
        # -------------------------------------------------------------
        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
        )
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(params.depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
                for _ in range(params.depth_single_blocks)
            ]
        )

        # The following modules are specific to the ControlNet model.
        # -----------------------------------------------------------
        self.controlnet_blocks = nn.ModuleList([])
        for _ in range(len(self.double_blocks)):
            self.controlnet_blocks.append(zero_module(nn.Linear(self.hidden_size, self.hidden_size)))

        self.controlnet_single_blocks = nn.ModuleList([])
        for _ in range(len(self.single_blocks)):
            self.controlnet_single_blocks.append(zero_module(nn.Linear(self.hidden_size, self.hidden_size)))

        self.is_union = False
        if num_control_modes is not None:
            self.is_union = True
            self.controlnet_mode_embedder = nn.Embedding(num_control_modes, self.hidden_size)

        self.controlnet_x_embedder = zero_module(torch.nn.Linear(self.in_channels, self.hidden_size))

    def forward(
        self,
        controlnet_cond: torch.Tensor,
        controlnet_mode: torch.Tensor | None,
        img: torch.Tensor,
        img_ids: torch.Tensor,
        txt: torch.Tensor,
        txt_ids: torch.Tensor,
        timesteps: torch.Tensor,
        y: torch.Tensor,
        guidance: torch.Tensor | None = None,
    ) -> InstantXControlNetFluxOutput:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        img = self.img_in(img)

        # Add controlnet_cond embedding.
        img = img + self.controlnet_x_embedder(controlnet_cond)

        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        # If this is a union ControlNet, then concat the control mode embedding to the T5 text embedding.
        if self.is_union:
            if controlnet_mode is None:
                # We allow users to enter 'None' as the controlnet_mode if they don't want to worry about this input.
                # We've chosen to use a zero-embedding in this case.
                zero_index = torch.zeros([1, 1], dtype=torch.long, device=txt.device)
                controlnet_mode_emb = torch.zeros_like(self.controlnet_mode_embedder(zero_index))
            else:
                controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode)
            txt = torch.cat([controlnet_mode_emb, txt], dim=1)
            txt_ids = torch.cat([txt_ids[:, :1, :], txt_ids], dim=1)
        else:
            assert controlnet_mode is None

        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)

        double_block_samples: list[torch.Tensor] = []
        for block in self.double_blocks:
            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
            double_block_samples.append(img)

        img = torch.cat((txt, img), 1)

        single_block_samples: list[torch.Tensor] = []
        for block in self.single_blocks:
            img = block(img, vec=vec, pe=pe)
            single_block_samples.append(img[:, txt.shape[1] :])

        # ControlNet Block
        controlnet_double_block_samples: list[torch.Tensor] = []
        for double_block_sample, controlnet_block in zip(double_block_samples, self.controlnet_blocks, strict=True):
            double_block_sample = controlnet_block(double_block_sample)
            controlnet_double_block_samples.append(double_block_sample)

        controlnet_single_block_samples: list[torch.Tensor] = []
        for single_block_sample, controlnet_block in zip(
            single_block_samples, self.controlnet_single_blocks, strict=True
        ):
            single_block_sample = controlnet_block(single_block_sample)
            controlnet_single_block_samples.append(single_block_sample)

        return InstantXControlNetFluxOutput(
            controlnet_block_samples=controlnet_double_block_samples or None,
            controlnet_single_block_samples=controlnet_single_block_samples or None,
        )
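`zero_module` is imported but not shown in this compare. By ControlNet convention it zero-initializes a layer so the ControlNet's residuals start at zero and training begins from the unmodified base model; a minimal version under that assumption:

import torch.nn as nn

def zero_module(module: nn.Module) -> nn.Module:
    # Assumed behavior: zero all parameters so the layer initially contributes nothing.
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

proj = zero_module(nn.Linear(8, 8))
assert all((p == 0).all() for p in proj.parameters())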
invokeai/backend/flux/controlnet/state_dict_utils.py (new file, 295 lines)
@@ -0,0 +1,295 @@
from typing import Any, Dict

import torch

from invokeai.backend.flux.model import FluxParams


def is_state_dict_xlabs_controlnet(sd: Dict[str, Any]) -> bool:
    """Is the state dict for an XLabs ControlNet model?

    This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision.
    """
    # If all of the expected keys are present, then this is very likely an XLabs ControlNet model.
    expected_keys = {
        "controlnet_blocks.0.bias",
        "controlnet_blocks.0.weight",
        "input_hint_block.0.bias",
        "input_hint_block.0.weight",
        "pos_embed_input.bias",
        "pos_embed_input.weight",
    }

    if expected_keys.issubset(sd.keys()):
        return True
    return False


def is_state_dict_instantx_controlnet(sd: Dict[str, Any]) -> bool:
    """Is the state dict for an InstantX ControlNet model?

    This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision.
    """
    # If all of the expected keys are present, then this is very likely an InstantX ControlNet model.
    expected_keys = {
        "controlnet_blocks.0.bias",
        "controlnet_blocks.0.weight",
        "controlnet_x_embedder.bias",
        "controlnet_x_embedder.weight",
    }

    if expected_keys.issubset(sd.keys()):
        return True
    return False
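A sketch of how these detectors might gate loading (the checkpoint path is hypothetical; `safetensors.torch.load_file` is a standard way to obtain such a state dict):

from safetensors.torch import load_file

from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_instantx_controlnet,
    is_state_dict_xlabs_controlnet,
)

sd = load_file("flux_controlnet.safetensors")  # hypothetical checkpoint
if is_state_dict_xlabs_controlnet(sd):
    print("Detected an XLabs FLUX ControlNet")
elif is_state_dict_instantx_controlnet(sd):
    print("Detected an InstantX FLUX ControlNet")
else:
    print("Unrecognized FLUX ControlNet state dict")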
def _fuse_weights(*t: torch.Tensor) -> torch.Tensor:
    """Fuse weights along dimension 0.

    Used to fuse q, k, v attention weights into a single qkv tensor when converting from diffusers to BFL format.
    """
    # TODO(ryand): Double check dim=0 is correct.
    return torch.cat(t, dim=0)


def _convert_flux_double_block_sd_from_diffusers_to_bfl_format(
    sd: Dict[str, torch.Tensor], double_block_index: int
) -> Dict[str, torch.Tensor]:
    """Convert the state dict for a double block from diffusers format to BFL format."""
    to_prefix = f"double_blocks.{double_block_index}"
    from_prefix = f"transformer_blocks.{double_block_index}"

    new_sd: dict[str, torch.Tensor] = {}

    # Check one key to determine if this block exists.
    if f"{from_prefix}.attn.add_q_proj.bias" not in sd:
        return new_sd

    # txt_attn.qkv
    new_sd[f"{to_prefix}.txt_attn.qkv.bias"] = _fuse_weights(
        sd.pop(f"{from_prefix}.attn.add_q_proj.bias"),
        sd.pop(f"{from_prefix}.attn.add_k_proj.bias"),
        sd.pop(f"{from_prefix}.attn.add_v_proj.bias"),
    )
    new_sd[f"{to_prefix}.txt_attn.qkv.weight"] = _fuse_weights(
        sd.pop(f"{from_prefix}.attn.add_q_proj.weight"),
        sd.pop(f"{from_prefix}.attn.add_k_proj.weight"),
        sd.pop(f"{from_prefix}.attn.add_v_proj.weight"),
    )

    # img_attn.qkv
    new_sd[f"{to_prefix}.img_attn.qkv.bias"] = _fuse_weights(
        sd.pop(f"{from_prefix}.attn.to_q.bias"),
        sd.pop(f"{from_prefix}.attn.to_k.bias"),
        sd.pop(f"{from_prefix}.attn.to_v.bias"),
    )
    new_sd[f"{to_prefix}.img_attn.qkv.weight"] = _fuse_weights(
        sd.pop(f"{from_prefix}.attn.to_q.weight"),
        sd.pop(f"{from_prefix}.attn.to_k.weight"),
        sd.pop(f"{from_prefix}.attn.to_v.weight"),
    )

    # Handle basic 1-to-1 key conversions.
    key_map = {
        # img_attn
        "attn.norm_k.weight": "img_attn.norm.key_norm.scale",
        "attn.norm_q.weight": "img_attn.norm.query_norm.scale",
        "attn.to_out.0.weight": "img_attn.proj.weight",
        "attn.to_out.0.bias": "img_attn.proj.bias",
        # img_mlp
        "ff.net.0.proj.weight": "img_mlp.0.weight",
        "ff.net.0.proj.bias": "img_mlp.0.bias",
        "ff.net.2.weight": "img_mlp.2.weight",
        "ff.net.2.bias": "img_mlp.2.bias",
        # img_mod
        "norm1.linear.weight": "img_mod.lin.weight",
        "norm1.linear.bias": "img_mod.lin.bias",
        # txt_attn
        "attn.norm_added_q.weight": "txt_attn.norm.query_norm.scale",
        "attn.norm_added_k.weight": "txt_attn.norm.key_norm.scale",
        "attn.to_add_out.weight": "txt_attn.proj.weight",
        "attn.to_add_out.bias": "txt_attn.proj.bias",
        # txt_mlp
        "ff_context.net.0.proj.weight": "txt_mlp.0.weight",
        "ff_context.net.0.proj.bias": "txt_mlp.0.bias",
        "ff_context.net.2.weight": "txt_mlp.2.weight",
        "ff_context.net.2.bias": "txt_mlp.2.bias",
        # txt_mod
        "norm1_context.linear.weight": "txt_mod.lin.weight",
        "norm1_context.linear.bias": "txt_mod.lin.bias",
    }
    for from_key, to_key in key_map.items():
        new_sd[f"{to_prefix}.{to_key}"] = sd.pop(f"{from_prefix}.{from_key}")

    return new_sd
|
||||
|
||||
def _convert_flux_single_block_sd_from_diffusers_to_bfl_format(
|
||||
sd: Dict[str, torch.Tensor], single_block_index: int
|
||||
) -> Dict[str, torch.Tensor]:
|
||||
"""Convert the state dict for a single block from diffusers format to BFL format."""
|
||||
to_prefix = f"single_blocks.{single_block_index}"
|
||||
from_prefix = f"single_transformer_blocks.{single_block_index}"
|
||||
|
||||
new_sd: dict[str, torch.Tensor] = {}
|
||||
|
||||
# Check one key to determine if this block exists.
|
||||
if f"{from_prefix}.attn.to_q.bias" not in sd:
|
||||
return new_sd
|
||||
|
||||
# linear1 (qkv)
|
||||
new_sd[f"{to_prefix}.linear1.bias"] = _fuse_weights(
|
||||
sd.pop(f"{from_prefix}.attn.to_q.bias"),
|
||||
sd.pop(f"{from_prefix}.attn.to_k.bias"),
|
||||
sd.pop(f"{from_prefix}.attn.to_v.bias"),
|
||||
sd.pop(f"{from_prefix}.proj_mlp.bias"),
|
||||
)
|
||||
new_sd[f"{to_prefix}.linear1.weight"] = _fuse_weights(
|
||||
sd.pop(f"{from_prefix}.attn.to_q.weight"),
|
||||
sd.pop(f"{from_prefix}.attn.to_k.weight"),
|
||||
sd.pop(f"{from_prefix}.attn.to_v.weight"),
|
||||
sd.pop(f"{from_prefix}.proj_mlp.weight"),
|
||||
)
|
||||
|
||||
# Handle basic 1-to-1 key conversions.
|
||||
key_map = {
|
||||
# linear2
|
||||
"proj_out.weight": "linear2.weight",
|
||||
"proj_out.bias": "linear2.bias",
|
||||
# modulation
|
||||
"norm.linear.weight": "modulation.lin.weight",
|
||||
"norm.linear.bias": "modulation.lin.bias",
|
||||
# norm
|
||||
"attn.norm_k.weight": "norm.key_norm.scale",
|
||||
"attn.norm_q.weight": "norm.query_norm.scale",
|
||||
}
|
||||
for from_key, to_key in key_map.items():
|
||||
new_sd[f"{to_prefix}.{to_key}"] = sd.pop(f"{from_prefix}.{from_key}")
|
||||
|
||||
return new_sd
|
||||
|
||||
|
||||
def convert_diffusers_instantx_state_dict_to_bfl_format(sd: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
|
||||
"""Convert an InstantX ControlNet state dict to the format that can be loaded by our internal
|
||||
InstantXControlNetFlux model.
|
||||
|
||||
The original InstantX ControlNet model was developed to be used in diffusers. We have ported the original
|
||||
implementation to InstantXControlNetFlux to make it compatible with BFL-style models. This function converts the
|
||||
original state dict to the format expected by InstantXControlNetFlux.
|
||||
"""
|
||||
# Shallow copy sd so that we can pop keys from it without modifying the original.
|
||||
sd = sd.copy()
|
||||
|
||||
new_sd: dict[str, torch.Tensor] = {}
|
||||
|
||||
# Handle basic 1-to-1 key conversions.
|
||||
basic_key_map = {
|
||||
# Base model keys.
|
||||
# ----------------
|
||||
# txt_in keys.
|
||||
"context_embedder.bias": "txt_in.bias",
|
||||
"context_embedder.weight": "txt_in.weight",
|
||||
# guidance_in MLPEmbedder keys.
|
||||
"time_text_embed.guidance_embedder.linear_1.bias": "guidance_in.in_layer.bias",
|
||||
"time_text_embed.guidance_embedder.linear_1.weight": "guidance_in.in_layer.weight",
|
||||
"time_text_embed.guidance_embedder.linear_2.bias": "guidance_in.out_layer.bias",
|
||||
"time_text_embed.guidance_embedder.linear_2.weight": "guidance_in.out_layer.weight",
|
||||
# vector_in MLPEmbedder keys.
|
||||
"time_text_embed.text_embedder.linear_1.bias": "vector_in.in_layer.bias",
|
||||
"time_text_embed.text_embedder.linear_1.weight": "vector_in.in_layer.weight",
|
||||
"time_text_embed.text_embedder.linear_2.bias": "vector_in.out_layer.bias",
|
||||
"time_text_embed.text_embedder.linear_2.weight": "vector_in.out_layer.weight",
|
||||
# time_in MLPEmbedder keys.
|
||||
"time_text_embed.timestep_embedder.linear_1.bias": "time_in.in_layer.bias",
|
||||
"time_text_embed.timestep_embedder.linear_1.weight": "time_in.in_layer.weight",
|
||||
"time_text_embed.timestep_embedder.linear_2.bias": "time_in.out_layer.bias",
|
||||
"time_text_embed.timestep_embedder.linear_2.weight": "time_in.out_layer.weight",
|
||||
# img_in keys.
|
||||
"x_embedder.bias": "img_in.bias",
|
||||
"x_embedder.weight": "img_in.weight",
|
||||
}
|
||||
for old_key, new_key in basic_key_map.items():
|
||||
v = sd.pop(old_key, None)
|
||||
if v is not None:
|
||||
new_sd[new_key] = v
|
||||
|
||||
# Handle the double_blocks.
|
||||
block_index = 0
|
||||
while True:
|
||||
converted_double_block_sd = _convert_flux_double_block_sd_from_diffusers_to_bfl_format(sd, block_index)
|
||||
if len(converted_double_block_sd) == 0:
|
||||
break
|
||||
new_sd.update(converted_double_block_sd)
|
||||
block_index += 1
|
||||
|
||||
# Handle the single_blocks.
|
||||
block_index = 0
|
||||
while True:
|
||||
converted_singe_block_sd = _convert_flux_single_block_sd_from_diffusers_to_bfl_format(sd, block_index)
|
||||
if len(converted_singe_block_sd) == 0:
|
||||
break
|
||||
new_sd.update(converted_singe_block_sd)
|
||||
block_index += 1
|
||||
|
||||
# Transfer controlnet keys as-is.
|
||||
for k in list(sd.keys()):
|
||||
if k.startswith("controlnet_"):
|
||||
new_sd[k] = sd.pop(k)
|
||||
|
||||
# Assert that all keys have been handled.
|
||||
assert len(sd) == 0
|
||||
return new_sd
|
||||
|
||||
|
||||
def infer_flux_params_from_state_dict(sd: Dict[str, torch.Tensor]) -> FluxParams:
    """Infer the FluxParams from the shape of a FLUX state dict. When a model is distributed in diffusers format, this
    information is all contained in the config.json file that accompanies the model. However, being able to infer the
    params from the state dict enables us to load models (e.g. an InstantX ControlNet) from a single weight file.
    """
    hidden_size = sd["img_in.weight"].shape[0]
    mlp_hidden_dim = sd["double_blocks.0.img_mlp.0.weight"].shape[0]
    # mlp_ratio is a float, but we treat it as an int here to avoid having to think about possible float precision
    # issues. In practice, mlp_ratio is usually 4.
    mlp_ratio = mlp_hidden_dim // hidden_size

    head_dim = sd["double_blocks.0.img_attn.norm.query_norm.scale"].shape[0]
    num_heads = hidden_size // head_dim

    # Count the number of double blocks.
    double_block_index = 0
    while f"double_blocks.{double_block_index}.img_attn.qkv.weight" in sd:
        double_block_index += 1

    # Count the number of single blocks.
    single_block_index = 0
    while f"single_blocks.{single_block_index}.linear1.weight" in sd:
        single_block_index += 1

    return FluxParams(
        in_channels=sd["img_in.weight"].shape[1],
        vec_in_dim=sd["vector_in.in_layer.weight"].shape[1],
        context_in_dim=sd["txt_in.weight"].shape[1],
        hidden_size=hidden_size,
        mlp_ratio=mlp_ratio,
        num_heads=num_heads,
        depth=double_block_index,
        depth_single_blocks=single_block_index,
        # axes_dim cannot be inferred from the state dict. The hard-coded value is correct for dev/schnell models.
        axes_dim=[16, 56, 56],
        # theta cannot be inferred from the state dict. The hard-coded value is correct for dev/schnell models.
        theta=10_000,
        qkv_bias="double_blocks.0.img_attn.qkv.bias" in sd,
        guidance_embed="guidance_in.in_layer.weight" in sd,
    )


def infer_instantx_num_control_modes_from_state_dict(sd: Dict[str, torch.Tensor]) -> int | None:
    """Infer the number of ControlNet Union modes from the shape of an InstantX ControlNet state dict.

    Returns None if the model is not a ControlNet Union model. Otherwise returns the number of modes.
    """
    mode_embedder_key = "controlnet_mode_embedder.weight"
    if mode_embedder_key not in sd:
        return None

    return sd[mode_embedder_key].shape[0]
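
Taken together, these utilities support a loading flow along the following lines (a hedged sketch, not part of the diff; the checkpoint path is a placeholder):

    from safetensors.torch import load_file

    sd = load_file("/path/to/flux_controlnet.safetensors")  # placeholder path
    if is_state_dict_instantx_controlnet(sd):
        sd = convert_diffusers_instantx_state_dict_to_bfl_format(sd)
        flux_params = infer_flux_params_from_state_dict(sd)
        num_modes = infer_instantx_num_control_modes_from_state_dict(sd)
        # flux_params and num_modes can now be used to construct an InstantXControlNetFlux model.
    elif is_state_dict_xlabs_controlnet(sd):
        # XLabs checkpoints are already in BFL-style format; no key conversion is needed.
        pass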
invokeai/backend/flux/controlnet/xlabs_controlnet_flux.py (new file, 130 lines)
@@ -0,0 +1,130 @@
# This file was initially based on:
# https://github.com/XLabs-AI/x-flux/blob/47495425dbed499be1e8e5a6e52628b07349cba2/src/flux/controlnet.py


from dataclasses import dataclass

import torch
from einops import rearrange

from invokeai.backend.flux.controlnet.zero_module import zero_module
from invokeai.backend.flux.model import FluxParams
from invokeai.backend.flux.modules.layers import DoubleStreamBlock, EmbedND, MLPEmbedder, timestep_embedding


@dataclass
class XLabsControlNetFluxOutput:
    controlnet_double_block_residuals: list[torch.Tensor] | None


class XLabsControlNetFlux(torch.nn.Module):
    """A ControlNet model for FLUX.

    The architecture is very similar to the base FLUX model, with the following differences:
    - A `controlnet_depth` parameter is passed to control the number of double_blocks that the ControlNet is applied
      to. In order to keep the ControlNet small, this is typically much less than the depth of the base FLUX model.
    - There is a set of `controlnet_blocks` that are applied to the output of each double_block.
    """

    def __init__(self, params: FluxParams, controlnet_depth: int = 2):
        super().__init__()

        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = torch.nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else torch.nn.Identity()
        )
        self.txt_in = torch.nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = torch.nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(controlnet_depth)
            ]
        )

        # Add ControlNet blocks.
        self.controlnet_blocks = torch.nn.ModuleList([])
        for _ in range(controlnet_depth):
            controlnet_block = torch.nn.Linear(self.hidden_size, self.hidden_size)
            controlnet_block = zero_module(controlnet_block)
            self.controlnet_blocks.append(controlnet_block)
        self.pos_embed_input = torch.nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.input_hint_block = torch.nn.Sequential(
            torch.nn.Conv2d(3, 16, 3, padding=1),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1, stride=2),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1, stride=2),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1),
            torch.nn.SiLU(),
            torch.nn.Conv2d(16, 16, 3, padding=1, stride=2),
            torch.nn.SiLU(),
            zero_module(torch.nn.Conv2d(16, 16, 3, padding=1)),
        )

    def forward(
        self,
        img: torch.Tensor,
        img_ids: torch.Tensor,
        controlnet_cond: torch.Tensor,
        txt: torch.Tensor,
        txt_ids: torch.Tensor,
        timesteps: torch.Tensor,
        y: torch.Tensor,
        guidance: torch.Tensor | None = None,
    ) -> XLabsControlNetFluxOutput:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        # running on sequences img
        img = self.img_in(img)
        controlnet_cond = self.input_hint_block(controlnet_cond)
        controlnet_cond = rearrange(controlnet_cond, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
        controlnet_cond = self.pos_embed_input(controlnet_cond)
        img = img + controlnet_cond
        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)

        block_res_samples: list[torch.Tensor] = []

        for block in self.double_blocks:
            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
            block_res_samples.append(img)

        controlnet_block_res_samples: list[torch.Tensor] = []
        for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks, strict=True):
            block_res_sample = controlnet_block(block_res_sample)
            controlnet_block_res_samples.append(block_res_sample)

        return XLabsControlNetFluxOutput(controlnet_double_block_residuals=controlnet_block_res_samples)
invokeai/backend/flux/controlnet/zero_module.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from typing import TypeVar

import torch

T = TypeVar("T", bound=torch.nn.Module)


def zero_module(module: T) -> T:
    """Initialize the parameters of a module to zero."""
    for p in module.parameters():
        torch.nn.init.zeros_(p)
    return module
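
The zero initialization is what makes a freshly constructed ControlNet a no-op: every residual it adds to the base model starts out as exactly zero. A standalone sketch (not part of the diff):

    import torch

    block = torch.nn.Linear(8, 8)
    for p in block.parameters():
        torch.nn.init.zeros_(p)  # the same operation zero_module() performs
    x = torch.randn(2, 8)
    # With zeroed weight and bias, the block outputs all zeros, i.e. a residual that changes nothing.
    assert torch.equal(block(x), torch.zeros(2, 8))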

@@ -3,7 +3,10 @@ from typing import Callable
import torch
from tqdm import tqdm

from invokeai.backend.flux.inpaint_extension import InpaintExtension
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput, sum_controlnet_flux_outputs
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.model import Flux
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState

@@ -21,6 +24,7 @@ def denoise(
    step_callback: Callable[[PipelineIntermediateState], None],
    guidance: float,
    inpaint_extension: InpaintExtension | None,
    controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension],
):
    # step 0 is the initial state
    total_steps = len(timesteps) - 1

@@ -38,6 +42,30 @@ def denoise(
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)

        # Run ControlNet models.
        controlnet_residuals: list[ControlNetFluxOutput] = []
        for controlnet_extension in controlnet_extensions:
            controlnet_residuals.append(
                controlnet_extension.run_controlnet(
                    timestep_index=step - 1,
                    total_num_timesteps=total_steps,
                    img=img,
                    img_ids=img_ids,
                    txt=txt,
                    txt_ids=txt_ids,
                    y=vec,
                    timesteps=t_vec,
                    guidance=guidance_vec,
                )
            )

        # Merge the ControlNet residuals from multiple ControlNets.
        # TODO(ryand): We may want to calculate the sum just-in-time to keep peak memory low. Keep in mind that the
        # controlnet_residuals data structure is efficient in that it likely contains multiple references to the same
        # tensors. Calculating the sum materializes each tensor into its own instance.
        merged_controlnet_residuals = sum_controlnet_flux_outputs(controlnet_residuals)

        pred = model(
            img=img,
            img_ids=img_ids,
@@ -46,6 +74,8 @@ def denoise(
            y=vec,
            timesteps=t_vec,
            guidance=guidance_vec,
            controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
            controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
        )

        preview_img = img - t_curr * pred

invokeai/backend/flux/extensions/__init__.py (new file, empty)

invokeai/backend/flux/extensions/base_controlnet_extension.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import math
from abc import ABC, abstractmethod
from typing import List, Union

import torch

from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput


class BaseControlNetExtension(ABC):
    def __init__(
        self,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        self._weight = weight
        self._begin_step_percent = begin_step_percent
        self._end_step_percent = end_step_percent

    def _get_weight(self, timestep_index: int, total_num_timesteps: int) -> float:
        first_step = math.floor(self._begin_step_percent * total_num_timesteps)
        last_step = math.ceil(self._end_step_percent * total_num_timesteps)

        if timestep_index < first_step or timestep_index > last_step:
            return 0.0

        if isinstance(self._weight, list):
            return self._weight[timestep_index]

        return self._weight
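
A worked example of the step-range logic above (illustrative numbers, not part of the diff): with begin_step_percent=0.2, end_step_percent=0.8 and 30 timesteps, the weight is non-zero only for timestep indices 6 through 24.

    import math

    begin, end, total = 0.2, 0.8, 30
    first_step = math.floor(begin * total)  # 6
    last_step = math.ceil(end * total)  # 24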

    @abstractmethod
    def run_controlnet(
        self,
        timestep_index: int,
        total_num_timesteps: int,
        img: torch.Tensor,
        img_ids: torch.Tensor,
        txt: torch.Tensor,
        txt_ids: torch.Tensor,
        y: torch.Tensor,
        timesteps: torch.Tensor,
        guidance: torch.Tensor | None,
    ) -> ControlNetFluxOutput: ...
invokeai/backend/flux/extensions/instantx_controlnet_extension.py (new file, 194 lines)
@@ -0,0 +1,194 @@
import math
from typing import List, Union

import torch
from PIL.Image import Image

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES, prepare_control_image
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput
from invokeai.backend.flux.controlnet.instantx_controlnet_flux import (
    InstantXControlNetFlux,
    InstantXControlNetFluxOutput,
)
from invokeai.backend.flux.extensions.base_controlnet_extension import BaseControlNetExtension
from invokeai.backend.flux.sampling_utils import pack
from invokeai.backend.model_manager.load.load_base import LoadedModel


class InstantXControlNetExtension(BaseControlNetExtension):
    def __init__(
        self,
        model: InstantXControlNetFlux,
        controlnet_cond: torch.Tensor,
        instantx_control_mode: torch.Tensor | None,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        super().__init__(
            weight=weight,
            begin_step_percent=begin_step_percent,
            end_step_percent=end_step_percent,
        )
        self._model = model
        # The VAE-encoded and 'packed' control image to pass to the ControlNet model.
        self._controlnet_cond = controlnet_cond
        # TODO(ryand): Should we define an enum for the instantx_control_mode? Is it likely to change for future models?
        # The control mode for InstantX ControlNet union models.
        # See the values defined here: https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union#control-mode
        # Expected shape: (batch_size, 1). Expected dtype: torch.long.
        # If None, a zero-embedding will be used.
        self._instantx_control_mode = instantx_control_mode

        # TODO(ryand): Pass in these params if a new base transformer / InstantX ControlNet pair is released.
        self._flux_transformer_num_double_blocks = 19
        self._flux_transformer_num_single_blocks = 38

    @classmethod
    def prepare_controlnet_cond(
        cls,
        controlnet_image: Image,
        vae_info: LoadedModel,
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
        device: torch.device,
        resize_mode: CONTROLNET_RESIZE_VALUES,
    ):
        image_height = latent_height * LATENT_SCALE_FACTOR
        image_width = latent_width * LATENT_SCALE_FACTOR

        resized_controlnet_image = prepare_control_image(
            image=controlnet_image,
            do_classifier_free_guidance=False,
            width=image_width,
            height=image_height,
            device=device,
            dtype=dtype,
            control_mode="balanced",
            resize_mode=resize_mode,
        )

        # Shift the image from [0, 1] to [-1, 1].
        resized_controlnet_image = resized_controlnet_image * 2 - 1

        # Run VAE encoder.
        controlnet_cond = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=resized_controlnet_image)
        controlnet_cond = pack(controlnet_cond)

        return controlnet_cond

    @classmethod
    def from_controlnet_image(
        cls,
        model: InstantXControlNetFlux,
        controlnet_image: Image,
        instantx_control_mode: torch.Tensor | None,
        vae_info: LoadedModel,
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
        device: torch.device,
        resize_mode: CONTROLNET_RESIZE_VALUES,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        image_height = latent_height * LATENT_SCALE_FACTOR
        image_width = latent_width * LATENT_SCALE_FACTOR

        resized_controlnet_image = prepare_control_image(
            image=controlnet_image,
            do_classifier_free_guidance=False,
            width=image_width,
            height=image_height,
            device=device,
            dtype=dtype,
            control_mode="balanced",
            resize_mode=resize_mode,
        )

        # Shift the image from [0, 1] to [-1, 1].
        resized_controlnet_image = resized_controlnet_image * 2 - 1

        # Run VAE encoder.
        controlnet_cond = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=resized_controlnet_image)
        controlnet_cond = pack(controlnet_cond)

        return cls(
            model=model,
            controlnet_cond=controlnet_cond,
            instantx_control_mode=instantx_control_mode,
            weight=weight,
            begin_step_percent=begin_step_percent,
            end_step_percent=end_step_percent,
        )

    def _instantx_output_to_controlnet_output(
        self, instantx_output: InstantXControlNetFluxOutput
    ) -> ControlNetFluxOutput:
        # The `interval_control` logic here is based on
        # https://github.com/huggingface/diffusers/blob/31058cdaef63ca660a1a045281d156239fba8192/src/diffusers/models/transformers/transformer_flux.py#L507-L511

        # Handle double block residuals.
        double_block_residuals: list[torch.Tensor] = []
        double_block_samples = instantx_output.controlnet_block_samples
        if double_block_samples:
            interval_control = self._flux_transformer_num_double_blocks / len(double_block_samples)
            interval_control = int(math.ceil(interval_control))
            for i in range(self._flux_transformer_num_double_blocks):
                double_block_residuals.append(double_block_samples[i // interval_control])

        # Handle single block residuals.
        single_block_residuals: list[torch.Tensor] = []
        single_block_samples = instantx_output.controlnet_single_block_samples
        if single_block_samples:
            interval_control = self._flux_transformer_num_single_blocks / len(single_block_samples)
            interval_control = int(math.ceil(interval_control))
            for i in range(self._flux_transformer_num_single_blocks):
                single_block_residuals.append(single_block_samples[i // interval_control])

        return ControlNetFluxOutput(
            double_block_residuals=double_block_residuals or None,
            single_block_residuals=single_block_residuals or None,
        )
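
A worked example of the interval mapping above (illustrative numbers, not part of the diff): with 19 base double blocks and 5 ControlNet samples, interval_control = ceil(19 / 5) = 4, so sample 0 feeds base blocks 0-3, sample 1 feeds blocks 4-7, and so on.

    import math

    num_base_blocks, num_samples = 19, 5  # num_samples depends on the ControlNet's depth
    interval_control = int(math.ceil(num_base_blocks / num_samples))  # 4
    mapping = [i // interval_control for i in range(num_base_blocks)]
    # mapping == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4]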

    def run_controlnet(
        self,
        timestep_index: int,
        total_num_timesteps: int,
        img: torch.Tensor,
        img_ids: torch.Tensor,
        txt: torch.Tensor,
        txt_ids: torch.Tensor,
        y: torch.Tensor,
        timesteps: torch.Tensor,
        guidance: torch.Tensor | None,
    ) -> ControlNetFluxOutput:
        weight = self._get_weight(timestep_index=timestep_index, total_num_timesteps=total_num_timesteps)
        if weight < 1e-6:
            return ControlNetFluxOutput(single_block_residuals=None, double_block_residuals=None)

        # Make sure inputs have correct device and dtype.
        self._controlnet_cond = self._controlnet_cond.to(device=img.device, dtype=img.dtype)
        self._instantx_control_mode = (
            self._instantx_control_mode.to(device=img.device) if self._instantx_control_mode is not None else None
        )

        instantx_output: InstantXControlNetFluxOutput = self._model(
            controlnet_cond=self._controlnet_cond,
            controlnet_mode=self._instantx_control_mode,
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            timesteps=timesteps,
            y=y,
            guidance=guidance,
        )

        controlnet_output = self._instantx_output_to_controlnet_output(instantx_output)
        controlnet_output.apply_weight(weight)
        return controlnet_output
invokeai/backend/flux/extensions/xlabs_controlnet_extension.py (new file, 150 lines)
@@ -0,0 +1,150 @@
from typing import List, Union

import torch
from PIL.Image import Image

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES, prepare_control_image
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput
from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux, XLabsControlNetFluxOutput
from invokeai.backend.flux.extensions.base_controlnet_extension import BaseControlNetExtension


class XLabsControlNetExtension(BaseControlNetExtension):
    def __init__(
        self,
        model: XLabsControlNetFlux,
        controlnet_cond: torch.Tensor,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        super().__init__(
            weight=weight,
            begin_step_percent=begin_step_percent,
            end_step_percent=end_step_percent,
        )

        self._model = model
        # _controlnet_cond is the control image passed to the ControlNet model.
        # Pixel values are in the range [-1, 1]. Shape: (batch_size, 3, height, width).
        self._controlnet_cond = controlnet_cond

        # TODO(ryand): Pass in these params if a new base transformer / XLabs ControlNet pair is released.
        self._flux_transformer_num_double_blocks = 19
        self._flux_transformer_num_single_blocks = 38

    @classmethod
    def prepare_controlnet_cond(
        cls,
        controlnet_image: Image,
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
        device: torch.device,
        resize_mode: CONTROLNET_RESIZE_VALUES,
    ):
        image_height = latent_height * LATENT_SCALE_FACTOR
        image_width = latent_width * LATENT_SCALE_FACTOR

        controlnet_cond = prepare_control_image(
            image=controlnet_image,
            do_classifier_free_guidance=False,
            width=image_width,
            height=image_height,
            device=device,
            dtype=dtype,
            control_mode="balanced",
            resize_mode=resize_mode,
        )

        # Map pixel values from [0, 1] to [-1, 1].
        controlnet_cond = controlnet_cond * 2 - 1

        return controlnet_cond

    @classmethod
    def from_controlnet_image(
        cls,
        model: XLabsControlNetFlux,
        controlnet_image: Image,
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
        device: torch.device,
        resize_mode: CONTROLNET_RESIZE_VALUES,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        image_height = latent_height * LATENT_SCALE_FACTOR
        image_width = latent_width * LATENT_SCALE_FACTOR

        controlnet_cond = prepare_control_image(
            image=controlnet_image,
            do_classifier_free_guidance=False,
            width=image_width,
            height=image_height,
            device=device,
            dtype=dtype,
            control_mode="balanced",
            resize_mode=resize_mode,
        )

        # Map pixel values from [0, 1] to [-1, 1].
        controlnet_cond = controlnet_cond * 2 - 1

        return cls(
            model=model,
            controlnet_cond=controlnet_cond,
            weight=weight,
            begin_step_percent=begin_step_percent,
            end_step_percent=end_step_percent,
        )

    def _xlabs_output_to_controlnet_output(self, xlabs_output: XLabsControlNetFluxOutput) -> ControlNetFluxOutput:
        # The modulo index logic used here is based on:
        # https://github.com/XLabs-AI/x-flux/blob/47495425dbed499be1e8e5a6e52628b07349cba2/src/flux/model.py#L198-L200

        # Handle double block residuals.
        double_block_residuals: list[torch.Tensor] = []
        xlabs_double_block_residuals = xlabs_output.controlnet_double_block_residuals
        if xlabs_double_block_residuals is not None:
            for i in range(self._flux_transformer_num_double_blocks):
                double_block_residuals.append(xlabs_double_block_residuals[i % len(xlabs_double_block_residuals)])

        return ControlNetFluxOutput(
            double_block_residuals=double_block_residuals,
            single_block_residuals=None,
        )
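
Note the contrast with the InstantX interval mapping earlier: XLabs cycles its few residuals with a modulo, so neighboring base blocks alternate between samples instead of receiving contiguous runs (illustrative numbers, not part of the diff):

    # e.g. 2 ControlNet residuals spread across 19 base double blocks:
    mapping = [i % 2 for i in range(19)]
    # mapping == [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]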

    def run_controlnet(
        self,
        timestep_index: int,
        total_num_timesteps: int,
        img: torch.Tensor,
        img_ids: torch.Tensor,
        txt: torch.Tensor,
        txt_ids: torch.Tensor,
        y: torch.Tensor,
        timesteps: torch.Tensor,
        guidance: torch.Tensor | None,
    ) -> ControlNetFluxOutput:
        weight = self._get_weight(timestep_index=timestep_index, total_num_timesteps=total_num_timesteps)
        if weight < 1e-6:
            return ControlNetFluxOutput(single_block_residuals=None, double_block_residuals=None)

        xlabs_output: XLabsControlNetFluxOutput = self._model(
            img=img,
            img_ids=img_ids,
            controlnet_cond=self._controlnet_cond,
            txt=txt,
            txt_ids=txt_ids,
            timesteps=timesteps,
            y=y,
            guidance=guidance,
        )

        controlnet_output = self._xlabs_output_to_controlnet_output(xlabs_output)
        controlnet_output.apply_weight(weight)
        return controlnet_output
invokeai/backend/flux/math.py
@@ -9,8 +9,12 @@ def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
    q, k = apply_rope(q, k, pe)

    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    x = rearrange(x, "B H L D -> B L (H D)")

    # A torch-native replacement for the einops.rearrange(...) call (slightly faster), currently disabled:
    # Original call: x = rearrange(x, "B H L D -> B L (H D)")
    # x = x.permute(0, 2, 1, 3)  # BHLD -> BLHD
    # x = x.reshape(x.shape[0], x.shape[1], -1)  # BLHD -> BL(HD)
    x = rearrange(x, "B H L D -> B L (H D)")
    return x
@@ -23,6 +27,9 @@ def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    # A torch-native replacement for the einops.rearrange(...) call (slightly faster), currently disabled:
    # Original call: out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    # out = out.view(*out.shape[:-1], 2, 2)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    return out.float()
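
The disabled torch-native rewrites in the two hunks above are shape-for-shape equivalent to the einops calls they would replace; a standalone check (not part of the diff):

    import torch
    from einops import rearrange

    B, H, L, D = 2, 4, 6, 8
    x = torch.randn(B, H, L, D)
    # "B H L D -> B L (H D)" via permute + reshape:
    assert torch.equal(
        x.permute(0, 2, 1, 3).reshape(B, L, H * D),
        rearrange(x, "B H L D -> B L (H D)"),
    )

    qkv = torch.randn(B, L, 3 * H * D)
    # "B L (K H D) -> K B H L D" via view + permute (the pattern used in the layers hunks below):
    q, k, v = qkv.view(B, L, 3, H, -1).permute(2, 0, 3, 1, 4)
    q2, k2, v2 = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=H)
    assert torch.equal(q, q2) and torch.equal(k, k2) and torch.equal(v, v2)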
invokeai/backend/flux/model.py
@@ -87,7 +87,9 @@ class Flux(nn.Module):
        txt_ids: Tensor,
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None = None,
        guidance: Tensor | None,
        controlnet_double_block_residuals: list[Tensor] | None,
        controlnet_single_block_residuals: list[Tensor] | None,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")
@@ -105,12 +107,27 @@ class Flux(nn.Module):
        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)

        for block in self.double_blocks:
        # Validate double_block_residuals shape.
        if controlnet_double_block_residuals is not None:
            assert len(controlnet_double_block_residuals) == len(self.double_blocks)
        for block_index, block in enumerate(self.double_blocks):
            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)

            if controlnet_double_block_residuals is not None:
                img += controlnet_double_block_residuals[block_index]

        img = torch.cat((txt, img), 1)
        for block in self.single_blocks:

        # Validate single_block_residuals shape.
        if controlnet_single_block_residuals is not None:
            assert len(controlnet_single_block_residuals) == len(self.single_blocks)

        for block_index, block in enumerate(self.single_blocks):
            img = block(img, vec=vec, pe=pe)

            if controlnet_single_block_residuals is not None:
                img[:, txt.shape[1] :, ...] += controlnet_single_block_residuals[block_index]

        img = img[:, txt.shape[1] :, ...]

        img = self.final_layer(img, vec)  # (N, T, patch_size ** 2 * out_channels)
invokeai/backend/flux/modules/layers.py
@@ -4,7 +4,6 @@ import math
from dataclasses import dataclass

import torch
from einops import rearrange
from torch import Tensor, nn

from invokeai.backend.flux.math import attention, rope

@@ -94,13 +93,14 @@ class SelfAttention(nn.Module):
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        qkv = self.qkv(x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        x = attention(q, k, v, pe=pe)
        x = self.proj(x)
        return x
    # Unused code for reference:
    # def forward(self, x: Tensor, pe: Tensor) -> Tensor:
    #     qkv = self.qkv(x)
    #     q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
    #     q, k = self.norm(q, k, v)
    #     x = attention(q, k, v, pe=pe)
    #     x = self.proj(x)
    #     return x


@dataclass
@@ -163,14 +163,22 @@ class DoubleStreamBlock(nn.Module):
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        # img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(
            2, 0, 3, 1, 4
        )

        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        # txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(
            2, 0, 3, 1, 4
        )

        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run actual attention
@@ -229,7 +237,8 @@ class SingleStreamBlock(nn.Module):
        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        # q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k = self.norm(q, k, v)

        # compute attention
invokeai/backend/model_manager/load/model_loaders/controlnet.py
@@ -8,17 +8,36 @@ from diffusers import ControlNetModel
from invokeai.backend.model_manager import (
    AnyModel,
    AnyModelConfig,
)
from invokeai.backend.model_manager.config import (
    BaseModelType,
    ControlNetCheckpointConfig,
    ModelFormat,
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusion1, type=ModelType.ControlNet, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusion1, type=ModelType.ControlNet, format=ModelFormat.Checkpoint
)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusion2, type=ModelType.ControlNet, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusion2, type=ModelType.ControlNet, format=ModelFormat.Checkpoint
)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusionXL, type=ModelType.ControlNet, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusionXL, type=ModelType.ControlNet, format=ModelFormat.Checkpoint
)
class ControlNetLoader(GenericDiffusersLoader):
    """Class to load ControlNet models."""
invokeai/backend/model_manager/load/model_loaders/flux.py
@@ -10,6 +10,15 @@ from safetensors.torch import load_file
from transformers import AutoConfig, AutoModelForTextEncoding, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.services.config.config_default import get_config
from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXControlNetFlux
from invokeai.backend.flux.controlnet.state_dict_utils import (
    convert_diffusers_instantx_state_dict_to_bfl_format,
    infer_flux_params_from_state_dict,
    infer_instantx_num_control_modes_from_state_dict,
    is_state_dict_instantx_controlnet,
    is_state_dict_xlabs_controlnet,
)
from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.util import ae_params, params
@@ -24,6 +33,8 @@ from invokeai.backend.model_manager import (
from invokeai.backend.model_manager.config import (
    CheckpointConfigBase,
    CLIPEmbedDiffusersConfig,
    ControlNetCheckpointConfig,
    ControlNetDiffusersConfig,
    MainBnbQuantized4bCheckpointConfig,
    MainCheckpointConfig,
    MainGGUFCheckpointConfig,
@@ -293,3 +304,51 @@ class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
        sd = convert_bundle_to_flux_transformer_checkpoint(sd)
        model.load_state_dict(sd, assign=True)
        return model


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlNet, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlNet, format=ModelFormat.Diffusers)
class FluxControlnetModel(ModelLoader):
    """Class to load FLUX ControlNet models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if isinstance(config, ControlNetCheckpointConfig):
            model_path = Path(config.path)
        elif isinstance(config, ControlNetDiffusersConfig):
            # If this is a diffusers directory, we simply ignore the config file and load from the weight file.
            model_path = Path(config.path) / "diffusion_pytorch_model.safetensors"
        else:
            raise ValueError(f"Unexpected ControlNet model config type: {type(config)}")

        sd = load_file(model_path)

        # Detect the FLUX ControlNet model type from the state dict.
        if is_state_dict_xlabs_controlnet(sd):
            return self._load_xlabs_controlnet(sd)
        elif is_state_dict_instantx_controlnet(sd):
            return self._load_instantx_controlnet(sd)
        else:
            raise ValueError("Do not recognize the state dict as an XLabs or InstantX ControlNet model.")

    def _load_xlabs_controlnet(self, sd: dict[str, torch.Tensor]) -> AnyModel:
        with accelerate.init_empty_weights():
            # HACK(ryand): Is it safe to assume dev here?
            model = XLabsControlNetFlux(params["flux-dev"])

        model.load_state_dict(sd, assign=True)
        return model

    def _load_instantx_controlnet(self, sd: dict[str, torch.Tensor]) -> AnyModel:
        sd = convert_diffusers_instantx_state_dict_to_bfl_format(sd)
        flux_params = infer_flux_params_from_state_dict(sd)
        num_control_modes = infer_instantx_num_control_modes_from_state_dict(sd)

        with accelerate.init_empty_weights():
            model = InstantXControlNetFlux(flux_params, num_control_modes)

        model.load_state_dict(sd, assign=True)
        return model
invokeai/backend/model_manager/probe.py
@@ -10,6 +10,10 @@ from picklescan.scanner import scan_file_path

import invokeai.backend.util.logging as logger
from invokeai.app.util.misc import uuid_string
from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_instantx_controlnet,
    is_state_dict_xlabs_controlnet,
)
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
    is_state_dict_likely_in_flux_diffusers_format,
)
@@ -116,6 +120,7 @@ class ModelProbe(object):
    "CLIPModel": ModelType.CLIPEmbed,
    "CLIPTextModel": ModelType.CLIPEmbed,
    "T5EncoderModel": ModelType.T5Encoder,
    "FluxControlNetModel": ModelType.ControlNet,
}

@classmethod
@@ -255,7 +260,19 @@ class ModelProbe(object):
        # LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
        elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight", "lora_A.weight", "lora_B.weight")):
            return ModelType.LoRA
        elif key.startswith(("controlnet", "control_model", "input_blocks")):
        elif key.startswith(
            (
                "controlnet",
                "control_model",
                "input_blocks",
                # XLabs FLUX ControlNet models have keys starting with "controlnet_blocks."
                # For example: https://huggingface.co/XLabs-AI/flux-controlnet-collections/blob/86ab1e915a389d5857135c00e0d350e9e38a9048/flux-canny-controlnet_v2.safetensors
                # TODO(ryand): This is very fragile. XLabs FLUX ControlNet models also contain keys starting with
                # "double_blocks.", which we check for above. But, I'm afraid to modify this logic because it is so
                # delicate.
                "controlnet_blocks",
            )
        ):
            return ModelType.ControlNet
        elif key.startswith(("image_proj.", "ip_adapter.")):
            return ModelType.IPAdapter
@@ -438,6 +455,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "lineart": "lineart_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",
    "pose": "dw_openpose_image_processor",
    "mediapipe": "mediapipe_face_processor",
@@ -449,7 +467,8 @@ MODEL_NAME_TO_PREPROCESSOR = {

def get_default_settings_controlnet_t2i_adapter(model_name: str) -> Optional[ControlAdapterDefaultSettings]:
    for k, v in MODEL_NAME_TO_PREPROCESSOR.items():
        if k in model_name:
        model_name_lower = model_name.lower()
        if k in model_name_lower:
            return ControlAdapterDefaultSettings(preprocessor=v)
    return None

@@ -623,6 +642,11 @@ class ControlNetCheckpointProbe(CheckpointProbeBase):

    def get_base_type(self) -> BaseModelType:
        checkpoint = self.checkpoint
        if is_state_dict_xlabs_controlnet(checkpoint) or is_state_dict_instantx_controlnet(checkpoint):
            # TODO(ryand): Should I distinguish between XLabs, InstantX and other ControlNet models by implementing
            # get_format()?
            return BaseModelType.Flux

        for key_name in (
            "control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
            "controlnet_mid_block.bias",
@@ -844,22 +868,19 @@ class ControlNetFolderProbe(FolderProbeBase):
            raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}")
        with open(config_file, "r") as file:
            config = json.load(file)

        if config.get("_class_name", None) == "FluxControlNetModel":
            return BaseModelType.Flux

        # There is no obvious way to distinguish between sd2-base and sd2-768.
        dimension = config["cross_attention_dim"]
        base_model = (
            BaseModelType.StableDiffusion1
            if dimension == 768
            else (
                BaseModelType.StableDiffusion2
                if dimension == 1024
                else BaseModelType.StableDiffusionXL
                if dimension == 2048
                else None
            )
        )
        if not base_model:
            raise InvalidModelConfigException(f"Unable to determine model base for {self.model_path}")
        return base_model
        if dimension == 768:
            return BaseModelType.StableDiffusion1
        if dimension == 1024:
            return BaseModelType.StableDiffusion2
        if dimension == 2048:
            return BaseModelType.StableDiffusionXL
        raise InvalidModelConfigException(f"Unable to determine model base for {self.model_path}")


class LoRAFolderProbe(FolderProbeBase):
(File diff suppressed because it is too large.)
invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -171,8 +171,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
        """
        If xformers is available, use it; otherwise use sliced attention.
        """
        # On 30xx and 40xx series GPUs, `torch-sdp` is faster than `xformers`. This corresponds to a CUDA major
        # version of 8 or higher. So, for major version 7 or below, we prefer `xformers`.
        # See:
        # - https://developer.nvidia.com/cuda-gpus
        # - https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
        try:
            prefer_xformers = torch.cuda.is_available() and torch.cuda.get_device_properties("cuda").major <= 7  # type: ignore # Type of "get_device_properties" is partially unknown
        except Exception:
            prefer_xformers = False

        config = get_config()
        if config.attention_type == "xformers":
        if config.attention_type == "xformers" and is_xformers_available() and prefer_xformers:
            self.enable_xformers_memory_efficient_attention()
            return
        elif config.attention_type == "sliced":
@@ -187,20 +198,24 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            self.disable_attention_slicing()
            return
        elif config.attention_type == "torch-sdp":
            if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
                # diffusers enables sdp automatically
                return
            else:
                raise Exception("torch-sdp attention slicing not available")
            # torch-sdp is the default in diffusers.
            return

        # the remainder of this code is called when attention_type=='auto'
        # See https://github.com/invoke-ai/InvokeAI/issues/7049 for context.
        # Bumping torch from 2.2.2 to 2.4.1 caused the sliced attention implementation to produce incorrect results.
        # For now, if a user is on an MPS device and has not explicitly set the attention_type, then we select the
        # non-sliced torch-sdp implementation. This keeps things working on MPS at the cost of increased peak memory
        # utilization.
        if torch.backends.mps.is_available():
            return

        # The remainder of this code is called when attention_type=='auto'.
        if self.unet.device.type == "cuda":
            if is_xformers_available():
            if is_xformers_available() and prefer_xformers:
                self.enable_xformers_memory_efficient_attention()
                return
            elif hasattr(torch.nn.functional, "scaled_dot_product_attention"):
                # diffusers enables sdp automatically
                return
            # torch-sdp is the default in diffusers.
            return

        if self.unet.device.type == "cpu" or self.unet.device.type == "mps":
            mem_free = psutil.virtual_memory().free
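
For reference, the compute-capability check above can be exercised standalone (a sketch, not part of the diff; device index 0 is assumed):

    import torch

    prefer_xformers = False
    if torch.cuda.is_available():
        props = torch.cuda.get_device_properties(0)
        # Turing (20xx) is compute capability 7.5 -> major 7; Ampere (30xx) is 8.6 and Ada (40xx) is 8.9 -> major 8.
        prefer_xformers = props.major <= 7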
invokeai/frontend/web/package.json
@@ -58,7 +58,7 @@
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.1.0",
    "@invoke-ai/ui-library": "^0.0.41",
    "@invoke-ai/ui-library": "^0.0.42",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",
invokeai/frontend/web/pnpm-lock.yaml (generated, 95 lines changed)
@@ -24,8 +24,8 @@ dependencies:
      specifier: ^5.1.0
      version: 5.1.0
    '@invoke-ai/ui-library':
-     specifier: ^0.0.41
-     version: 0.0.41(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
+     specifier: ^0.0.42
+     version: 0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
    '@nanostores/react':
      specifier: ^0.7.3
      version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
@@ -551,12 +551,12 @@ packages:
      react: 18.3.1
    dev: false

- /@chakra-ui/hooks@2.4.1(react@18.3.1):
-   resolution: {integrity: sha512-f81S0TiSh14LjiXBAvdezmIQLKGR7pckQ8kLMIxjngNW4VV92dhznOP+VcQmDOyXslfWsaWNFswlASE6clVw1g==}
+ /@chakra-ui/hooks@2.4.2(react@18.3.1):
+   resolution: {integrity: sha512-LRKiVE1oA7afT5tbbSKAy7Uas2xFHE6IkrQdbhWCHmkHBUtPvjQQDgwtnd4IRZPmoEfNGwoJ/MQpwOM/NRTTwA==}
    peerDependencies:
      react: '>=18'
    dependencies:
-     '@chakra-ui/utils': 2.2.1(react@18.3.1)
+     '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@zag-js/element-size': 0.31.1
      copy-to-clipboard: 3.3.3
      framesync: 6.1.2
@@ -574,13 +574,13 @@ packages:
      react: 18.3.1
    dev: false

- /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.1)(react@18.3.1):
+ /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1):
    resolution: {integrity: sha512-l5QdBgwrAg3Sc2BRqtNkJpfuLw/pWRDwwT58J6c4PqQT6wzXxyNa8Q0PForu1ltB5qEiFb1kxr/F/HO1EwNa6g==}
    peerDependencies:
      '@chakra-ui/react': '>=2.0.0'
      react: '>=18'
    dependencies:
-     '@chakra-ui/react': 2.10.1(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
+     '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -803,8 +803,8 @@ packages:
      react: 18.3.1
    dev: false

- /@chakra-ui/react@2.10.1(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
-   resolution: {integrity: sha512-YUOPyuq6Z0P1TSURNQRccwV/YbiWlkHlRLEMqNMwFQeQe7lyXfkPHn8GZyxvaiu+S02iopG9xg+wl68K4G3FSQ==}
+ /@chakra-ui/react@2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
+   resolution: {integrity: sha512-TfIHTqTlxTHYJZBtpiR5EZasPUrLYKJxdbHkdOJb5G1OQ+2c5kKl5XA7c2pMtsEptzb7KxAAIB62t3hxdfWp1w==}
    peerDependencies:
      '@emotion/react': '>=11'
      '@emotion/styled': '>=11'
@@ -812,10 +812,10 @@ packages:
      react: '>=18'
      react-dom: '>=18'
    dependencies:
-     '@chakra-ui/hooks': 2.4.1(react@18.3.1)
-     '@chakra-ui/styled-system': 2.11.1(react@18.3.1)
-     '@chakra-ui/theme': 3.4.5(@chakra-ui/styled-system@2.11.1)(react@18.3.1)
-     '@chakra-ui/utils': 2.2.1(react@18.3.1)
+     '@chakra-ui/hooks': 2.4.2(react@18.3.1)
+     '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
+     '@chakra-ui/theme': 3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
+     '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
      '@popperjs/core': 2.11.8
@@ -826,7 +826,6 @@ packages:
      react-dom: 18.3.1(react@18.3.1)
      react-fast-compare: 3.2.2
      react-focus-lock: 2.13.2(@types/react@18.3.11)(react@18.3.1)
-     react-lorem-component: 0.13.0(react@18.3.1)
      react-remove-scroll: 2.6.0(@types/react@18.3.11)(react@18.3.1)
    transitivePeerDependencies:
      - '@types/react'
@@ -847,10 +846,10 @@ packages:
      react: 18.3.1
    dev: false

- /@chakra-ui/styled-system@2.11.1(react@18.3.1):
-   resolution: {integrity: sha512-1O4vQSSJqDYl+xkQLgNLVsBhEUkFd5BKQAIBEZC5ppppvCXAt+BPqrOLFAkNyo7piKYp5mf6fdYBAW/ESurqeg==}
+ /@chakra-ui/styled-system@2.11.2(react@18.3.1):
+   resolution: {integrity: sha512-y++z2Uop+hjfZX9mbH88F1ikazPv32asD2er56zMJBemUAzweXnHTpiCQbluEDSUDhqmghVZAdb+5L4XLbsRxA==}
    dependencies:
-     '@chakra-ui/utils': 2.2.1(react@18.3.1)
+     '@chakra-ui/utils': 2.2.2(react@18.3.1)
      csstype: 3.1.3
    transitivePeerDependencies:
      - react
@@ -894,14 +893,14 @@ packages:
      color2k: 2.0.3
    dev: false

- /@chakra-ui/theme-tools@2.2.5(@chakra-ui/styled-system@2.11.1)(react@18.3.1):
-   resolution: {integrity: sha512-tUIv04Pzm2W8v5Iugei15qREIFX2mslI3fEouIVih8vHzwY821neCfspxOsAhvYSLNX+1kzk6ArBo9WzUpqn+Q==}
+ /@chakra-ui/theme-tools@2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
+   resolution: {integrity: sha512-3UhKPyzKbV3l/bg1iQN9PBvffYp+EBOoYMUaeTUdieQRPFzo2jbYR0lNCxqv8h5aGM/k54nCHU2M/GStyi9F2A==}
    peerDependencies:
      '@chakra-ui/styled-system': '>=2.0.0'
    dependencies:
      '@chakra-ui/anatomy': 2.3.4
-     '@chakra-ui/styled-system': 2.11.1(react@18.3.1)
-     '@chakra-ui/utils': 2.2.1(react@18.3.1)
+     '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
+     '@chakra-ui/utils': 2.2.2(react@18.3.1)
      color2k: 2.0.3
    transitivePeerDependencies:
      - react
@@ -927,15 +926,15 @@ packages:
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
    dev: false

- /@chakra-ui/theme@3.4.5(@chakra-ui/styled-system@2.11.1)(react@18.3.1):
-   resolution: {integrity: sha512-h/00W3xFCrUnsosVdh/SNMKfaez9jo1MMz2ruZAcpg/b1Jk0WnYa/ONhUwN89DR3UrPHSR+/vigZBxaA1+3QUQ==}
+ /@chakra-ui/theme@3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
+   resolution: {integrity: sha512-ZwFBLfiMC3URwaO31ONXoKH9k0TX0OW3UjdPF3EQkQpYyrk/fm36GkkzajjtdpWEd7rzDLRsQjPmvwNaSoNDtg==}
    peerDependencies:
      '@chakra-ui/styled-system': '>=2.8.0'
    dependencies:
      '@chakra-ui/anatomy': 2.3.4
-     '@chakra-ui/styled-system': 2.11.1(react@18.3.1)
-     '@chakra-ui/theme-tools': 2.2.5(@chakra-ui/styled-system@2.11.1)(react@18.3.1)
-     '@chakra-ui/utils': 2.2.1(react@18.3.1)
+     '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
+     '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
+     '@chakra-ui/utils': 2.2.2(react@18.3.1)
    transitivePeerDependencies:
      - react
    dev: false
@@ -960,8 +959,8 @@ packages:
      lodash.mergewith: 4.6.2
    dev: false

- /@chakra-ui/utils@2.2.1(react@18.3.1):
-   resolution: {integrity: sha512-e9D6CDtQZWBx+rq7yW13mAeI3Vod4cWfUIip+XFNjPWQSJGOJb1MiXocoiJqaZ1S+jsN+mod4uCZmGcukv0iSg==}
+ /@chakra-ui/utils@2.2.2(react@18.3.1):
+   resolution: {integrity: sha512-jUPLT0JzRMWxpdzH6c+t0YMJYrvc5CLericgITV3zDSXblkfx3DsYXqU11DJTSGZI9dUKzM1Wd0Wswn4eJwvFQ==}
    peerDependencies:
      react: '>=16.8.0'
    dependencies:
@@ -1697,18 +1696,18 @@ packages:
      prettier: 3.3.3
    dev: true

- /@invoke-ai/ui-library@0.0.41(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
-   resolution: {integrity: sha512-bV83fXxqSuEMghQ68nF6uFkPE/DXtbnY2ZiXcoXXhOvv9X32/uKWSrcaeka9lZxrr10GFvV5a5LRORDornDaGg==}
+ /@invoke-ai/ui-library@0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
+   resolution: {integrity: sha512-OuDXRipBO5mu+Nv4qN8cd8MiwiGBdq6h4PirVgPI9/ltbdcIzePgUJ0dJns26lflHSTRWW38I16wl4YTw3mNWA==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
      react-dom: ^18.2.0
    dependencies:
      '@chakra-ui/anatomy': 2.2.2
-     '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.1)(react@18.3.1)
+     '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
      '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
-     '@chakra-ui/react': 2.10.1(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
+     '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
      '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
@@ -4691,14 +4690,6 @@ packages:
      yaml: 1.10.2
    dev: false

- /create-react-class@15.7.0:
-   resolution: {integrity: sha512-QZv4sFWG9S5RUvkTYWbflxeZX+JG7Cz0Tn33rQBJ+WFQTqTfUTjMjiv9tnfXazjsO5r0KhPs+AqCjyrQX6h2ng==}
-   dependencies:
-     loose-envify: 1.4.0
-     object-assign: 4.1.1
-   dev: false
-   bundledDependencies: false
-
  /cross-fetch@4.0.0:
    resolution: {integrity: sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==}
    dependencies:
@@ -6817,13 +6808,6 @@ packages:
    dependencies:
      js-tokens: 4.0.0

- /lorem-ipsum@1.0.6:
-   resolution: {integrity: sha512-Rx4XH8X4KSDCKAVvWGYlhAfNqdUP5ZdT4rRyf0jjrvWgtViZimDIlopWNfn/y3lGM5K4uuiAoY28TaD+7YKFrQ==}
-   hasBin: true
-   dependencies:
-     minimist: 1.2.8
-   dev: false
-
  /loupe@2.3.7:
    resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==}
    dependencies:
@@ -7017,6 +7001,7 @@ packages:

  /minimist@1.2.8:
    resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+   dev: true

  /minipass@7.1.2:
    resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==}
@@ -7775,18 +7760,6 @@ packages:
    resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==}
    dev: true

- /react-lorem-component@0.13.0(react@18.3.1):
-   resolution: {integrity: sha512-4mWjxmcG/DJJwdxdKwXWyP2N9zohbJg/yYaC+7JffQNrKj3LYDpA/A4u/Dju1v1ZF6Jew2gbFKGb5Z6CL+UNTw==}
-   peerDependencies:
-     react: 16.x
-   dependencies:
-     create-react-class: 15.7.0
-     lorem-ipsum: 1.0.6
-     object-assign: 4.1.1
-     react: 18.3.1
-     seedable-random: 0.0.1
-   dev: false
-
  /react-redux@9.1.2(@types/react@18.3.11)(react@18.3.1)(redux@5.0.1):
    resolution: {integrity: sha512-0OA4dhM1W48l3uzmv6B7TXPCGmokUU4p1M44DGN2/D9a1FjVPukVjER1PcPX97jIg6aUeLq1XJo1IpfbgULn0w==}
    peerDependencies:
@@ -8301,10 +8274,6 @@ packages:
    engines: {node: '>=0.10.0'}
    dev: false

- /seedable-random@0.0.1:
-   resolution: {integrity: sha512-uZWbEfz3BQdBl4QlUPELPqhInGEO1Q6zjzqrTDkd3j7mHaWWJo7h4ydr2g24a2WtTLk3imTLc8mPbBdQqdsbGw==}
-   dev: false
-
  /semver-compare@1.0.0:
    resolution: {integrity: sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==}
    dev: false
@@ -936,7 +936,8 @@
  },
  "paramScheduler": {
    "paragraphs": [
-     "\"Planer\" definiert, wie iterativ Rauschen zu einem Bild hinzugefügt wird, oder wie ein Sample bei der Ausgabe eines Modells aktualisiert wird."
+     "Verwendeter Planer währende des Generierungsprozesses.",
+     "Jeder Planer definiert, wie einem Bild iterativ Rauschen hinzugefügt wird, oder wie ein Sample basierend auf der Ausgabe eines Modells aktualisiert wird."
    ],
    "heading": "Planer"
  },
@@ -962,6 +963,61 @@
  },
  "ipAdapterMethod": {
    "heading": "Methode"
  },
+ "refinerScheduler": {
+   "heading": "Planer",
+   "paragraphs": [
+     "Planer, der während der Veredelungsphase des Generierungsprozesses verwendet wird.",
+     "Ähnlich wie der Generierungsplaner."
+   ]
+ },
+ "compositingCoherenceMode": {
+   "paragraphs": [
+     "Verwendete Methode zur Erstellung eines kohärenten Bildes mit dem neu generierten maskierten Bereich."
+   ],
+   "heading": "Modus"
+ },
+ "compositingCoherencePass": {
+   "heading": "Kohärenzdurchlauf"
+ },
+ "controlNet": {
+   "heading": "ControlNet"
+ },
+ "compositingMaskAdjustments": {
+   "paragraphs": [
+     "Die Maske anpassen."
+   ],
+   "heading": "Maskenanpassungen"
+ },
+ "compositingMaskBlur": {
+   "paragraphs": [
+     "Der Unschärferadius der Maske."
+   ],
+   "heading": "Maskenunschärfe"
+ },
+ "compositingBlurMethod": {
+   "paragraphs": [
+     "Die auf den maskierten Bereich angewendete Unschärfemethode."
+   ],
+   "heading": "Unschärfemethode"
+ },
+ "controlNetResizeMode": {
+   "heading": "Größenänderungsmodus"
+ },
+ "paramWidth": {
+   "heading": "Breite",
+   "paragraphs": [
+     "Breite des generierten Bildes. Muss ein Vielfaches von 8 sein."
+   ]
+ },
+ "controlNetControlMode": {
+   "heading": "Kontrollmodus"
+ },
+ "controlNetProcessor": {
+   "heading": "Prozessor"
+ },
+ "patchmatchDownScaleSize": {
+   "heading": "Herunterskalieren"
+ }
  },
  "invocationCache": {
@@ -1080,7 +1136,8 @@
  "workflowContact": "Kontaktdaten",
  "workflowNotes": "Notizen",
  "workflowTags": "Tags",
- "workflowVersion": "Version"
+ "workflowVersion": "Version",
+ "saveToGallery": "In Galerie speichern"
  },
  "hrf": {
    "enableHrf": "Korrektur für hohe Auflösungen",
@@ -1250,7 +1307,16 @@
  "searchByName": "Nach Name suchen",
  "promptTemplateCleared": "Promptvorlage gelöscht",
  "preview": "Vorschau",
- "positivePrompt": "Positiv-Prompt"
+ "positivePrompt": "Positiv-Prompt",
+ "active": "Aktiv",
+ "deleteTemplate2": "Sind Sie sicher, dass Sie diese Vorlage löschen möchten? Dies kann nicht rückgängig gemacht werden.",
+ "deleteTemplate": "Vorlage löschen",
+ "copyTemplate": "Vorlage kopieren",
+ "editTemplate": "Vorlage bearbeiten",
+ "deleteImage": "Bild löschen",
+ "defaultTemplates": "Standardvorlagen",
+ "nameColumn": "'name'",
+ "exportDownloaded": "Export heruntergeladen"
  },
  "newUserExperience": {
    "gettingStartedSeries": "Wünschen Sie weitere Anleitungen? In unserer <LinkComponent>Einführungsserie</LinkComponent> finden Sie Tipps, wie Sie das Potenzial von Invoke Studio voll ausschöpfen können.",
@@ -1263,13 +1329,22 @@
    "bbox": "Bbox"
  },
  "transform": {
-   "fitToBbox": "An Bbox anpassen"
+   "fitToBbox": "An Bbox anpassen",
+   "reset": "Zurücksetzen",
+   "apply": "Anwenden",
+   "cancel": "Abbrechen"
  },
  "pullBboxIntoLayerError": "Problem, Bbox in die Ebene zu ziehen",
  "pullBboxIntoLayer": "Bbox in Ebene ziehen",
  "HUD": {
    "bbox": "Bbox",
-   "scaledBbox": "Skalierte Bbox"
+   "scaledBbox": "Skalierte Bbox",
+   "entityStatus": {
+     "isHidden": "{{title}} ist ausgeblendet",
+     "isDisabled": "{{title}} ist deaktiviert",
+     "isLocked": "{{title}} ist gesperrt",
+     "isEmpty": "{{title}} ist leer"
+   }
  },
  "fitBboxToLayers": "Bbox an Ebenen anpassen",
  "pullBboxIntoReferenceImage": "Bbox ins Referenzbild ziehen",
@@ -1279,7 +1354,12 @@
  "clipToBbox": "Pinselstriche auf Bbox beschränken",
  "canvasContextMenu": {
    "saveBboxToGallery": "Bbox in Galerie speichern",
-   "bboxGroup": "Aus Bbox erstellen"
+   "bboxGroup": "Aus Bbox erstellen",
+   "canvasGroup": "Leinwand",
+   "newGlobalReferenceImage": "Neues globales Referenzbild",
+   "newRegionalReferenceImage": "Neues regionales Referenzbild",
+   "newControlLayer": "Neue Kontroll-Ebene",
+   "newRasterLayer": "Neue Raster-Ebene"
  },
  "rectangle": "Rechteck",
  "saveCanvasToGallery": "Leinwand in Galerie speichern",
@@ -1310,7 +1390,7 @@
  "regional": "Regional",
  "newGlobalReferenceImageOk": "Globales Referenzbild erstellt",
  "savedToGalleryError": "Fehler beim Speichern in der Galerie",
- "savedToGalleryOk": "In Galerie speichern",
+ "savedToGalleryOk": "In Galerie gespeichert",
  "newGlobalReferenceImageError": "Problem beim Erstellen eines globalen Referenzbilds",
  "newRegionalReferenceImageOk": "Regionales Referenzbild erstellt",
  "duplicate": "Duplizieren",
@@ -1343,12 +1423,39 @@
  "showProgressOnCanvas": "Fortschritt auf Leinwand anzeigen",
  "controlMode": {
    "balanced": "Ausgewogen"
  }
  },
+ "globalReferenceImages_withCount_hidden": "Globale Referenzbilder ({{count}} ausgeblendet)",
+ "sendToGallery": "An Galerie senden",
+ "stagingArea": {
+   "accept": "Annehmen",
+   "next": "Nächste",
+   "discardAll": "Alle verwerfen",
+   "discard": "Verwerfen",
+   "previous": "Vorherige"
+ },
+ "regionalGuidance_withCount_visible": "Regionale Führung ({{count}})",
+ "regionalGuidance_withCount_hidden": "Regionale Führung ({{count}} ausgeblendet)",
+ "settings": {
+   "snapToGrid": {
+     "on": "Ein",
+     "off": "Aus",
+     "label": "Am Raster ausrichten"
+   }
+ },
+ "layer_one": "Ebene",
+ "layer_other": "Ebenen",
+ "layer_withCount_one": "Ebene ({{count}})",
+ "layer_withCount_other": "Ebenen ({{count}})"
  },
  "upsell": {
    "shareAccess": "Zugang teilen",
    "professional": "Professionell",
    "inviteTeammates": "Teamkollegen einladen",
    "professionalUpsell": "Verfügbar in der Professional Edition von Invoke. Klicken Sie hier oder besuchen Sie invoke.com/pricing für weitere Details."
  },
  "upscaling": {
    "creativity": "Kreativität",
    "structure": "Struktur",
    "scale": "Maßstab"
  }
}
@@ -12,7 +12,8 @@
  "resetUI": "$t(accessibility.reset) UI",
  "toggleRightPanel": "Toggle Right Panel (G)",
  "toggleLeftPanel": "Toggle Left Panel (T)",
- "uploadImage": "Upload Image"
+ "uploadImage": "Upload Image",
+ "uploadImages": "Upload Image(s)"
  },
  "boards": {
    "addBoard": "Add Board",
@@ -90,6 +91,7 @@
  "batch": "Batch Manager",
  "beta": "Beta",
  "cancel": "Cancel",
+ "close": "Close",
  "copy": "Copy",
  "copyError": "$t(gallery.copy) Error",
  "on": "On",
@@ -284,6 +286,7 @@
  "assetsTab": "Files you’ve uploaded for use in your projects.",
  "autoAssignBoardOnClick": "Auto-Assign Board on Click",
  "autoSwitchNewImages": "Auto-Switch to New Images",
+ "boardsSettings": "Boards Settings",
  "copy": "Copy",
  "currentlyInUse": "This image is currently in use in the following features:",
  "drop": "Drop",
@@ -303,6 +306,7 @@
  "go": "Go",
  "image": "image",
  "imagesTab": "Images you’ve created and saved within Invoke.",
+ "imagesSettings": "Gallery Images Settings",
  "jump": "Jump",
  "loading": "Loading",
  "newestFirst": "Newest First",
@@ -661,6 +665,7 @@
  "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
  "createdBy": "Created By",
  "generationMode": "Generation Mode",
+ "guidance": "Guidance",
  "height": "Height",
  "imageDetails": "Image Details",
  "imageDimensions": "Image Dimensions",
@@ -724,6 +729,7 @@
  "huggingFaceHelper": "If multiple models are found in this repo, you will be prompted to select one to install.",
  "hfToken": "HuggingFace Token",
  "imageEncoderModelId": "Image Encoder Model ID",
+ "includesNModels": "Includes {{n}} models and their dependencies",
  "installQueue": "Install Queue",
  "inplaceInstall": "In-place install",
  "inplaceInstallDesc": "Install models without copying the files. When using the model, it will be loaded from its this location. If disabled, the model file(s) will be copied into the Invoke-managed models directory during installation.",
@@ -777,6 +783,8 @@
  "simpleModelPlaceholder": "URL or path to a local file or diffusers folder",
  "source": "Source",
  "spandrelImageToImage": "Image to Image (Spandrel)",
+ "starterBundles": "Starter Bundles",
+ "starterBundleHelpText": "Easily install all models needed to get started with a base model, including a main model, controlnets, IP adapters, and more. Selecting a bundle will skip any models that you already have installed.",
  "starterModels": "Starter Models",
  "starterModelsInModelManager": "Starter Models can be found in Model Manager",
  "syncModels": "Sync Models",
@@ -794,7 +802,13 @@
  "vae": "VAE",
  "vaePrecision": "VAE Precision",
  "variant": "Variant",
- "width": "Width"
+ "width": "Width",
+ "installingBundle": "Installing Bundle",
+ "installingModel": "Installing Model",
+ "installingXModels_one": "Installing {{count}} model",
+ "installingXModels_other": "Installing {{count}} models",
+ "skippingXDuplicates_one": ", skipping {{count}} duplicate",
+ "skippingXDuplicates_other": ", skipping {{count}} duplicates"
  },
  "models": {
    "addLora": "Add LoRA",
@@ -874,7 +888,7 @@
  "nodeType": "Node Type",
  "noFieldsLinearview": "No fields added to Linear View",
  "noFieldsViewMode": "This workflow has no selected fields to display. View the full workflow to configure values.",
- "workflowHelpText": "Need Help? Check out our guide to <LinkComponent>Getting Started with Workflows</LinkComponent>",
+ "workflowHelpText": "Need Help? Check out our guide to <LinkComponent>Getting Started with Workflows</LinkComponent>.",
  "noNodeSelected": "No node selected",
  "nodeOpacity": "Node Opacity",
  "nodeVersion": "Node Version",
@@ -1118,13 +1132,15 @@
  "reloadingIn": "Reloading in"
  },
  "toast": {
-   "addedToBoard": "Added to board",
+   "addedToBoard": "Added to board {{name}}'s assets",
+   "addedToUncategorized": "Added to board $t(boards.uncategorized)'s assets",
    "baseModelChanged": "Base Model Changed",
    "baseModelChangedCleared_one": "Cleared or disabled {{count}} incompatible submodel",
    "baseModelChangedCleared_other": "Cleared or disabled {{count}} incompatible submodels",
    "canceled": "Processing Canceled",
    "connected": "Connected to Server",
    "imageCopied": "Image Copied",
+   "linkCopied": "Link Copied",
    "unableToLoadImage": "Unable to Load Image",
    "unableToLoadImageMetadata": "Unable to Load Image Metadata",
    "unableToLoadStylePreset": "Unable to Load Style Preset",
@@ -1166,7 +1182,10 @@
  "setNodeField": "Set as node field",
  "somethingWentWrong": "Something Went Wrong",
  "uploadFailed": "Upload failed",
- "uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
+ "imagesWillBeAddedTo": "Uploaded images will be added to board {{boardName}}'s assets.",
+ "uploadFailedInvalidUploadDesc_withCount_one": "Must be maximum of 1 PNG or JPEG image.",
+ "uploadFailedInvalidUploadDesc_withCount_other": "Must be maximum of {{count}} PNG or JPEG images.",
+ "uploadFailedInvalidUploadDesc": "Must be PNG or JPEG images.",
  "workflowLoaded": "Workflow Loaded",
  "problemRetrievingWorkflow": "Problem Retrieving Workflow",
  "workflowDeleted": "Workflow Deleted",
@@ -1559,7 +1578,12 @@
  "loadFromGraph": "Load Workflow from Graph",
  "convertGraph": "Convert Graph",
  "loadWorkflow": "$t(common.load) Workflow",
- "autoLayout": "Auto Layout"
+ "autoLayout": "Auto Layout",
+ "edit": "Edit",
+ "download": "Download",
+ "copyShareLink": "Copy Share Link",
+ "copyShareLinkForWorkflow": "Copy Share Link for Workflow",
+ "delete": "Delete"
  },
  "controlLayers": {
    "regional": "Regional",
@@ -1634,6 +1658,7 @@
  "sendToCanvas": "Send To Canvas",
  "newLayerFromImage": "New Layer from Image",
  "newCanvasFromImage": "New Canvas from Image",
+ "newImg2ImgCanvasFromImage": "New Img2Img from Image",
  "copyToClipboard": "Copy to Clipboard",
  "sendToCanvasDesc": "Pressing Invoke stages your work in progress on the canvas.",
  "viewProgressInViewer": "View progress and outputs in the <Btn>Image Viewer</Btn>.",
@@ -1741,7 +1766,7 @@
  "label": "Canny Edge Detection",
  "description": "Generates an edge map from the selected layer using the Canny edge detection algorithm.",
  "low_threshold": "Low Threshold",
- "high_threshold": "Hight Threshold"
+ "high_threshold": "High Threshold"
  },
  "color_map": {
    "label": "Color Map",
@@ -1809,6 +1834,10 @@
  "transform": {
    "transform": "Transform",
    "fitToBbox": "Fit to Bbox",
+   "fitMode": "Fit Mode",
+   "fitModeContain": "Contain",
+   "fitModeCover": "Cover",
+   "fitModeFill": "Fill",
    "reset": "Reset",
    "apply": "Apply",
    "cancel": "Cancel"
@@ -1975,8 +2004,12 @@
  }
  },
  "newUserExperience": {
+   "toGetStartedLocal": "To get started, make sure to download or import models needed to run Invoke. Then, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
    "toGetStarted": "To get started, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
-   "gettingStartedSeries": "Want more guidance? Check out our <LinkComponent>Getting Started Series</LinkComponent> for tips on unlocking the full potential of the Invoke Studio."
+   "gettingStartedSeries": "Want more guidance? Check out our <LinkComponent>Getting Started Series</LinkComponent> for tips on unlocking the full potential of the Invoke Studio.",
+   "downloadStarterModels": "Download Starter Models",
+   "importModels": "Import Models",
+   "noModelsInstalled": "It looks like you don't have any models installed"
  },
  "whatsNew": {
    "whatsNewInInvoke": "What's New in Invoke",
@@ -224,7 +224,9 @@
  "createIssue": "Crear un problema",
  "resetUI": "Interfaz de usuario $t(accessibility.reset)",
  "mode": "Modo",
- "submitSupportTicket": "Enviar Ticket de Soporte"
+ "submitSupportTicket": "Enviar Ticket de Soporte",
+ "toggleRightPanel": "Activar o desactivar el panel derecho (G)",
+ "toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)"
  },
  "nodes": {
    "zoomInNodes": "Acercar",
@@ -273,7 +275,12 @@
  "addSharedBoard": "Agregar Panel Compartido",
  "boards": "Paneles",
  "archiveBoard": "Archivar Panel",
- "archived": "Archivado"
+ "archived": "Archivado",
+ "selectedForAutoAdd": "Seleccionado para agregar automáticamente",
+ "unarchiveBoard": "Desarchivar el tablero",
+ "noBoards": "No hay tableros {{boardType}}",
+ "shared": "Carpetas compartidas",
+ "deletedPrivateBoardsCannotbeRestored": "Los tableros eliminados no se pueden restaurar. Al elegir \"Eliminar solo tablero\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen."
  },
  "accordions": {
    "compositing": {
@@ -316,5 +323,13 @@
  "inviteTeammates": "Invitar compañeros de equipo",
  "shareAccess": "Compartir acceso",
  "professionalUpsell": "Disponible en la edición profesional de Invoke. Haz clic aquí o visita invoke.com/pricing para obtener más detalles."
  },
+ "controlLayers": {
+   "layer_one": "Capa",
+   "layer_many": "Capas",
+   "layer_other": "Capas",
+   "layer_withCount_one": "({{count}}) capa",
+   "layer_withCount_many": "({{count}}) capas",
+   "layer_withCount_other": "({{count}}) capas"
+ }
}
(File diff suppressed because it is too large.)
@@ -91,7 +91,8 @@
  "reset": "Reimposta",
  "none": "Niente",
  "new": "Nuovo",
- "view": "Vista"
+ "view": "Vista",
+ "close": "Chiudi"
  },
  "gallery": {
    "galleryImageSize": "Dimensione dell'immagine",
@@ -157,7 +158,9 @@
  "openViewer": "Apri visualizzatore",
  "closeViewer": "Chiudi visualizzatore",
  "imagesTab": "Immagini create e salvate in Invoke.",
- "assetsTab": "File che hai caricato per usarli nei tuoi progetti."
+ "assetsTab": "File che hai caricato per usarli nei tuoi progetti.",
+ "boardsSettings": "Impostazioni Bacheche",
+ "imagesSettings": "Impostazioni Immagini Galleria"
  },
  "hotkeys": {
    "searchHotkeys": "Cerca tasti di scelta rapida",
@@ -766,7 +769,8 @@
  "imageSaved": "Immagine salvata",
  "imageSavingFailed": "Salvataggio dell'immagine non riuscito",
  "layerCopiedToClipboard": "Livello copiato negli appunti",
- "imageNotLoadedDesc": "Impossibile trovare l'immagine"
+ "imageNotLoadedDesc": "Impossibile trovare l'immagine",
+ "linkCopied": "Collegamento copiato"
  },
  "accessibility": {
    "invokeProgressBar": "Barra di avanzamento generazione",
@@ -922,7 +926,7 @@
  "saveToGallery": "Salva nella Galleria",
  "noMatchingWorkflows": "Nessun flusso di lavoro corrispondente",
  "noWorkflows": "Nessun flusso di lavoro",
- "workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>"
+ "workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>."
  },
  "boards": {
    "autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1519,7 +1523,8 @@
  "parameterSet": "Parametro {{parameter}} impostato",
  "parsingFailed": "Analisi non riuscita",
  "recallParameter": "Richiama {{label}}",
- "canvasV2Metadata": "Tela"
+ "canvasV2Metadata": "Tela",
+ "guidance": "Guida"
  },
  "hrf": {
    "enableHrf": "Abilita Correzione Alta Risoluzione",
@@ -1570,7 +1575,12 @@
  "defaultWorkflows": "Flussi di lavoro predefiniti",
  "uploadAndSaveWorkflow": "Carica nella libreria",
  "chooseWorkflowFromLibrary": "Scegli il flusso di lavoro dalla libreria",
- "deleteWorkflow2": "Vuoi davvero eliminare questo flusso di lavoro? Questa operazione non può essere annullata."
+ "deleteWorkflow2": "Vuoi davvero eliminare questo flusso di lavoro? Questa operazione non può essere annullata.",
+ "edit": "Modifica",
+ "download": "Scarica",
+ "copyShareLink": "Copia Condividi Link",
+ "copyShareLinkForWorkflow": "Copia Condividi Link del Flusso di lavoro",
+ "delete": "Elimina"
  },
  "accordions": {
    "compositing": {
@@ -1730,7 +1740,8 @@
  "mlsd_detection": {
    "score_threshold": "Soglia di punteggio",
    "distance_threshold": "Soglia di distanza",
-   "description": "Genera una mappa dei segmenti di linea dal livello selezionato utilizzando il modello di rilevamento dei segmenti di linea MLSD."
+   "description": "Genera una mappa dei segmenti di linea dal livello selezionato utilizzando il modello di rilevamento dei segmenti di linea MLSD.",
+   "label": "Rilevamento segmenti di linea"
  },
  "content_shuffle": {
    "label": "Mescola contenuto",
@@ -1869,7 +1880,11 @@
  "fitToBbox": "Adatta al Riquadro",
  "transform": "Trasforma",
  "apply": "Applica",
- "cancel": "Annulla"
+ "cancel": "Annulla",
+ "fitMode": "Adattamento",
+ "fitModeContain": "Contieni",
+ "fitModeFill": "Riempi",
+ "fitModeCover": "Copri"
  },
  "stagingArea": {
    "next": "Successiva",
@@ -1904,7 +1919,8 @@
  "newRasterLayer": "Nuovo Livello Raster",
  "saveCanvasToGallery": "Salva la Tela nella Galleria",
  "saveToGalleryGroup": "Salva nella Galleria"
- }
+ },
+ "newImg2ImgCanvasFromImage": "Nuova Immagine da immagine"
  },
  "ui": {
    "tabs": {
@@ -158,7 +158,9 @@
  "move": "Двигать",
  "gallery": "Галерея",
  "openViewer": "Открыть просмотрщик",
- "closeViewer": "Закрыть просмотрщик"
+ "closeViewer": "Закрыть просмотрщик",
+ "imagesTab": "Изображения, созданные и сохраненные в Invoke.",
+ "assetsTab": "Файлы, которые вы загрузили для использования в своих проектах."
  },
  "hotkeys": {
    "searchHotkeys": "Поиск горячих клавиш",
@@ -928,7 +930,10 @@
  "imageAccessError": "Невозможно найти изображение {{image_name}}, сбрасываем на значение по умолчанию",
  "boardAccessError": "Невозможно найти доску {{board_id}}, сбрасываем на значение по умолчанию",
  "modelAccessError": "Невозможно найти модель {{key}}, сброс на модель по умолчанию",
- "saveToGallery": "Сохранить в галерею"
+ "saveToGallery": "Сохранить в галерею",
+ "noWorkflows": "Нет рабочих процессов",
+ "noMatchingWorkflows": "Нет совпадающих рабочих процессов",
+ "workflowHelpText": "Нужна помощь? Ознакомьтесь с нашим руководством <LinkComponent>Getting Started with Workflows</LinkComponent>"
  },
  "boards": {
    "autoAddBoard": "Авто добавление Доски",
@@ -1553,7 +1558,10 @@
  "autoLayout": "Автоматическое расположение",
  "userWorkflows": "Пользовательские рабочие процессы",
  "projectWorkflows": "Рабочие процессы проекта",
- "defaultWorkflows": "Стандартные рабочие процессы"
+ "defaultWorkflows": "Стандартные рабочие процессы",
+ "deleteWorkflow2": "Вы уверены, что хотите удалить этот рабочий процесс? Это нельзя отменить.",
+ "chooseWorkflowFromLibrary": "Выбрать рабочий процесс из библиотеки",
+ "uploadAndSaveWorkflow": "Загрузить в библиотеку"
  },
  "hrf": {
    "enableHrf": "Включить исправление высокого разрешения",
@@ -1872,8 +1880,8 @@
  "duplicate": "Дублировать",
  "inpaintMasks_withCount_visible": "Маски перерисовки ({{count}})",
  "layer_one": "Слой",
- "layer_few": "",
- "layer_many": "",
+ "layer_few": "Слоя",
+ "layer_many": "Слоев",
  "prompt": "Запрос",
  "negativePrompt": "Исключающий запрос",
  "beginEndStepPercentShort": "Начало/конец %",
@@ -2035,7 +2043,7 @@
  "whatsNewInInvoke": "Что нового в Invoke"
  },
  "newUserExperience": {
-   "toGetStarted": "Чтобы начать работу, введите в поле запрос и нажмите <StrongComponent>Invoke</StrongComponent>, чтобы сгенерировать первое изображение. Вы можете сохранить изображения непосредственно в <StrongComponent>Галерею</StrongComponent> или отредактировать их на <StrongComponent>Холсте</StrongComponent>.",
+   "toGetStarted": "Чтобы начать работу, введите в поле запрос и нажмите <StrongComponent>Invoke</StrongComponent>, чтобы сгенерировать первое изображение. Выберите шаблон запроса, чтобы улучшить результаты. Вы можете сохранить изображения непосредственно в <StrongComponent>Галерею</StrongComponent> или отредактировать их на <StrongComponent>Холсте</StrongComponent>.",
    "gettingStartedSeries": "Хотите получить больше рекомендаций? Ознакомьтесь с нашей серией <LinkComponent>Getting Started Series</LinkComponent> для получения советов по раскрытию всего потенциала Invoke Studio."
  }
}
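Editor's note on the locale hunks above: the `_one`/`_few`/`_many`/`_other` key suffixes are i18next plural forms, and `{{count}}`/`{{name}}` are interpolation slots. A minimal sketch of how i18next resolves them at runtime, using the Russian values fixed above (the init setup itself is illustrative, not from the diff):

import i18next from 'i18next';

await i18next.init({
  lng: 'ru',
  resources: {
    ru: {
      translation: {
        layer_one: 'Слой',
        layer_few: 'Слоя',
        layer_many: 'Слоев',
      },
    },
  },
});

// i18next picks the suffix from the language's CLDR plural rules for `count`:
i18next.t('layer', { count: 1 });  // 'Слой'  (one)
i18next.t('layer', { count: 3 });  // 'Слоя'  (few)
i18next.t('layer', { count: 12 }); // 'Слоев' (many)

This is why the previously empty `layer_few`/`layer_many` strings mattered: for counts like 3 or 12, i18next would have rendered an empty label.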
@@ -4,6 +4,7 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
  import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
  import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
  import { useLogger } from 'app/logging/useLogger';
+ import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
  import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
  import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
  import type { PartialAppConfig } from 'app/types/invokeai';
@@ -20,14 +21,18 @@ import {
  import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
  import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
  import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
+ import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
  import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
+ import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/WorkflowListMenu/ShareWorkflowModal';
  import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
  import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
  import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
+ import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
  import { configChanged } from 'features/system/store/configSlice';
  import { selectLanguage } from 'features/system/store/systemSelectors';
  import { AppContent } from 'features/ui/components/AppContent';
- import { AnimatePresence } from 'framer-motion';
+ import { DeleteWorkflowDialog } from 'features/workflowLibrary/components/DeleteLibraryWorkflowConfirmationAlertDialog';
+ import { NewWorkflowConfirmationAlertDialog } from 'features/workflowLibrary/components/NewWorkflowConfirmationAlertDialog';
  import i18n from 'i18n';
  import { size } from 'lodash-es';
  import { memo, useCallback, useEffect } from 'react';
@@ -55,6 +60,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
  useGlobalModifiersInit();
  useGlobalHotkeys();
  useGetOpenAPISchemaQuery();
+ useSyncLoggingConfig();

  const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();

@@ -96,22 +102,25 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
      >
        <input {...dropzone.getInputProps()} />
        <AppContent />
-       <AnimatePresence>
-         {dropzone.isDragActive && isHandlingUpload && (
-           <ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
-         )}
-       </AnimatePresence>
+       {dropzone.isDragActive && isHandlingUpload && (
+         <ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
+       )}
      </Box>
      <DeleteImageModal />
      <ChangeBoardModal />
      <DynamicPromptsModal />
      <StylePresetModal />
      <ClearQueueConfirmationsAlertDialog />
+     <NewWorkflowConfirmationAlertDialog />
      <DeleteStylePresetDialog />
+     <DeleteWorkflowDialog />
+     <ShareWorkflowModal />
+     <RefreshAfterResetModal />
      <DeleteBoardModal />
      <GlobalImageHotkeys />
      <NewGallerySessionDialog />
      <NewCanvasSessionDialog />
+     <ImageContextMenu />
    </ErrorBoundary>
  );
};
@@ -1,6 +1,7 @@
  import { skipToken } from '@reduxjs/toolkit/query';
  import { useAppSelector } from 'app/store/storeHooks';
  import { useIsRegionFocused } from 'common/hooks/focus';
+ import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
  import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
  import { useImageActions } from 'features/gallery/hooks/useImageActions';
  import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
@@ -11,6 +12,7 @@ import { useGetImageDTOQuery } from 'services/api/endpoints/images';
  import type { ImageDTO } from 'services/api/types';

  export const GlobalImageHotkeys = memo(() => {
+   useAssertSingleton('GlobalImageHotkeys');
    const lastSelectedImage = useAppSelector(selectLastSelectedImage);
    const { currentData: imageDTO } = useGetImageDTOQuery(lastSelectedImage?.image_name ?? skipToken);

@@ -2,6 +2,8 @@ import 'i18n';

  import type { Middleware } from '@reduxjs/toolkit';
  import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
+ import type { LoggingOverrides } from 'app/logging/logger';
+ import { $loggingOverrides, configureLogging } from 'app/logging/logger';
  import { $authToken } from 'app/store/nanostores/authToken';
  import { $baseUrl } from 'app/store/nanostores/baseUrl';
  import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
@@ -20,7 +22,7 @@ import Loading from 'common/components/Loading/Loading';
  import AppDndContext from 'features/dnd/components/AppDndContext';
  import type { WorkflowCategory } from 'features/nodes/types/workflow';
  import type { PropsWithChildren, ReactNode } from 'react';
- import React, { lazy, memo, useEffect, useMemo } from 'react';
+ import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
  import { Provider } from 'react-redux';
  import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
  import { $socketOptions } from 'services/events/stores';
@@ -46,6 +48,7 @@ interface Props extends PropsWithChildren {
  isDebugging?: boolean;
  logo?: ReactNode;
  workflowCategories?: WorkflowCategory[];
+ loggingOverrides?: LoggingOverrides;
}

const InvokeAIUI = ({
@@ -65,7 +68,26 @@ const InvokeAIUI = ({
  isDebugging = false,
  logo,
  workflowCategories,
+ loggingOverrides,
}: Props) => {
+ useLayoutEffect(() => {
+   /*
+    * We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
+    * possible opportunity.
+    *
+    * Once redux initializes, we will check the user's settings and update the logging config accordingly. See
+    * `useSyncLoggingConfig`.
+    */
+   $loggingOverrides.set(loggingOverrides);
+
+   // Until we get the user's settings, we will use the overrides OR default values.
+   configureLogging(
+     loggingOverrides?.logIsEnabled ?? true,
+     loggingOverrides?.logLevel ?? 'debug',
+     loggingOverrides?.logNamespaces ?? '*'
+   );
+ }, [loggingOverrides]);
+
  useEffect(() => {
    // configure API client token
    if (token) {
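For host applications embedding the UI, the new `loggingOverrides` prop pins the logging config before Redux initializes. A minimal sketch of how a host might use it; the import path and host component are assumptions, not from the diff:

import InvokeAIUI from 'app/components/InvokeAIUI';

export const Host = () => (
  // Applied in the useLayoutEffect above, then reconciled with the user's
  // settings once redux is up (see useSyncLoggingConfig further below).
  <InvokeAIUI loggingOverrides={{ logIsEnabled: true, logLevel: 'info', logNamespaces: ['canvas'] }} />
);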
@@ -12,7 +12,7 @@ import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
  import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
  import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
  import { toast } from 'features/toast/toast';
- import { setActiveTab } from 'features/ui/store/uiSlice';
+ import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
  import { useGetAndLoadLibraryWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadLibraryWorkflow';
  import { useCallback, useEffect, useRef } from 'react';
  import { useTranslation } from 'react-i18next';
@@ -140,6 +140,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
      case 'generation':
        // Go to the canvas tab, open the image viewer, and enable send-to-gallery mode
        store.dispatch(setActiveTab('canvas'));
+       store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
        store.dispatch(settingsSendToCanvasChanged(false));
        $imageViewer.set(true);
        break;
@@ -9,11 +9,10 @@ const serializeMessage: MessageSerializer = (message) => {
};

ROARR.serializeMessage = serializeMessage;
- ROARR.write = createLogWriter();

- export const BASE_CONTEXT = {};
+ const BASE_CONTEXT = {};

- export const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));
+ const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));

export const zLogNamespace = z.enum([
  'canvas',
@@ -35,8 +34,22 @@ export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']);
export type LogLevel = z.infer<typeof zLogLevel>;
export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;

+ /**
+  * Override logging settings.
+  * @property logIsEnabled Override the enabled log state. Omit to use the user's settings.
+  * @property logNamespaces Override the enabled log namespaces. Use `"*"` for all namespaces. Omit to use the user's settings.
+  * @property logLevel Override the log level. Omit to use the user's settings.
+  */
+ export type LoggingOverrides = {
+   logIsEnabled?: boolean;
+   logNamespaces?: LogNamespace[] | '*';
+   logLevel?: LogLevel;
+ };
+
+ export const $loggingOverrides = atom<LoggingOverrides | undefined>();
+
// Translate human-readable log levels to numbers, used for log filtering
- export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
+ const LOG_LEVEL_MAP: Record<LogLevel, number> = {
  trace: 10,
  debug: 20,
  info: 30,
@@ -44,3 +57,40 @@ export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
  error: 50,
  fatal: 60,
};
+
+ /**
+  * Configure logging, pushing settings to local storage.
+  *
+  * @param logIsEnabled Whether logging is enabled
+  * @param logLevel The log level
+  * @param logNamespaces A list of log namespaces to enable, or '*' to enable all
+  */
+ export const configureLogging = (
+   logIsEnabled: boolean = true,
+   logLevel: LogLevel = 'warn',
+   logNamespaces: LogNamespace[] | '*'
+ ): void => {
+   if (!logIsEnabled) {
+     // Disable console log output
+     localStorage.setItem('ROARR_LOG', 'false');
+   } else {
+     // Enable console log output
+     localStorage.setItem('ROARR_LOG', 'true');
+
+     // Use a filter to show only logs of the given level
+     let filter = `context.logLevel:>=${LOG_LEVEL_MAP[logLevel]}`;
+
+     const namespaces = logNamespaces === '*' ? zLogNamespace.options : logNamespaces;
+
+     if (namespaces.length > 0) {
+       filter += ` AND (${namespaces.map((ns) => `context.namespace:${ns}`).join(' OR ')})`;
+     } else {
+       // This effectively hides all logs because we use namespaces for all logs
+       filter += ' AND context.namespace:undefined';
+     }
+
+     localStorage.setItem('ROARR_FILTER', filter);
+   }
+
+   ROARR.write = createLogWriter();
+ };
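To make the filter construction concrete, here is what a call writes to localStorage; a sketch whose values follow directly from the function body and LOG_LEVEL_MAP above:

import { configureLogging } from 'app/logging/logger';

configureLogging(true, 'info', ['canvas']);
// localStorage after the call:
//   ROARR_LOG    -> 'true'
//   ROARR_FILTER -> 'context.logLevel:>=30 AND (context.namespace:canvas)'
// ('info' maps to 30 in LOG_LEVEL_MAP)

configureLogging(false, 'info', '*');
// ROARR_LOG -> 'false': console output is disabled and no filter is written.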
@@ -1,53 +1,9 @@
- import { createLogWriter } from '@roarr/browser-log-writer';
- import { useAppSelector } from 'app/store/storeHooks';
- import {
-   selectSystemLogIsEnabled,
-   selectSystemLogLevel,
-   selectSystemLogNamespaces,
- } from 'features/system/store/systemSlice';
- import { useEffect, useMemo } from 'react';
- import { ROARR, Roarr } from 'roarr';
+ import { useMemo } from 'react';

  import type { LogNamespace } from './logger';
- import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP, logger } from './logger';
+ import { logger } from './logger';

  export const useLogger = (namespace: LogNamespace) => {
-   const logLevel = useAppSelector(selectSystemLogLevel);
-   const logNamespaces = useAppSelector(selectSystemLogNamespaces);
-   const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
-
-   // The provided Roarr browser log writer uses localStorage to config logging to console
-   useEffect(() => {
-     if (logIsEnabled) {
-       // Enable console log output
-       localStorage.setItem('ROARR_LOG', 'true');
-
-       // Use a filter to show only logs of the given level
-       let filter = `context.logLevel:>=${LOG_LEVEL_MAP[logLevel]}`;
-       if (logNamespaces.length > 0) {
-         filter += ` AND (${logNamespaces.map((ns) => `context.namespace:${ns}`).join(' OR ')})`;
-       } else {
-         filter += ' AND context.namespace:undefined';
-       }
-       localStorage.setItem('ROARR_FILTER', filter);
-     } else {
-       // Disable console log output
-       localStorage.setItem('ROARR_LOG', 'false');
-     }
-     ROARR.write = createLogWriter();
-   }, [logLevel, logIsEnabled, logNamespaces]);
-
-   // Update the module-scoped logger context as needed
-   useEffect(() => {
-     // TODO: type this properly
-     //eslint-disable-next-line @typescript-eslint/no-explicit-any
-     const newContext: Record<string, any> = {
-       ...BASE_CONTEXT,
-     };
-
-     $logger.set(Roarr.child(newContext));
-   }, []);
-
    const log = useMemo(() => logger(namespace), [namespace]);

    return log;
@@ -0,0 +1,43 @@
+ import { useStore } from '@nanostores/react';
+ import { $loggingOverrides, configureLogging } from 'app/logging/logger';
+ import { useAppSelector } from 'app/store/storeHooks';
+ import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
+ import {
+   selectSystemLogIsEnabled,
+   selectSystemLogLevel,
+   selectSystemLogNamespaces,
+ } from 'features/system/store/systemSlice';
+ import { useLayoutEffect } from 'react';
+
+ /**
+  * This hook synchronizes the logging configuration stored in Redux with the logging system, which uses localstorage.
+  *
+  * The sync is one-way: from Redux to localstorage. This means that changes made in the UI will be reflected in the
+  * logging system, but changes made directly to localstorage will not be reflected in the UI.
+  *
+  * See {@link configureLogging}
+  */
+ export const useSyncLoggingConfig = () => {
+   useAssertSingleton('useSyncLoggingConfig');
+
+   const loggingOverrides = useStore($loggingOverrides);
+
+   const logLevel = useAppSelector(selectSystemLogLevel);
+   const logNamespaces = useAppSelector(selectSystemLogNamespaces);
+   const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
+
+   useLayoutEffect(() => {
+     configureLogging(
+       loggingOverrides?.logIsEnabled ?? logIsEnabled,
+       loggingOverrides?.logLevel ?? logLevel,
+       loggingOverrides?.logNamespaces ?? logNamespaces
+     );
+   }, [
+     logIsEnabled,
+     logLevel,
+     logNamespaces,
+     loggingOverrides?.logIsEnabled,
+     loggingOverrides?.logLevel,
+     loggingOverrides?.logNamespaces,
+   ]);
+ };
@@ -1,3 +1,4 @@
  export const STORAGE_PREFIX = '@@invokeai-';
  export const EMPTY_ARRAY = [];
+ /** @knipignore */
  export const EMPTY_OBJECT = {};
@@ -7,12 +7,20 @@ import { diff } from 'jsondiffpatch';
/**
 * Super simple logger middleware. Useful for debugging when the redux devtools are awkward.
 */
- export const debugLoggerMiddleware: Middleware = (api: MiddlewareAPI) => (next) => (action) => {
-   const originalState = api.getState();
-   console.log('REDUX: dispatching', action);
-   const result = next(action);
-   const nextState = api.getState();
-   console.log('REDUX: next state', nextState);
-   console.log('REDUX: diff', diff(originalState, nextState));
-   return result;
- };
+ export const getDebugLoggerMiddleware =
+   (options?: { withDiff?: boolean; withNextState?: boolean }): Middleware =>
+   (api: MiddlewareAPI) =>
+   (next) =>
+   (action) => {
+     const originalState = api.getState();
+     console.log('REDUX: dispatching', action);
+     const result = next(action);
+     const nextState = api.getState();
+     if (options?.withNextState) {
+       console.log('REDUX: next state', nextState);
+     }
+     if (options?.withDiff) {
+       console.log('REDUX: diff', diff(originalState, nextState));
+     }
+     return result;
+   };
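A minimal usage sketch for the factory above, assuming a standard Redux Toolkit store; the import path and the demo slice are placeholders, not from the diff:

import { configureStore, createSlice } from '@reduxjs/toolkit';
import { getDebugLoggerMiddleware } from 'app/store/middleware/debugLoggerMiddleware';

const demo = createSlice({
  name: 'demo',
  initialState: { n: 0 },
  reducers: { inc: (s) => { s.n += 1; } },
});

const store = configureStore({
  reducer: { demo: demo.reducer },
  middleware: (getDefaultMiddleware) =>
    // Opt in to the diff output only; next-state logging stays off.
    getDefaultMiddleware().concat(getDebugLoggerMiddleware({ withDiff: true })),
});

store.dispatch(demo.actions.inc());
// console: 'REDUX: dispatching ...' followed by 'REDUX: diff ...'

Making the middleware a factory with opt-in flags keeps the default console output quiet while preserving the old behavior when both options are enabled.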
@@ -29,13 +29,13 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {
      const { autoAddBoardId, selectedBoardId } = state.gallery;

      // If the deleted board was currently selected, we should reset the selected board to uncategorized
-     if (deletedBoardId === selectedBoardId) {
+     if (selectedBoardId !== 'none' && deletedBoardId === selectedBoardId) {
        dispatch(boardIdSelected({ boardId: 'none' }));
        dispatch(galleryViewChanged('images'));
      }

      // If the deleted board was selected for auto-add, we should reset the auto-add board to uncategorized
-     if (deletedBoardId === autoAddBoardId) {
+     if (autoAddBoardId !== 'none' && deletedBoardId === autoAddBoardId) {
        dispatch(autoAddBoardIdChanged('none'));
      }
    },
@@ -46,11 +46,11 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {
    matcher: boardsApi.endpoints.updateBoard.matchFulfilled,
    effect: (action, { dispatch, getState }) => {
      const state = getState();
-     const { shouldShowArchivedBoards } = state.gallery;
+     const { shouldShowArchivedBoards, selectedBoardId, autoAddBoardId } = state.gallery;

      const wasArchived = action.meta.arg.originalArgs.changes.archived === true;

-     if (wasArchived && !shouldShowArchivedBoards) {
+     if (selectedBoardId !== 'none' && autoAddBoardId !== 'none' && wasArchived && !shouldShowArchivedBoards) {
        dispatch(autoAddBoardIdChanged('none'));
        dispatch(boardIdSelected({ boardId: 'none' }));
        dispatch(galleryViewChanged('images'));
@@ -80,7 +80,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {

      // Handle the case where selected board is archived
      const selectedBoard = queryResult.data.find((b) => b.board_id === selectedBoardId);
-     if (!selectedBoard || selectedBoard.archived) {
+     if (selectedBoardId !== 'none' && (!selectedBoard || selectedBoard.archived)) {
        // If we can't find the selected board or it's archived, we should reset the selected board to uncategorized
        dispatch(boardIdSelected({ boardId: 'none' }));
        dispatch(galleryViewChanged('images'));
@@ -88,7 +88,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {

      // Handle the case where auto-add board is archived
      const autoAddBoard = queryResult.data.find((b) => b.board_id === autoAddBoardId);
-     if (!autoAddBoard || autoAddBoard.archived) {
+     if (autoAddBoardId !== 'none' && (!autoAddBoard || autoAddBoard.archived)) {
        // If we can't find the auto-add board or it's archived, we should reset the selected board to uncategorized
        dispatch(autoAddBoardIdChanged('none'));
      }
@@ -106,13 +106,13 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartListening) => {
      const { selectedBoardId, autoAddBoardId } = state.gallery;

      // Handle the case where selected board isn't in the list of boards
-     if (!boards.find((b) => b.board_id === selectedBoardId)) {
+     if (selectedBoardId !== 'none' && !boards.find((b) => b.board_id === selectedBoardId)) {
        dispatch(boardIdSelected({ boardId: 'none' }));
        dispatch(galleryViewChanged('images'));
      }

      // Handle the case where auto-add board isn't in the list of boards
-     if (!boards.find((b) => b.board_id === autoAddBoardId)) {
+     if (autoAddBoardId !== 'none' && !boards.find((b) => b.board_id === autoAddBoardId)) {
        dispatch(autoAddBoardIdChanged('none'));
      }
    },
@@ -1,5 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { RootState } from 'app/store/store';
|
||||
import {
|
||||
entityRasterized,
|
||||
entitySelected,
|
||||
@@ -20,24 +21,39 @@ import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
const log = logger('gallery');
|
||||
|
||||
/**
|
||||
* Gets the description for the toast that is shown when an image is uploaded.
|
||||
* @param boardId The board id of the uploaded image
|
||||
* @param state The current state of the app
|
||||
* @returns
|
||||
*/
|
||||
const getUploadedToastDescription = (boardId: string, state: RootState) => {
|
||||
if (boardId === 'none') {
|
||||
return t('toast.addedToUncategorized');
|
||||
}
|
||||
// Attempt to get the board's name for the toast
|
||||
const queryArgs = selectListBoardsQueryArgs(state);
|
||||
const { data } = boardsApi.endpoints.listAllBoards.select(queryArgs)(state);
|
||||
// Fall back to just the board id if we can't find the board for some reason
|
||||
const board = data?.find((b) => b.board_id === boardId);
|
||||
|
||||
return t('toast.addedToBoard', { name: board?.board_name ?? boardId });
|
||||
};
|
||||
|
||||
let lastUploadedToastTimeout: number | null = null;
|
||||
|
||||
export const addImageUploadedFulfilledListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
matcher: imagesApi.endpoints.uploadImage.matchFulfilled,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
const imageDTO = action.payload;
|
||||
const state = getState();
|
||||
const { autoAddBoardId } = state.gallery;
|
||||
|
||||
log.debug({ imageDTO }, 'Image uploaded');
|
||||
|
||||
const { postUploadAction } = action.meta.arg.originalArgs;
|
||||
|
||||
if (
|
||||
// No further actions needed for intermediate images,
|
||||
action.payload.is_intermediate &&
|
||||
// unless they have an explicit post-upload action
|
||||
!postUploadAction
|
||||
) {
|
||||
if (!postUploadAction) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -48,42 +64,40 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
} as const;
|
||||
|
||||
// default action - just upload and alert user
|
||||
if (postUploadAction?.type === 'TOAST') {
|
||||
if (!autoAddBoardId || autoAddBoardId === 'none') {
|
||||
const title = postUploadAction.title || DEFAULT_UPLOADED_TOAST.title;
|
||||
toast({ ...DEFAULT_UPLOADED_TOAST, title });
|
||||
dispatch(boardIdSelected({ boardId: 'none' }));
|
||||
dispatch(galleryViewChanged('assets'));
|
||||
} else {
|
||||
// Add this image to the board
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
board_id: autoAddBoardId,
|
||||
imageDTO,
|
||||
})
|
||||
);
|
||||
|
||||
// Attempt to get the board's name for the toast
|
||||
const queryArgs = selectListBoardsQueryArgs(state);
|
||||
const { data } = boardsApi.endpoints.listAllBoards.select(queryArgs)(state);
|
||||
|
||||
// Fall back to just the board id if we can't find the board for some reason
|
||||
const board = data?.find((b) => b.board_id === autoAddBoardId);
|
||||
const description = board
|
||||
? `${t('toast.addedToBoard')} ${board.board_name}`
|
||||
: `${t('toast.addedToBoard')} ${autoAddBoardId}`;
|
||||
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description,
|
||||
});
|
||||
dispatch(boardIdSelected({ boardId: autoAddBoardId }));
|
||||
if (postUploadAction.type === 'TOAST') {
|
||||
const boardId = imageDTO.board_id ?? 'none';
|
||||
if (lastUploadedToastTimeout !== null) {
|
||||
window.clearTimeout(lastUploadedToastTimeout);
|
||||
}
|
||||
const toastApi = toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
title: postUploadAction.title || DEFAULT_UPLOADED_TOAST.title,
|
||||
description: getUploadedToastDescription(boardId, state),
|
||||
duration: null, // we will close the toast manually
|
||||
});
|
||||
lastUploadedToastTimeout = window.setTimeout(() => {
|
||||
toastApi.close();
|
||||
}, 3000);
|
||||
/**
|
||||
* We only want to change the board and view if this is the first upload of a batch, else we end up hijacking
|
||||
* the user's gallery board and view selection:
|
||||
* - User uploads multiple images
|
||||
* - A couple uploads finish, but others are pending still
|
||||
* - User changes the board selection
|
||||
* - Pending uploads finish and change the board back to the original board
|
||||
* - User is confused as to why the board changed
|
||||
*
|
||||
* Default to true to not require _all_ image upload handlers to set this value
|
||||
*/
|
||||
const isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
|
||||
if (isFirstUploadOfBatch) {
|
||||
dispatch(boardIdSelected({ boardId }));
|
||||
dispatch(galleryViewChanged('assets'));
|
||||
}
|
||||
return;
|
||||
}
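Note: the TOAST branch above implements a self-replacing toast — each finished upload cancels the previous toast's pending close and re-arms a 3-second timer, so a rapid batch of uploads shows one toast instead of a stack. A minimal standalone sketch of the pattern (assuming only that toast(options) returns a handle with a close() method, as used above; notifyUploaded is an illustrative name):

// Sketch of the self-replacing toast pattern used above.
// Assumes toast(options) returns { close(): void }, per the diff.
let pendingCloseTimeout: number | null = null;

const notifyUploaded = (description: string) => {
  if (pendingCloseTimeout !== null) {
    // A previous toast is still pending; cancel its scheduled close so the
    // new toast's timer is the only one running.
    window.clearTimeout(pendingCloseTimeout);
  }
  const handle = toast({ title: 'Image uploaded', description, duration: null });
  pendingCloseTimeout = window.setTimeout(() => handle.close(), 3000);
};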
if (postUploadAction?.type === 'SET_UPSCALE_INITIAL_IMAGE') {
if (postUploadAction.type === 'SET_UPSCALE_INITIAL_IMAGE') {
dispatch(upscaleInitialImageChanged(imageDTO));
toast({
...DEFAULT_UPLOADED_TOAST,
@@ -92,21 +106,14 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
return;
}

// if (postUploadAction?.type === 'SET_CA_IMAGE') {
// const { id } = postUploadAction;
// dispatch(caImageChanged({ id, imageDTO }));
// toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
// return;
// }

if (postUploadAction?.type === 'SET_IPA_IMAGE') {
if (postUploadAction.type === 'SET_IPA_IMAGE') {
const { id } = postUploadAction;
dispatch(referenceImageIPAdapterImageChanged({ entityIdentifier: { id, type: 'reference_image' }, imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
return;
}

if (postUploadAction?.type === 'SET_RG_IP_ADAPTER_IMAGE') {
if (postUploadAction.type === 'SET_RG_IP_ADAPTER_IMAGE') {
const { id, referenceImageId } = postUploadAction;
dispatch(
rgIPAdapterImageChanged({ entityIdentifier: { id, type: 'regional_guidance' }, referenceImageId, imageDTO })
@@ -115,14 +122,14 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
return;
}

if (postUploadAction?.type === 'SET_NODES_IMAGE') {
if (postUploadAction.type === 'SET_NODES_IMAGE') {
const { nodeId, fieldName } = postUploadAction;
dispatch(fieldImageValueChanged({ nodeId, fieldName, value: imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: `${t('toast.setNodeField')} ${fieldName}` });
return;
}

if (postUploadAction?.type === 'REPLACE_LAYER_WITH_IMAGE') {
if (postUploadAction.type === 'REPLACE_LAYER_WITH_IMAGE') {
const { entityIdentifier } = postUploadAction;

const state = getState();

@@ -79,6 +79,7 @@ export type AppConfig = {
metadataFetchDebounce?: number;
workflowFetchDebounce?: number;
isLocal?: boolean;
maxImageUploadCount?: number;
sd: {
defaultModel?: string;
disabledControlNetModels: string[];

@@ -4,9 +4,9 @@ import { IAILoadingImageFallback, IAINoContentFallback } from 'common/components
import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import ImageContextMenu from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import { useImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import type { MouseEvent, ReactElement, ReactNode, SyntheticEvent } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { memo, useCallback, useMemo, useRef } from 'react';
import { PiImageBold, PiUploadSimpleBold } from 'react-icons/pi';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

@@ -17,7 +17,14 @@ const defaultUploadElement = <Icon as={PiUploadSimpleBold} boxSize={16} />;

const defaultNoContentFallback = <IAINoContentFallback icon={PiImageBold} />;

const baseStyles: SystemStyleObject = {
touchAction: 'none',
userSelect: 'none',
webkitUserSelect: 'none',
};

const sx: SystemStyleObject = {
...baseStyles,
'.gallery-image-container::before': {
content: '""',
display: 'inline-block',
@@ -102,59 +109,10 @@ const IAIDndImage = (props: IAIDndImageProps) => {
useThumbailFallback,
withHoverOverlay = false,
children,
onMouseOver,
onMouseOut,
dataTestId,
...rest
} = props;

const handleMouseOver = useCallback(
(e: MouseEvent<HTMLDivElement>) => {
if (onMouseOver) {
onMouseOver(e);
}
},
[onMouseOver]
);
const handleMouseOut = useCallback(
(e: MouseEvent<HTMLDivElement>) => {
if (onMouseOut) {
onMouseOut(e);
}
},
[onMouseOut]
);

const { getUploadButtonProps, getUploadInputProps } = useImageUploadButton({
postUploadAction,
isDisabled: isUploadDisabled,
});

const uploadButtonStyles = useMemo<SystemStyleObject>(() => {
const styles: SystemStyleObject = {
minH: minSize,
w: 'full',
h: 'full',
alignItems: 'center',
justifyContent: 'center',
borderRadius: 'base',
transitionProperty: 'common',
transitionDuration: '0.1s',
color: 'base.500',
};
if (!isUploadDisabled) {
Object.assign(styles, {
cursor: 'pointer',
bg: 'base.700',
_hover: {
bg: 'base.650',
color: 'base.300',
},
});
}
return styles;
}, [isUploadDisabled, minSize]);

const openInNewTab = useCallback(
(e: MouseEvent) => {
if (!imageDTO) {
@@ -168,76 +126,126 @@ const IAIDndImage = (props: IAIDndImageProps) => {
[imageDTO]
);

const ref = useRef<HTMLDivElement>(null);
useImageContextMenu(imageDTO, ref);

return (
<ImageContextMenu imageDTO={imageDTO}>
{(ref) => (
<Flex
ref={ref}
width="full"
height="full"
alignItems="center"
justifyContent="center"
position="relative"
minW={minSize ? minSize : undefined}
minH={minSize ? minSize : undefined}
userSelect="none"
cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
sx={withHoverOverlay ? sx : baseStyles}
data-selected={isSelectedForCompare ? 'selectedForCompare' : isSelected ? 'selected' : undefined}
{...rest}
>
{imageDTO && (
<Flex
ref={ref}
onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut}
width="full"
height="full"
className="gallery-image-container"
w="full"
h="full"
position={fitContainer ? 'absolute' : 'relative'}
alignItems="center"
justifyContent="center"
position="relative"
minW={minSize ? minSize : undefined}
minH={minSize ? minSize : undefined}
userSelect="none"
cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
sx={withHoverOverlay ? sx : undefined}
data-selected={isSelectedForCompare ? 'selectedForCompare' : isSelected ? 'selected' : undefined}
{...rest}
>
{imageDTO && (
<Flex
className="gallery-image-container"
w="full"
h="full"
position={fitContainer ? 'absolute' : 'relative'}
alignItems="center"
justifyContent="center"
>
<Image
src={thumbnail ? imageDTO.thumbnail_url : imageDTO.image_url}
fallbackStrategy="beforeLoadOrError"
fallbackSrc={useThumbailFallback ? imageDTO.thumbnail_url : undefined}
fallback={useThumbailFallback ? undefined : <IAILoadingImageFallback image={imageDTO} />}
onError={onError}
draggable={false}
w={imageDTO.width}
objectFit="contain"
maxW="full"
maxH="full"
borderRadius="base"
sx={imageSx}
data-testid={dataTestId}
/>
{withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
</Flex>
)}
{!imageDTO && !isUploadDisabled && (
<>
<Flex sx={uploadButtonStyles} {...getUploadButtonProps()}>
<input {...getUploadInputProps()} />
{uploadElement}
</Flex>
</>
)}
{!imageDTO && isUploadDisabled && noContentFallback}
{imageDTO && !isDragDisabled && (
<IAIDraggable
data={draggableData}
disabled={isDragDisabled || !imageDTO}
onClick={onClick}
onAuxClick={openInNewTab}
/>
)}
{children}
{!isDropDisabled && <IAIDroppable data={droppableData} disabled={isDropDisabled} dropLabel={dropLabel} />}
<Image
src={thumbnail ? imageDTO.thumbnail_url : imageDTO.image_url}
fallbackStrategy="beforeLoadOrError"
fallbackSrc={useThumbailFallback ? imageDTO.thumbnail_url : undefined}
fallback={useThumbailFallback ? undefined : <IAILoadingImageFallback image={imageDTO} />}
onError={onError}
draggable={false}
w={imageDTO.width}
objectFit="contain"
maxW="full"
maxH="full"
borderRadius="base"
sx={imageSx}
data-testid={dataTestId}
/>
{withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
</Flex>
)}
</ImageContextMenu>
{!imageDTO && !isUploadDisabled && (
<UploadButton
isUploadDisabled={isUploadDisabled}
postUploadAction={postUploadAction}
uploadElement={uploadElement}
minSize={minSize}
/>
)}
{!imageDTO && isUploadDisabled && noContentFallback}
{imageDTO && !isDragDisabled && (
<IAIDraggable
data={draggableData}
disabled={isDragDisabled || !imageDTO}
onClick={onClick}
onAuxClick={openInNewTab}
/>
)}
{children}
{!isDropDisabled && <IAIDroppable data={droppableData} disabled={isDropDisabled} dropLabel={dropLabel} />}
</Flex>
);
};

export default memo(IAIDndImage);

const UploadButton = memo(
({
isUploadDisabled,
postUploadAction,
uploadElement,
minSize,
}: {
isUploadDisabled: boolean;
postUploadAction?: PostUploadAction;
uploadElement: ReactNode;
minSize: number;
}) => {
const { getUploadButtonProps, getUploadInputProps } = useImageUploadButton({
postUploadAction,
isDisabled: isUploadDisabled,
});

const uploadButtonStyles = useMemo<SystemStyleObject>(() => {
const styles: SystemStyleObject = {
minH: minSize,
w: 'full',
h: 'full',
alignItems: 'center',
justifyContent: 'center',
borderRadius: 'base',
transitionProperty: 'common',
transitionDuration: '0.1s',
color: 'base.500',
};
if (!isUploadDisabled) {
Object.assign(styles, {
cursor: 'pointer',
bg: 'base.700',
_hover: {
bg: 'base.650',
color: 'base.300',
},
});
}
return styles;
}, [isUploadDisabled, minSize]);

return (
<Flex sx={uploadButtonStyles} {...getUploadButtonProps()}>
<input {...getUploadInputProps()} />
{uploadElement}
</Flex>
);
}
);

UploadButton.displayName = 'UploadButton';

@@ -1,15 +1,14 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

type Props = {
isOver: boolean;
label?: string;
withBackdrop?: boolean;
};

const IAIDropOverlay = (props: Props) => {
const { t } = useTranslation();
const { isOver, label = t('gallery.drop') } = props;
const { isOver, label, withBackdrop = true } = props;
return (
<Flex position="absolute" top={0} right={0} bottom={0} left={0}>
<Flex
@@ -20,7 +19,7 @@ const IAIDropOverlay = (props: Props) => {
left={0}
w="full"
h="full"
bg="base.900"
bg={withBackdrop ? 'base.900' : 'transparent'}
opacity={0.7}
borderRadius="base"
alignItems="center"
@@ -45,16 +44,18 @@ const IAIDropOverlay = (props: Props) => {
alignItems="center"
justifyContent="center"
>
<Text
fontSize="lg"
fontWeight="semibold"
color={isOver ? 'invokeYellow.300' : 'base.500'}
transitionProperty="common"
transitionDuration="0.1s"
textAlign="center"
>
{label}
</Text>
{label && (
<Text
fontSize="lg"
fontWeight="semibold"
color={isOver ? 'invokeYellow.300' : 'base.500'}
transitionProperty="common"
transitionDuration="0.1s"
textAlign="center"
>
{label}
</Text>
)}
</Flex>
</Flex>
);
invokeai/frontend/web/src/common/components/IconMenuItem.tsx (new file, 30 lines)
@@ -0,0 +1,30 @@
import type { MenuItemProps } from '@invoke-ai/ui-library';
import { Flex, MenuItem, Tooltip } from '@invoke-ai/ui-library';
import type { ReactNode } from 'react';

type Props = MenuItemProps & {
tooltip?: ReactNode;
icon: ReactNode;
};

export const IconMenuItem = ({ tooltip, icon, ...props }: Props) => {
return (
<Tooltip label={tooltip} placement="top" gutter={12}>
<MenuItem
display="flex"
alignItems="center"
justifyContent="center"
w="min-content"
aspectRatio="1"
borderRadius="base"
{...props}
>
{icon}
</MenuItem>
</Tooltip>
);
};

export const IconMenuItemGroup = ({ children }: { children: ReactNode }) => {
return <Flex gap={2}>{children}</Flex>;
};
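A quick usage sketch for the new IconMenuItem / IconMenuItemGroup pair (the icon and handler below are illustrative, not from this diff):

import { IconMenuItem, IconMenuItemGroup } from 'common/components/IconMenuItem';
import { PiTrashSimpleBold } from 'react-icons/pi';

// A compact row of square, icon-only menu items; the tooltip stands in for
// the usual visible label.
const ExampleRow = () => (
  <IconMenuItemGroup>
    <IconMenuItem
      tooltip="Delete"
      icon={<PiTrashSimpleBold />}
      onClick={() => console.log('delete clicked')}
    />
  </IconMenuItemGroup>
);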
@@ -1,22 +1,12 @@
import { Box, Flex, Heading } from '@invoke-ai/ui-library';
import type { AnimationProps } from 'framer-motion';
import { motion } from 'framer-motion';
import { useAppSelector } from 'app/store/storeHooks';
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
import { memo } from 'react';
import type { DropzoneState } from 'react-dropzone';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';

const initial: AnimationProps['initial'] = {
opacity: 0,
};
const animate: AnimationProps['animate'] = {
opacity: 1,
transition: { duration: 0.1 },
};
const exit: AnimationProps['exit'] = {
opacity: 0,
transition: { duration: 0.1 },
};
import { useBoardName } from 'services/api/hooks/useBoardName';

type ImageUploadOverlayProps = {
dropzone: DropzoneState;
@@ -24,7 +14,6 @@ type ImageUploadOverlayProps = {
};

const ImageUploadOverlay = (props: ImageUploadOverlayProps) => {
const { t } = useTranslation();
const { dropzone, setIsHandlingUpload } = props;

useHotkeys(
@@ -36,67 +25,65 @@ const ImageUploadOverlay = (props: ImageUploadOverlayProps) => {
);

return (
<Box
key="image-upload-overlay"
initial={initial}
animate={animate}
exit={exit}
as={motion.div}
position="absolute"
top={0}
insetInlineStart={0}
width="100dvw"
height="100dvh"
zIndex={999}
backdropFilter="blur(20px)"
>
<Box position="absolute" top={0} right={0} bottom={0} left={0} zIndex={999} backdropFilter="blur(20px)">
<Flex position="absolute" top={0} right={0} bottom={0} left={0} bg="base.900" opacity={0.7} />
<Flex
position="absolute"
top={0}
insetInlineStart={0}
w="full"
h="full"
bg="base.900"
opacity={0.7}
alignItems="center"
justifyContent="center"
flexDir="column"
gap={4}
top={2}
right={2}
bottom={2}
left={2}
opacity={1}
borderWidth={2}
borderColor={dropzone.isDragAccept ? 'invokeYellow.300' : 'error.500'}
borderRadius="base"
borderStyle="dashed"
transitionProperty="common"
transitionDuration="0.1s"
/>
<Flex
position="absolute"
top={0}
insetInlineStart={0}
width="full"
height="full"
alignItems="center"
justifyContent="center"
p={4}
color={dropzone.isDragReject ? 'error.300' : undefined}
>
<Flex
width="full"
height="full"
alignItems="center"
justifyContent="center"
flexDir="column"
gap={4}
borderWidth={3}
borderRadius="xl"
borderStyle="dashed"
color="base.100"
borderColor="base.200"
>
{dropzone.isDragAccept ? (
<Heading size="lg">{t('gallery.dropToUpload')}</Heading>
) : (
<>
<Heading size="lg">{t('toast.invalidUpload')}</Heading>
<Heading size="md">{t('toast.uploadFailedInvalidUploadDesc')}</Heading>
</>
)}
</Flex>
{dropzone.isDragAccept && <DragAcceptMessage />}
{!dropzone.isDragAccept && <DragRejectMessage />}
</Flex>
</Box>
);
};
export default memo(ImageUploadOverlay);

const DragAcceptMessage = () => {
const { t } = useTranslation();
const selectedBoardId = useAppSelector(selectSelectedBoardId);
const boardName = useBoardName(selectedBoardId);

return (
<>
<Heading size="lg">{t('gallery.dropToUpload')}</Heading>
<Heading size="md">{t('toast.imagesWillBeAddedTo', { boardName })}</Heading>
</>
);
};

const DragRejectMessage = () => {
const { t } = useTranslation();
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);

if (maxImageUploadCount === undefined) {
return (
<>
<Heading size="lg">{t('toast.invalidUpload')}</Heading>
<Heading size="md">{t('toast.uploadFailedInvalidUploadDesc')}</Heading>
</>
);
}

return (
<>
<Heading size="lg">{t('toast.invalidUpload')}</Heading>
<Heading size="md">{t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount })}</Heading>
</>
);
};

@@ -51,7 +51,7 @@ export const buildUseBoolean = (initialValue: boolean): [() => UseBoolean, Writa
* Hook to manage a boolean state. Use this for a local boolean state.
* @param initialValue Initial value of the boolean
*/
export const useBoolean = (initialValue: boolean) => {
export const useBoolean = (initialValue: boolean): UseBoolean => {
const [isTrue, set] = useState(initialValue);

const setTrue = useCallback(() => {
@@ -72,3 +72,82 @@ export const useBoolean = (initialValue: boolean) => {
toggle,
};
};

type UseDisclosure = {
isOpen: boolean;
open: () => void;
close: () => void;
set: (isOpen: boolean) => void;
toggle: () => void;
};

/**
* This is the same as `buildUseBoolean`, but the method names are more descriptive,
* serving the semantics of a disclosure state.
*
* Creates a hook to manage a boolean state. The boolean is stored in a nanostores atom.
* Returns a tuple containing the hook and the atom. Use this for global boolean state.
*
* @param defaultIsOpen Initial state of the disclosure
*/
export const buildUseDisclosure = (defaultIsOpen: boolean): [() => UseDisclosure, WritableAtom<boolean>] => {
const $isOpen = atom(defaultIsOpen);

const open = () => {
$isOpen.set(true);
};
const close = () => {
$isOpen.set(false);
};
const set = (isOpen: boolean) => {
$isOpen.set(isOpen);
};
const toggle = () => {
$isOpen.set(!$isOpen.get());
};

const useDisclosure = () => {
const isOpen = useStore($isOpen);

return {
isOpen,
open,
close,
set,
toggle,
};
};

return [useDisclosure, $isOpen] as const;
};
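A usage sketch for buildUseDisclosure: call it once at module scope; the returned tuple gives a React hook plus the raw nanostores atom for non-React code (the panel name below is illustrative):

// Module scope — one shared, global disclosure (illustrative name).
export const [useExamplePanel, $examplePanelIsOpen] = buildUseDisclosure(false);

// In a component, read and mutate via the hook:
//   const panel = useExamplePanel();
//   panel.isOpen; panel.open(); panel.toggle();

// Outside React (e.g. in a listener), use the atom directly:
//   $examplePanelIsOpen.set(false);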
/**
* This is the same as `useBoolean`, but the method names are more descriptive,
* serving the semantics of a disclosure state.
*
* Hook to manage a boolean state. Use this for a local boolean state.
* @param defaultIsOpen Initial state of the disclosure
*
* @knipignore
*/
export const useDisclosure = (defaultIsOpen: boolean): UseDisclosure => {
const [isOpen, set] = useState(defaultIsOpen);

const open = useCallback(() => {
set(true);
}, [set]);
const close = useCallback(() => {
set(false);
}, [set]);
const toggle = useCallback(() => {
set((val) => !val);
}, [set]);

return {
isOpen,
open,
close,
set,
toggle,
};
};

@@ -1,6 +1,8 @@
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { logger } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { useCallback, useEffect, useState } from 'react';
@@ -10,89 +12,89 @@ import { useTranslation } from 'react-i18next';
import { useUploadImageMutation } from 'services/api/endpoints/images';
import type { PostUploadAction } from 'services/api/types';

const log = logger('gallery');

const accept: Accept = {
'image/png': ['.png'],
'image/jpeg': ['.jpg', '.jpeg', '.png'],
};

const selectPostUploadAction = createMemoizedSelector(selectActiveTab, (activeTabName) => {
let postUploadAction: PostUploadAction = { type: 'TOAST' };

if (activeTabName === 'upscaling') {
postUploadAction = { type: 'SET_UPSCALE_INITIAL_IMAGE' };
}

return postUploadAction;
});

export const useFullscreenDropzone = () => {
useAssertSingleton('useFullscreenDropzone');
const { t } = useTranslation();
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const [isHandlingUpload, setIsHandlingUpload] = useState<boolean>(false);
const postUploadAction = useAppSelector(selectPostUploadAction);
const [uploadImage] = useUploadImageMutation();
const activeTabName = useAppSelector(selectActiveTab);
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);

const fileRejectionCallback = useCallback(
(rejection: FileRejection) => {
setIsHandlingUpload(true);

toast({
id: 'UPLOAD_FAILED',
title: t('toast.uploadFailed'),
description: rejection.errors.map((error) => error.message).join('\n'),
status: 'error',
});
},
[t]
);

const fileAcceptedCallback = useCallback(
(file: File) => {
uploadImage({
file,
image_category: 'user',
is_intermediate: false,
postUploadAction,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
});
},
[autoAddBoardId, postUploadAction, uploadImage]
);
const getPostUploadAction = useCallback((): PostUploadAction => {
if (activeTabName === 'upscaling') {
return { type: 'SET_UPSCALE_INITIAL_IMAGE' };
} else {
return { type: 'TOAST' };
}
}, [activeTabName]);

const onDrop = useCallback(
(acceptedFiles: Array<File>, fileRejections: Array<FileRejection>) => {
if (fileRejections.length > 1) {
if (fileRejections.length > 0) {
const errors = fileRejections.map((rejection) => ({
errors: rejection.errors.map(({ message }) => message),
file: rejection.file.path,
}));
log.error({ errors }, 'Invalid upload');
const description =
maxImageUploadCount === undefined
? t('toast.uploadFailedInvalidUploadDesc')
: t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount });

toast({
id: 'UPLOAD_FAILED',
title: t('toast.uploadFailed'),
description: t('toast.uploadFailedInvalidUploadDesc'),
description,
status: 'error',
});

setIsHandlingUpload(false);
return;
}

fileRejections.forEach((rejection: FileRejection) => {
fileRejectionCallback(rejection);
});
for (const [i, file] of acceptedFiles.entries()) {
uploadImage({
file,
image_category: 'user',
is_intermediate: false,
postUploadAction: getPostUploadAction(),
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
// The `imageUploaded` listener does some extra logic, like switching to the asset view on upload on the
// first upload of a "batch".
isFirstUploadOfBatch: i === 0,
});
}

acceptedFiles.forEach((file: File) => {
fileAcceptedCallback(file);
});
setIsHandlingUpload(false);
},
[t, fileAcceptedCallback, fileRejectionCallback]
[t, maxImageUploadCount, uploadImage, getPostUploadAction, autoAddBoardId]
);

const onDragOver = useCallback(() => {
setIsHandlingUpload(true);
}, []);

const onDragLeave = useCallback(() => {
setIsHandlingUpload(false);
}, []);

const dropzone = useDropzone({
accept,
noClick: true,
onDrop,
onDragOver,
multiple: false,
onDragLeave,
noKeyboard: true,
multiple: maxImageUploadCount === undefined || maxImageUploadCount > 1,
maxFiles: maxImageUploadCount,
});
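For orientation, a sketch of how a fullscreen dropzone like this is typically mounted. The hook's return shape is not shown in this diff, so the destructured names below are assumptions:

// Hypothetical consumer; assumes useFullscreenDropzone exposes `dropzone`
// (the react-dropzone state) and the isHandlingUpload flag/setter.
const AppDropzone = () => {
  const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
  return (
    <div {...dropzone.getRootProps()}>
      <input {...dropzone.getInputProps()} />
      {isHandlingUpload && (
        <ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
      )}
    </div>
  );
};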
useEffect(() => {

@@ -1,15 +1,23 @@
import { logger } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import type { FileRejection } from 'react-dropzone';
import { useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next';
import { useUploadImageMutation } from 'services/api/endpoints/images';
import type { PostUploadAction } from 'services/api/types';

type UseImageUploadButtonArgs = {
postUploadAction?: PostUploadAction;
isDisabled?: boolean;
allowMultiple?: boolean;
};

const log = logger('gallery');

/**
* Provides image uploader functionality to any component.
*
@@ -29,28 +37,58 @@ type UseImageUploadButtonArgs = {
* <Button {...getUploadButtonProps()} /> // will open the file dialog on click
* <input {...getUploadInputProps()} /> // hidden, handles native upload functionality
*/
export const useImageUploadButton = ({ postUploadAction, isDisabled }: UseImageUploadButtonArgs) => {
export const useImageUploadButton = ({
postUploadAction,
isDisabled,
allowMultiple = false,
}: UseImageUploadButtonArgs) => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const [uploadImage] = useUploadImageMutation();
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
const { t } = useTranslation();

const onDropAccepted = useCallback(
(files: File[]) => {
const file = files[0];

if (!file) {
return;
for (const [i, file] of files.entries()) {
uploadImage({
file,
image_category: 'user',
is_intermediate: false,
postUploadAction: postUploadAction ?? { type: 'TOAST' },
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
isFirstUploadOfBatch: i === 0,
});
}

uploadImage({
file,
image_category: 'user',
is_intermediate: false,
postUploadAction: postUploadAction ?? { type: 'TOAST' },
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
});
},
[autoAddBoardId, postUploadAction, uploadImage]
);

const onDropRejected = useCallback(
(fileRejections: FileRejection[]) => {
if (fileRejections.length > 0) {
const errors = fileRejections.map((rejection) => ({
errors: rejection.errors.map(({ message }) => message),
file: rejection.file.path,
}));
log.error({ errors }, 'Invalid upload');
const description =
maxImageUploadCount === undefined
? t('toast.uploadFailedInvalidUploadDesc')
: t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount });

toast({
id: 'UPLOAD_FAILED',
title: t('toast.uploadFailed'),
description,
status: 'error',
});

return;
}
},
[maxImageUploadCount, t]
);

const {
getRootProps: getUploadButtonProps,
getInputProps: getUploadInputProps,
@@ -58,9 +96,11 @@ export const useImageUploadButton = ({ postUploadAction, isDisabled }: UseImageU
} = useDropzone({
accept: { 'image/png': ['.png'], 'image/jpeg': ['.jpg', '.jpeg', '.png'] },
onDropAccepted,
onDropRejected,
disabled: isDisabled,
noDrag: true,
multiple: false,
multiple: allowMultiple && (maxImageUploadCount === undefined || maxImageUploadCount > 1),
maxFiles: maxImageUploadCount,
});

return { getUploadButtonProps, getUploadInputProps, openUploader };
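Following the hook's docstring above, a sketch of a multi-file upload button using the new allowMultiple flag (the component itself is illustrative):

const MultiUploadButton = () => {
  const { getUploadButtonProps, getUploadInputProps } = useImageUploadButton({
    postUploadAction: { type: 'TOAST' },
    allowMultiple: true, // still capped by maxImageUploadCount via the dropzone config
  });
  return (
    <>
      <Button {...getUploadButtonProps()}>Upload images</Button>
      <input {...getUploadInputProps()} />
    </>
  );
};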
|
||||
|
||||
@@ -3,12 +3,12 @@ import { Combobox, ConfirmationAlertDialog, Flex, FormControl, Text } from '@inv
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import {
|
||||
changeBoardReset,
|
||||
isModalOpenChanged,
|
||||
selectChangeBoardModalSlice,
|
||||
} from 'features/changeBoardModal/store/slice';
|
||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
import { memo, useCallback, useMemo, useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
|
||||
@@ -25,10 +25,10 @@ const selectIsModalOpen = createSelector(
|
||||
);
|
||||
|
||||
const ChangeBoardModal = () => {
|
||||
useAssertSingleton('ChangeBoardModal');
|
||||
const dispatch = useAppDispatch();
|
||||
const [selectedBoard, setSelectedBoard] = useState<string | null>();
|
||||
const queryArgs = useAppSelector(selectListBoardsQueryArgs);
|
||||
const { data: boards, isFetching } = useListAllBoardsQuery(queryArgs);
|
||||
const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true });
|
||||
const isModalOpen = useAppSelector(selectIsModalOpen);
|
||||
const imagesToChange = useAppSelector(selectImagesToChange);
|
||||
const [addImagesToBoard] = useAddImagesToBoardMutation();
|
||||
|
||||
@@ -80,7 +80,6 @@ export const CanvasAddEntityButtons = memo(() => {
|
||||
justifyContent="flex-start"
|
||||
leftIcon={<PiPlusBold />}
|
||||
onClick={addControlLayer}
|
||||
isDisabled={isFLUX}
|
||||
>
|
||||
{t('controlLayers.controlLayer')}
|
||||
</Button>
|
||||
|
||||
@@ -2,14 +2,10 @@ import { Alert, AlertDescription, AlertIcon, AlertTitle, Button, Flex } from '@i
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useBoolean } from 'common/hooks/useBoolean';
|
||||
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
|
||||
import {
|
||||
selectCanvasRightPanelGalleryTab,
|
||||
selectCanvasRightPanelLayersTab,
|
||||
} from 'features/controlLayers/store/ephemeral';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useCurrentDestination } from 'features/queue/hooks/useCurrentDestination';
|
||||
import { selectActiveTab } from 'features/ui/store/uiSelectors';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { AnimatePresence, motion } from 'framer-motion';
|
||||
import type { PropsWithChildren, ReactNode } from 'react';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
@@ -17,10 +13,11 @@ import { Trans, useTranslation } from 'react-i18next';
|
||||
|
||||
const ActivateImageViewerButton = (props: PropsWithChildren) => {
|
||||
const imageViewer = useImageViewer();
|
||||
const dispatch = useAppDispatch();
|
||||
const onClick = useCallback(() => {
|
||||
imageViewer.open();
|
||||
selectCanvasRightPanelGalleryTab();
|
||||
}, [imageViewer]);
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}, [imageViewer, dispatch]);
|
||||
return (
|
||||
<Button onClick={onClick} size="sm" variant="link" color="base.50">
|
||||
{props.children}
|
||||
@@ -60,7 +57,7 @@ const ActivateCanvasButton = (props: PropsWithChildren) => {
|
||||
const imageViewer = useImageViewer();
|
||||
const onClick = useCallback(() => {
|
||||
dispatch(setActiveTab('canvas'));
|
||||
selectCanvasRightPanelLayersTab();
|
||||
dispatch(activeTabCanvasRightPanelChanged('layers'));
|
||||
imageViewer.close();
|
||||
}, [dispatch, imageViewer]);
|
||||
return (
|
||||
|
||||
@@ -56,7 +56,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
|
||||
</MenuItem>
|
||||
</MenuGroup>
|
||||
<MenuGroup title={t('controlLayers.layer_other')}>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addControlLayer} isDisabled={isFLUX}>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
|
||||
{t('controlLayers.controlLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
|
||||
|
||||
@@ -1,31 +1,50 @@
|
||||
import { useDndContext } from '@dnd-kit/core';
|
||||
import { Box, Button, Spacer, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import IAIDropOverlay from 'common/components/IAIDropOverlay';
|
||||
import { CanvasLayersPanelContent } from 'features/controlLayers/components/CanvasLayersPanelContent';
|
||||
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import {
|
||||
$canvasRightPanelTabIndex,
|
||||
selectCanvasRightPanelGalleryTab,
|
||||
selectCanvasRightPanelLayersTab,
|
||||
} from 'features/controlLayers/store/ephemeral';
|
||||
import { selectEntityCountActive } from 'features/controlLayers/store/selectors';
|
||||
import GalleryPanelContent from 'features/gallery/components/GalleryPanelContent';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { selectActiveTabCanvasRightPanel } from 'features/ui/store/uiSelectors';
|
||||
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
|
||||
import { memo, useCallback, useMemo, useRef, useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const CanvasRightPanel = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const tabIndex = useStore($canvasRightPanelTabIndex);
|
||||
const activeTab = useAppSelector(selectActiveTabCanvasRightPanel);
|
||||
const imageViewer = useImageViewer();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const tabIndex = useMemo(() => {
|
||||
if (activeTab === 'gallery') {
|
||||
return 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}, [activeTab]);
|
||||
|
||||
const onClickViewerToggleButton = useCallback(() => {
|
||||
if ($canvasRightPanelTabIndex.get() !== 1) {
|
||||
$canvasRightPanelTabIndex.set(1);
|
||||
if (activeTab !== 'gallery') {
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}
|
||||
imageViewer.toggle();
|
||||
}, [imageViewer]);
|
||||
}, [imageViewer, activeTab, dispatch]);
|
||||
|
||||
const onChangeTab = useCallback(
|
||||
(index: number) => {
|
||||
if (index === 0) {
|
||||
dispatch(activeTabCanvasRightPanelChanged('layers'));
|
||||
} else {
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
useRegisteredHotkeys({
|
||||
id: 'toggleViewer',
|
||||
category: 'viewer',
|
||||
@@ -34,7 +53,7 @@ export const CanvasRightPanel = memo(() => {
|
||||
});
|
||||
|
||||
return (
|
||||
<Tabs index={tabIndex} onChange={$canvasRightPanelTabIndex.set} w="full" h="full" display="flex" flexDir="column">
|
||||
<Tabs index={tabIndex} onChange={onChangeTab} w="full" h="full" display="flex" flexDir="column">
|
||||
<TabList alignItems="center">
|
||||
<PanelTabs />
|
||||
<Spacer />
|
||||
@@ -60,27 +79,33 @@ CanvasRightPanel.displayName = 'CanvasRightPanel';
|
||||
|
||||
const PanelTabs = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const activeTab = useAppSelector(selectActiveTabCanvasRightPanel);
|
||||
const activeEntityCount = useAppSelector(selectEntityCountActive);
|
||||
const tabTimeout = useRef<number | null>(null);
|
||||
const dndCtx = useDndContext();
|
||||
const dispatch = useAppDispatch();
|
||||
const [mouseOverTab, setMouseOverTab] = useState<'layers' | 'gallery' | null>(null);
|
||||
|
||||
const onOnMouseOverLayersTab = useCallback(() => {
|
||||
setMouseOverTab('layers');
|
||||
tabTimeout.current = window.setTimeout(() => {
|
||||
if (dndCtx.active) {
|
||||
selectCanvasRightPanelLayersTab();
|
||||
dispatch(activeTabCanvasRightPanelChanged('layers'));
|
||||
}
|
||||
}, 300);
|
||||
}, [dndCtx.active]);
|
||||
}, [dndCtx.active, dispatch]);
|
||||
|
||||
const onOnMouseOverGalleryTab = useCallback(() => {
|
||||
setMouseOverTab('gallery');
|
||||
tabTimeout.current = window.setTimeout(() => {
|
||||
if (dndCtx.active) {
|
||||
selectCanvasRightPanelGalleryTab();
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}
|
||||
}, 300);
|
||||
}, [dndCtx.active]);
|
||||
}, [dndCtx.active, dispatch]);
|
||||
|
||||
const onMouseOut = useCallback(() => {
|
||||
setMouseOverTab(null);
|
||||
if (tabTimeout.current) {
|
||||
clearTimeout(tabTimeout.current);
|
||||
}
|
||||
@@ -99,9 +124,17 @@ const PanelTabs = memo(() => {
|
||||
<Box as="span" w="full">
|
||||
{layersTabLabel}
|
||||
</Box>
|
||||
{dndCtx.active && activeTab !== 'layers' && (
|
||||
<IAIDropOverlay isOver={mouseOverTab === 'layers'} withBackdrop={false} />
|
||||
)}
|
||||
</Tab>
|
||||
<Tab position="relative" onMouseOver={onOnMouseOverGalleryTab} onMouseOut={onMouseOut}>
|
||||
{t('gallery.gallery')}
|
||||
<Tab position="relative" onMouseOver={onOnMouseOverGalleryTab} onMouseOut={onMouseOut} w={32}>
|
||||
<Box as="span" w="full">
|
||||
{t('gallery.gallery')}
|
||||
</Box>
|
||||
{dndCtx.active && activeTab !== 'gallery' && (
|
||||
<IAIDropOverlay isOver={mouseOverTab === 'gallery'} withBackdrop={false} />
|
||||
)}
|
||||
</Tab>
|
||||
</>
|
||||
);
|
||||
|
||||
@@ -16,6 +16,7 @@ import {
|
||||
controlLayerModelChanged,
|
||||
controlLayerWeightChanged,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
import type { CanvasEntityIdentifier, ControlModeV2 } from 'features/controlLayers/store/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
@@ -42,6 +43,7 @@ export const ControlLayerControlAdapter = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
|
||||
const filter = useEntityFilter(entityIdentifier);
|
||||
const isFLUX = useAppSelector(selectIsFLUX);
|
||||
|
||||
const onChangeBeginEndStepPct = useCallback(
|
||||
(beginEndStepPct: [number, number]) => {
|
||||
@@ -117,7 +119,7 @@ export const ControlLayerControlAdapter = memo(() => {
|
||||
</Flex>
|
||||
<Weight weight={controlAdapter.weight} onChange={onChangeWeight} />
|
||||
<BeginEndStepPct beginEndStepPct={controlAdapter.beginEndStepPct} onChange={onChangeBeginEndStepPct} />
|
||||
{controlAdapter.type === 'controlnet' && (
|
||||
{controlAdapter.type === 'controlnet' && !isFLUX && (
|
||||
<ControlLayerControlAdapterControlMode
|
||||
controlMode={controlAdapter.controlMode}
|
||||
onChange={onChangeControlMode}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
@@ -14,18 +15,20 @@ import { memo } from 'react';
|
||||
export const ControlLayerMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<CanvasEntityMenuItemsFilter />
|
||||
<ControlLayerMenuItemsConvertControlToRaster />
|
||||
<ControlLayerMenuItemsTransparencyEffect />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<CanvasEntityMenuItemsSave />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
@@ -6,12 +6,11 @@ import { memo } from 'react';
|
||||
|
||||
export const IPAdapterMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
@@ -9,13 +10,15 @@ import { memo } from 'react';
|
||||
export const InpaintMaskMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
import { Checkbox, ConfirmationAlertDialog, Flex, FormControl, FormLabel, Text } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import { buildUseBoolean } from 'common/hooks/useBoolean';
|
||||
import { newCanvasSessionRequested, newGallerySessionRequested } from 'features/controlLayers/store/actions';
|
||||
import {
|
||||
selectCanvasRightPanelGalleryTab,
|
||||
selectCanvasRightPanelLayersTab,
|
||||
} from 'features/controlLayers/store/ephemeral';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import {
|
||||
selectSystemShouldConfirmOnNewSession,
|
||||
shouldConfirmOnNewSessionToggled,
|
||||
} from 'features/system/store/systemSlice';
|
||||
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
@@ -26,7 +24,7 @@ export const useNewGallerySession = () => {
|
||||
const newGallerySessionImmediate = useCallback(() => {
|
||||
dispatch(newGallerySessionRequested());
|
||||
imageViewer.open();
|
||||
selectCanvasRightPanelGalleryTab();
|
||||
dispatch(activeTabCanvasRightPanelChanged('gallery'));
|
||||
}, [dispatch, imageViewer]);
|
||||
|
||||
const newGallerySessionWithDialog = useCallback(() => {
|
||||
@@ -49,7 +47,7 @@ export const useNewCanvasSession = () => {
|
||||
const newCanvasSessionImmediate = useCallback(() => {
|
||||
dispatch(newCanvasSessionRequested());
|
||||
imageViewer.close();
|
||||
selectCanvasRightPanelLayersTab();
|
||||
dispatch(activeTabCanvasRightPanelChanged('layers'));
|
||||
}, [dispatch, imageViewer]);
|
||||
|
||||
const newCanvasSessionWithDialog = useCallback(() => {
|
||||
@@ -65,6 +63,7 @@ export const useNewCanvasSession = () => {
|
||||
};
|
||||
|
||||
export const NewGallerySessionDialog = memo(() => {
|
||||
useAssertSingleton('NewGallerySessionDialog');
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
@@ -100,6 +99,7 @@ export const NewGallerySessionDialog = memo(() => {
|
||||
NewGallerySessionDialog.displayName = 'NewGallerySessionDialog';
|
||||
|
||||
export const NewCanvasSessionDialog = memo(() => {
|
||||
useAssertSingleton('NewCanvasSessionDialog');
|
||||
const { t } = useTranslation();
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { IconMenuItemGroup } from 'common/components/IconMenuItem';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCopyToClipboard } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCopyToClipboard';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
@@ -13,17 +14,19 @@ import { memo } from 'react';
|
||||
export const RasterLayerMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<IconMenuItemGroup>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</IconMenuItemGroup>
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<CanvasEntityMenuItemsFilter />
|
||||
<RasterLayerMenuItemsConvertRasterToControl />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsCopyToClipboard />
|
||||
<CanvasEntityMenuItemsSave />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { Flex, MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsCropToBbox } from 'features/controlLayers/components/common/CanvasEntityMenuItemsCropToBbox';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
@@ -11,16 +11,18 @@ import { memo } from 'react';
|
||||
export const RegionalGuidanceMenuItems = memo(() => {
|
||||
return (
|
||||
<>
|
||||
<Flex gap={2}>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete asIcon />
|
||||
</Flex>
|
||||
<MenuDivider />
|
||||
<RegionalGuidanceMenuItemsAddPromptsAndIPAdapter />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsTransform />
|
||||
<RegionalGuidanceMenuItemsAutoNegative />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsCropToBbox />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
});

@@ -2,6 +2,7 @@ import { Button, ButtonGroup, Flex, FormControl, FormLabel, Heading, Spacer, Swi
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import { TransformFitToBboxButtons } from 'features/controlLayers/components/Transform/TransformFitToBboxButtons';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import {
@@ -11,7 +12,7 @@ import {
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiArrowsOutBold, PiCheckBold, PiXBold } from 'react-icons/pi';
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiXBold } from 'react-icons/pi';

const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) => {
  const { t } = useTranslation();
@@ -24,6 +25,7 @@ const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) =>
  const onChangeIsolatedPreview = useCallback(() => {
    dispatch(settingsIsolatedTransformingPreviewToggled());
  }, [dispatch]);
  const silentTransform = useStore(adapter.transformer.$silentTransform);

  useRegisteredHotkeys({
    id: 'applyTransform',
@@ -41,6 +43,10 @@ const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) =>
    dependencies: [adapter.transformer, isProcessing, isCanvasFocused],
  });

  if (silentTransform) {
    return null;
  }

  return (
    <Flex
      ref={ref}
@@ -49,7 +55,7 @@ const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) =>
      p={4}
      flexDir="column"
      gap={4}
      w={420}
      minW={420}
      h="auto"
      shadow="dark-lg"
      transitionProperty="height"
@@ -65,16 +71,10 @@ const TransformContent = memo(({ adapter }: { adapter: CanvasEntityAdapter }) =>
          <Switch size="sm" isChecked={isolatedTransformingPreview} onChange={onChangeIsolatedPreview} />
        </FormControl>
      </Flex>

      <TransformFitToBboxButtons adapter={adapter} />

      <ButtonGroup isAttached={false} size="sm" w="full">
        <Button
          leftIcon={<PiArrowsOutBold />}
          onClick={adapter.transformer.fitProxyRectToBbox}
          isLoading={isProcessing}
          loadingText={t('controlLayers.transform.fitToBbox')}
          variant="ghost"
        >
          {t('controlLayers.transform.fitToBbox')}
        </Button>
        <Spacer />
        <Button
          leftIcon={<PiArrowsCounterClockwiseBold />}

@@ -0,0 +1,76 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Button, Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsOutBold } from 'react-icons/pi';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { z } from 'zod';

const zMode = z.enum(['fill', 'contain', 'cover']);
type Mode = z.infer<typeof zMode>;

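// The three fit modes mirror CSS object-fit semantics (an inference from the transformer
// implementation, not stated in this diff): 'contain' scales the layer to fit entirely inside the
// bbox while preserving aspect ratio, 'cover' scales it to cover the bbox (preserving aspect
// ratio, possibly cropping), and 'fill' stretches each axis independently to match the bbox
// exactly.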
export const TransformFitToBboxButtons = memo(({ adapter }: { adapter: CanvasEntityAdapter }) => {
  const { t } = useTranslation();
  const [mode, setMode] = useState<Mode>('contain');
  const isProcessing = useStore(adapter.transformer.$isProcessing);
  const onClick = useCallback(() => {
    if (mode === 'contain') {
      adapter.transformer.fitToBboxContain();
      return;
    }

    if (mode === 'cover') {
      adapter.transformer.fitToBboxCover();
      return;
    }

    if (mode === 'fill') {
      adapter.transformer.fitToBboxFill();
      return;
    }

    assert<Equals<typeof mode, never>>(false);
  }, [adapter.transformer, mode]);

  const options = useMemo(() => {
    return [
      { value: 'contain', label: t('controlLayers.transform.fitModeContain') },
      { value: 'cover', label: t('controlLayers.transform.fitModeCover') },
      { value: 'fill', label: t('controlLayers.transform.fitModeFill') },
    ];
  }, [t]);

  const value = useMemo(() => options.find((o) => o.value === mode) ?? null, [options, mode]);

  const onChange = useCallback<ComboboxOnChange>((v) => {
    if (!v) {
      return;
    }

    setMode(zMode.parse(v.value));
  }, []);

  return (
    <Flex gap={4} w="full">
      <FormControl maxW={64}>
        <FormLabel m={0}>{t('controlLayers.transform.fitMode')}</FormLabel>
        <Combobox options={options} value={value} onChange={onChange} isSearchable={false} isClearable={false} />
      </FormControl>
      <Button
        leftIcon={<PiArrowsOutBold />}
        size="sm"
        onClick={onClick}
        isLoading={isProcessing}
        loadingText={t('controlLayers.transform.fitToBbox')}
        variant="ghost"
      >
        {t('controlLayers.transform.fitToBbox')}
      </Button>
    </Flex>
  );
});

TransformFitToBboxButtons.displayName = 'TransformFitToBboxButtons';
@@ -1,6 +1,6 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { IconMenuItem } from 'common/components/IconMenuItem';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
import {
@@ -88,36 +88,36 @@ export const CanvasEntityMenuItemsArrange = memo(() => {

  return (
    <>
      <MenuItem
      <IconMenuItem
        aria-label={t('controlLayers.moveToFront')}
        tooltip={t('controlLayers.moveToFront')}
        onClick={moveToFront}
        isDisabled={!validActions.canMoveToFront || !isInteractable}
        icon={<PiArrowLineUpBold />}
      >
        {t('controlLayers.moveToFront')}
      </MenuItem>
      <MenuItem
      />
      <IconMenuItem
        aria-label={t('controlLayers.moveForward')}
        tooltip={t('controlLayers.moveForward')}
        onClick={moveForwardOne}
        isDisabled={!validActions.canMoveForwardOne || !isInteractable}
        icon={<PiArrowUpBold />}
      >
        {t('controlLayers.moveForward')}
      </MenuItem>
      <MenuItem
      />
      <IconMenuItem
        aria-label={t('controlLayers.moveBackward')}
        tooltip={t('controlLayers.moveBackward')}
        onClick={moveBackwardOne}
        isDisabled={!validActions.canMoveBackwardOne || !isInteractable}
        icon={<PiArrowDownBold />}
      >
        {t('controlLayers.moveBackward')}
      </MenuItem>
      <MenuItem
      />
      <IconMenuItem
        aria-label={t('controlLayers.moveToBack')}
        tooltip={t('controlLayers.moveToBack')}
        onClick={moveToBack}
        isDisabled={!validActions.canMoveToBack || !isInteractable}
        icon={<PiArrowLineDownBold />}
      >
        {t('controlLayers.moveToBack')}
      </MenuItem>
      />
    </>
  );
});

CanvasEntityMenuItemsArrange.displayName = 'CanvasEntityArrangeMenuItems';
CanvasEntityMenuItemsArrange.displayName = 'CanvasEntityMenuItemsArrange';

@@ -1,5 +1,6 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { IconMenuItem } from 'common/components/IconMenuItem';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
import { entityDeleted } from 'features/controlLayers/store/canvasSlice';
@@ -7,7 +8,11 @@ import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleBold } from 'react-icons/pi';

export const CanvasEntityMenuItemsDelete = memo(() => {
type Props = {
  asIcon?: boolean;
};

export const CanvasEntityMenuItemsDelete = memo(({ asIcon = false }: Props) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const entityIdentifier = useEntityIdentifierContext();
@@ -17,6 +22,19 @@ export const CanvasEntityMenuItemsDelete = memo(() => {
    dispatch(entityDeleted({ entityIdentifier }));
  }, [dispatch, entityIdentifier]);

  if (asIcon) {
    return (
      <IconMenuItem
        aria-label={t('common.delete')}
        tooltip={t('common.delete')}
        onClick={deleteEntity}
        icon={<PiTrashSimpleBold />}
        isDestructive
        isDisabled={!isInteractable}
      />
    );
  }

  return (
    <MenuItem onClick={deleteEntity} icon={<PiTrashSimpleBold />} isDestructive isDisabled={!isInteractable}>
      {t('common.delete')}

@@ -1,5 +1,5 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { IconMenuItem } from 'common/components/IconMenuItem';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useIsEntityInteractable } from 'features/controlLayers/hooks/useEntityIsInteractable';
import { entityDuplicated } from 'features/controlLayers/store/canvasSlice';
@@ -18,9 +18,13 @@ export const CanvasEntityMenuItemsDuplicate = memo(() => {
  }, [dispatch, entityIdentifier]);

  return (
    <MenuItem onClick={onClick} icon={<PiCopyFill />} isDisabled={!isInteractable}>
      {t('controlLayers.duplicate')}
    </MenuItem>
    <IconMenuItem
      aria-label={t('controlLayers.duplicate')}
      tooltip={t('controlLayers.duplicate')}
      onClick={onClick}
      icon={<PiCopyFill />}
      isDisabled={!isInteractable}
    />
  );
});


@@ -3,7 +3,7 @@ import { useEntityIdentifierContext } from 'features/controlLayers/contexts/Enti
import { useEntityTransform } from 'features/controlLayers/hooks/useEntityTransform';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFrameCornersBold } from 'react-icons/pi';
import { PiBoundingBoxBold, PiFrameCornersBold } from 'react-icons/pi';

export const CanvasEntityMenuItemsTransform = memo(() => {
  const { t } = useTranslation();
@@ -11,9 +11,14 @@ export const CanvasEntityMenuItemsTransform = memo(() => {
  const transform = useEntityTransform(entityIdentifier);

  return (
    <MenuItem onClick={transform.start} icon={<PiFrameCornersBold />} isDisabled={transform.isDisabled}>
      {t('controlLayers.transform.transform')}
    </MenuItem>
    <>
      <MenuItem onClick={transform.start} icon={<PiFrameCornersBold />} isDisabled={transform.isDisabled}>
        {t('controlLayers.transform.transform')}
      </MenuItem>
      <MenuItem onClick={transform.fitToBbox} icon={<PiBoundingBoxBold />} isDisabled={transform.isDisabled}>
        {t('controlLayers.transform.fitToBbox')}
      </MenuItem>
    </>
  );
});


@@ -6,13 +6,12 @@ import { useEntityAdapter } from 'features/controlLayers/contexts/EntityAdapterC
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { TRANSPARENCY_CHECKERBOARD_PATTERN_DARK_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import { debounce } from 'lodash-es';
import { memo, useEffect, useMemo, useRef } from 'react';
import { useSelector } from 'react-redux';

const ChakraCanvas = chakra.canvas;

const PADDING = 2;

const CONTAINER_WIDTH = 36; // this is size 12 in our theme - need it in px for the canvas
const CONTAINER_WIDTH_PX = `${CONTAINER_WIDTH}px`;

@@ -35,48 +34,57 @@ export const CanvasEntityPreviewImage = memo(() => {
  );
  const maskColor = useSelector(selectMaskColor);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const cache = useStore(adapter.renderer.$canvasCache);
  useEffect(() => {
    if (!canvasRef.current) {
      return;
    }
    const ctx = canvasRef.current.getContext('2d');
    if (!ctx) {
      return;
    }
  const pixelRect = useStore(adapter.transformer.$pixelRect);
  const nodeRect = useStore(adapter.transformer.$nodeRect);
  const canvasCache = useStore(adapter.$canvasCache);

    if (!cache) {
      // Draw an empty canvas
      ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
      return;
    }
  const updatePreview = useMemo(
    () =>
      debounce(() => {
        if (!canvasRef.current) {
          return;
        }
        const ctx = canvasRef.current.getContext('2d');
        if (!ctx) {
          return;
        }

    const { rect, canvas } = cache;
        const pixelRect = adapter.transformer.$pixelRect.get();
        const nodeRect = adapter.transformer.$nodeRect.get();
        const canvasCache = adapter.$canvasCache.get();

        ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
        if (!canvasCache || canvasCache.width === 0 || canvasCache.height === 0) {
          // Draw an empty canvas
          ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
          return;
        }

    canvasRef.current.width = rect.width;
    canvasRef.current.height = rect.height;
    ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);

    const scale = CONTAINER_WIDTH / rect.width;
        canvasRef.current.width = pixelRect.width;
        canvasRef.current.height = pixelRect.height;

    const sx = rect.x;
    const sy = rect.y;
    const sWidth = rect.width;
    const sHeight = rect.height;
    const dx = PADDING / scale;
    const dy = PADDING / scale;
    const dWidth = rect.width - (PADDING * 2) / scale;
    const dHeight = rect.height - (PADDING * 2) / scale;
        const sx = pixelRect.x - nodeRect.x;
        const sy = pixelRect.y - nodeRect.y;
        const sWidth = pixelRect.width;
        const sHeight = pixelRect.height;
        const dx = 0;
        const dy = 0;
        const dWidth = pixelRect.width;
        const dHeight = pixelRect.height;

    ctx.drawImage(canvas, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight);
        ctx.drawImage(canvasCache, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight);

    if (maskColor) {
      ctx.fillStyle = maskColor;
      ctx.globalCompositeOperation = 'source-in';
      ctx.fillRect(0, 0, rect.width, rect.height);
    }
  }, [cache, maskColor]);
        if (maskColor) {
          ctx.fillStyle = maskColor;
          ctx.globalCompositeOperation = 'source-in';
          ctx.fillRect(0, 0, pixelRect.width, pixelRect.height);
        }
      }, 300),
    [adapter.$canvasCache, adapter.transformer.$nodeRect, adapter.transformer.$pixelRect, maskColor]
  );

  useEffect(updatePreview, [updatePreview, canvasCache, nodeRect, pixelRect]);

  return (
    <Flex

@@ -2,8 +2,11 @@ import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { deepClone } from 'common/util/deepClone';
import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { canvasReset } from 'features/controlLayers/store/actions';
import {
  bboxChangedFromCanvas,
  controlLayerAdded,
  inpaintMaskAdded,
  rasterLayerAdded,
@@ -14,19 +17,32 @@ import {
  rgPositivePromptChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import {
  selectBboxModelBase,
  selectBboxRect,
  selectCanvasSlice,
  selectEntityOrThrow,
} from 'features/controlLayers/store/selectors';
import type {
  CanvasEntityIdentifier,
  CanvasRasterLayerState,
  CanvasRegionalGuidanceState,
  ControlNetConfig,
  IPAdapterConfig,
  T2IAdapterConfig,
} from 'features/controlLayers/store/types';
import { initialControlNet, initialIPAdapter, initialT2IAdapter } from 'features/controlLayers/store/util';
import {
  imageDTOToImageObject,
  initialControlNet,
  initialIPAdapter,
  initialT2IAdapter,
} from 'features/controlLayers/store/util';
import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
import { useCallback } from 'react';
import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/api/endpoints/models';
import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import type { ControlNetModelConfig, ImageDTO, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import { isControlNetOrT2IAdapterModelConfig, isIPAdapterModelConfig } from 'services/api/types';

export const selectDefaultControlAdapter = createSelector(
@@ -90,6 +106,74 @@ export const useAddRasterLayer = () => {
  return func;
};

export const useNewRasterLayerFromImage = () => {
  const dispatch = useAppDispatch();
  const bboxRect = useAppSelector(selectBboxRect);
  const func = useCallback(
    (imageDTO: ImageDTO) => {
      const imageObject = imageDTOToImageObject(imageDTO);
      const overrides: Partial<CanvasRasterLayerState> = {
        position: { x: bboxRect.x, y: bboxRect.y },
        objects: [imageObject],
      };
      dispatch(rasterLayerAdded({ overrides, isSelected: true }));
    },
    [bboxRect.x, bboxRect.y, dispatch]
  );

  return func;
};

/**
 * Returns a function that adds a new canvas with the given image as the initial image, replicating the img2img flow:
 * - Reset the canvas
 * - Resize the bbox to the image's aspect ratio at the optimal size for the selected model
 * - Add the image as a raster layer
 * - Resize the layer to fit the bbox using the 'fill' strategy
 *
 * This allows the user to immediately generate a new image from the given image without any additional steps.
 */
export const useNewCanvasFromImage = () => {
  const dispatch = useAppDispatch();
  const bboxRect = useAppSelector(selectBboxRect);
  const base = useAppSelector(selectBboxModelBase);
  const func = useCallback(
    (imageDTO: ImageDTO) => {
      // Calculate the new bbox dimensions to fit the image's aspect ratio at the optimal size
      const ratio = imageDTO.width / imageDTO.height;
      const optimalDimension = getOptimalDimension(base);
      const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);

      // The overrides need to include the layer's ID so we can transform the layer after it is initialized
      const overrides = {
        id: getPrefixedId('raster_layer'),
        position: { x: bboxRect.x, y: bboxRect.y },
        objects: [imageDTOToImageObject(imageDTO)],
      } satisfies Partial<CanvasRasterLayerState>;

      CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
        // Skip the callback if the adapter is not the one we are creating
        if (adapter.id !== overrides.id) {
          return false;
        }
        // Fit the layer to the bbox w/ fill strategy
        await adapter.transformer.startTransform({ silent: true });
        adapter.transformer.fitToBboxFill();
        await adapter.transformer.applyTransform();
        return true;
      });

      dispatch(canvasReset());
      // The `bboxChangedFromCanvas` reducer does no validation! Careful!
      dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
      dispatch(rasterLayerAdded({ overrides, isSelected: true }));
    },
    [base, bboxRect.x, bboxRect.y, dispatch]
  );

  return func;
};

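// A minimal usage sketch (not part of this diff; the component name and menu wiring are
// hypothetical): a gallery context menu item could invoke the hook like so.
//
// const NewCanvasFromImageMenuItem = memo(({ imageDTO }: { imageDTO: ImageDTO }) => {
//   const newCanvasFromImage = useNewCanvasFromImage();
//   const onClick = useCallback(() => {
//     newCanvasFromImage(imageDTO);
//   }, [imageDTO, newCanvasFromImage]);
//   return <MenuItem onClick={onClick}>New Canvas from Image</MenuItem>;
// });
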
export const useAddInpaintMask = () => {
  const dispatch = useAppDispatch();
  const func = useCallback(() => {

@@ -1,13 +1,11 @@
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { entityDeleted } from 'features/controlLayers/store/canvasSlice';
import { $canvasRightPanelTab } from 'features/controlLayers/store/ephemeral';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { selectActiveTab, selectActiveTabCanvasRightPanel } from 'features/ui/store/uiSelectors';
import { useCallback, useMemo } from 'react';

export function useCanvasDeleteLayerHotkey() {
@@ -15,7 +13,7 @@ export function useCanvasDeleteLayerHotkey() {
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const isBusy = useCanvasIsBusy();
  const canvasRightPanelTab = useStore($canvasRightPanelTab);
  const canvasRightPanelTab = useAppSelector(selectActiveTabCanvasRightPanel);
  const appTab = useAppSelector(selectActiveTab);

  const imageViewer = useImageViewer();

@@ -36,7 +36,7 @@ export const useEntityTransform = (entityIdentifier: CanvasEntityIdentifier | nu
    return false;
  }, [entityIdentifier, adapter, isBusy, isInteractable, isEmpty]);

  const start = useCallback(() => {
  const start = useCallback(async () => {
    if (isDisabled) {
      return;
    }
@@ -50,8 +50,27 @@ export const useEntityTransform = (entityIdentifier: CanvasEntityIdentifier | nu
    if (!adapter) {
      return;
    }
    adapter.transformer.startTransform();
    await adapter.transformer.startTransform();
  }, [isDisabled, entityIdentifier, canvasManager]);

  return { isDisabled, start } as const;
  const fitToBbox = useCallback(async () => {
    if (isDisabled) {
      return;
    }
    if (!entityIdentifier) {
      return;
    }
    if (!isTransformableEntityIdentifier(entityIdentifier)) {
      return;
    }
    const adapter = canvasManager.getAdapter(entityIdentifier);
    if (!adapter) {
      return;
    }
    await adapter.transformer.startTransform({ silent: true });
    adapter.transformer.fitToBboxContain();
    await adapter.transformer.applyTransform();
  }, [canvasManager, entityIdentifier, isDisabled]);

  return { isDisabled, start, fitToBbox } as const;
};

@@ -69,7 +69,7 @@ export class CanvasBackgroundModule extends CanvasModuleBase {
      layer: new Konva.Layer({ name: `${this.type}:layer`, listening: false, imageSmoothingEnabled: false }),
      linesGroup: new Konva.Group({ name: `${this.type}:linesGroup` }),
      lines: [],
      patternRect: new Konva.Rect({ name: `${this.type}:patternRect` }),
      patternRect: new Konva.Rect({ name: `${this.type}:patternRect`, perfectDrawEnabled: false }),
    };

    this.konva.layer.add(this.konva.patternRect);
@@ -174,6 +174,7 @@ export class CanvasBackgroundModule extends CanvasModuleBase {
        stroke: _x % 64 ? this.config.GRID_LINE_COLOR_FINE : this.config.GRID_LINE_COLOR_COARSE,
        strokeWidth,
        listening: false,
        perfectDrawEnabled: false,
      });
      this.konva.lines.push(line);
      this.konva.linesGroup.add(line);
@@ -187,6 +188,7 @@ export class CanvasBackgroundModule extends CanvasModuleBase {
        stroke: _y % 64 ? this.config.GRID_LINE_COLOR_FINE : this.config.GRID_LINE_COLOR_COARSE,
        strokeWidth,
        listening: false,
        perfectDrawEnabled: false,
      });
      this.konva.lines.push(line);
      this.konva.linesGroup.add(line);

@@ -83,6 +83,7 @@ export class CanvasBboxModule extends CanvasModuleBase {
        stroke: 'rgb(42,42,42)',
        strokeWidth: 1,
        strokeScaleEnabled: false,
        perfectDrawEnabled: false,
      }),
      overlayGroup: new Konva.Group({
        name: `${this.type}:overlayGroup`,
@@ -123,6 +124,7 @@ export class CanvasBboxModule extends CanvasModuleBase {
        strokeEnabled: false,
        draggable: false,
        fill: 'hsl(220 12% 10% / 0.8)',
        perfectDrawEnabled: false,
      }),
      transformer: new Konva.Transformer({
        name: `${this.type}:transformer`,

@@ -7,21 +7,35 @@ import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/ko
import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
import type { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { getKonvaNodeDebugAttrs, getRectIntersection } from 'features/controlLayers/konva/util';
import {
  selectIsolatedFilteringPreview,
  selectIsolatedTransformingPreview,
} from 'features/controlLayers/store/canvasSettingsSlice';
import { buildEntityIsHiddenSelector, selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import {
  buildSelectIsHidden,
  buildSelectIsSelected,
  selectBboxRect,
  selectCanvasSlice,
  selectEntity,
} from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, CanvasRenderableEntityState, Rect } from 'features/controlLayers/store/types';
import Konva from 'konva';
import { atom, computed } from 'nanostores';
import rafThrottle from 'raf-throttle';
import type { Logger } from 'roarr';
import type { ImageDTO } from 'services/api/types';
import stableHash from 'stable-hash';
import { assert } from 'tsafe';

// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`. We'll need to do a
// type assertion below in the `onInit` method, which calls these callbacks.
type InitCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;

export abstract class CanvasEntityAdapterBase<
  T extends CanvasRenderableEntityState,
  U extends string,
@@ -68,7 +82,7 @@ export abstract class CanvasEntityAdapterBase<
   *
   * If `prevState` is undefined, this is the first time the entity is being synced.
   */
  abstract sync: (state: T | undefined, prevState: T | undefined) => void;
  abstract sync: (state: T | undefined, prevState: T | undefined) => Promise<void>;

  /**
   * Gets the canvas element for the entity. If `rect` is provided, the canvas will be clipped to that rectangle.
@@ -80,7 +94,79 @@
   */
  abstract getHashableState: () => SerializableObject;

  private _selectIsHidden: Selector<RootState, boolean> | null = null;
  /**
   * Callbacks that are executed when the module is initialized.
   */
  private static initCallbacks = new Set<InitCallback>();

  /**
   * Register a callback to be run when an entity adapter is initialized.
   *
   * The callback is called for every adapter that is initialized with the adapter as its only argument. Use an early
   * return to skip entities that are not of interest, returning `false` to keep the callback registered. Return `true`
   * to unregister the callback after it is called.
   *
   * @param callback The callback to register.
   *
   * @example
   * ```ts
   * // A callback that is executed once for a specific entity:
   * const myId = 'my_id';
   * canvasManager.entityRenderer.registerOnInitCallback(async (adapter) => {
   *   if (adapter.id !== myId) {
   *     // These are not the droids you are looking for, move along
   *     return false;
   *   }
   *
   *   doSomething();
   *
   *   // Remove the callback
   *   return true;
   * });
   * ```
   *
   * @example
   * ```ts
   * // A callback that is executed once for the next entity that is initialized:
   * canvasManager.entityRenderer.registerOnInitCallback(async (adapter) => {
   *   doSomething();
   *
   *   // Remove the callback
   *   return true;
   * });
   * ```
   *
   * @example
   * ```ts
   * // A callback that is executed for every entity and is never removed:
   * canvasManager.entityRenderer.registerOnInitCallback(async (adapter) => {
   *   // Do something with the adapter
   *   return false;
   * });
   * ```
   */
  static registerInitCallback = (callback: InitCallback) => {
    const wrapped = async (adapter: CanvasEntityAdapter) => {
      const result = await callback(adapter);
      if (result) {
        this.initCallbacks.delete(wrapped);
      }
      return result;
    };
    this.initCallbacks.add(wrapped);
  };

  /**
   * Runs all init callbacks with the given entity adapter.
   * @param adapter The adapter of the entity that was initialized.
   */
  private static runInitCallbacks = (adapter: CanvasEntityAdapter) => {
    for (const callback of this.initCallbacks) {
      callback(adapter);
    }
  };

  selectIsHidden: Selector<RootState, boolean>;
  selectIsSelected: Selector<RootState, boolean>;

  /**
   * The Konva nodes that make up the entity adapter:
@@ -124,6 +210,18 @@ export abstract class CanvasEntityAdapterBase<
  $isInteractable = computed([this.$isLocked, this.$isDisabled, this.$isHidden], (isLocked, isDisabled, isHidden) => {
    return !isLocked && !isDisabled && !isHidden;
  });
  /**
   * A cache of the entity's canvas element. This is generated from a clone of the entity's Konva layer.
   */
  $canvasCache = atom<HTMLCanvasElement | null>(null);
  /**
   * Whether this entity is onscreen. This is computed based on the entity's bounding box and the stage's viewport rect.
   */
  $isOnScreen = atom(true);
  /**
   * Whether this entity's rect intersects the bbox rect.
   */
  $intersectsBbox = atom(false);

  constructor(entityIdentifier: CanvasEntityIdentifier<T['type']>, manager: CanvasManager, adapterType: U) {
    super();
@@ -152,6 +250,9 @@ export abstract class CanvasEntityAdapterBase<
    assert(state !== undefined, 'Missing entity state on creation');
    this.state = state;

    this.selectIsHidden = buildSelectIsHidden(this.entityIdentifier);
    this.selectIsSelected = buildSelectIsSelected(this.entityIdentifier);

    /**
     * There are a number of reasons we may need to show or hide a layer:
     * - The entity is enabled/disabled
@@ -159,6 +260,7 @@ export abstract class CanvasEntityAdapterBase<
     * - Staging status changes and `isolatedStagingPreview` is enabled
     * - Global filtering status changes and `isolatedFilteringPreview` is enabled
     * - Global transforming status changes and `isolatedTransformingPreview` is enabled
     * - The entity is selected or deselected (only selected and onscreen entities are rendered)
     */
    this.subscriptions.add(this.manager.stateApi.createStoreSubscription(this.selectIsHidden, this.syncVisibility));
    this.subscriptions.add(
@@ -169,12 +271,28 @@ export abstract class CanvasEntityAdapterBase<
      this.manager.stateApi.createStoreSubscription(selectIsolatedTransformingPreview, this.syncVisibility)
    );
    this.subscriptions.add(this.manager.stateApi.$transformingAdapter.listen(this.syncVisibility));
    this.subscriptions.add(this.manager.stateApi.createStoreSubscription(this.selectIsSelected, this.syncVisibility));

    /**
     * The tool preview may need to be updated when the entity is locked or disabled. For example, when we disable the
     * entity, we should hide the tool preview & change the cursor.
     */
    this.subscriptions.add(this.$isInteractable.subscribe(this.manager.tool.render));

    /**
     * When the stage is transformed in any way (panning, zooming, resizing) or the entity is moved, we need to update
     * the entity's onscreen status. We also need to subscribe to changes to the entity's pixel rect, but this is
     * handled in the initialize method.
     */
    this.subscriptions.add(this.manager.stage.$stageAttrs.listen(this.syncIsOnscreen));
    this.subscriptions.add(this.manager.stateApi.createStoreSubscription(this.selectPosition, this.syncIsOnscreen));

    /**
     * When the bbox rect changes or the entity is moved, we need to update the intersectsBbox status. We also need to
     * subscribe to changes to the entity's pixel rect, but this is handled in the initialize method.
     */
    this.subscriptions.add(this.manager.stateApi.createStoreSubscription(selectBboxRect, this.syncIntersectsBbox));
    this.subscriptions.add(this.manager.stateApi.createStoreSubscription(this.selectPosition, this.syncIntersectsBbox));
  }

  /**
@@ -185,21 +303,79 @@ export abstract class CanvasEntityAdapterBase<
    (canvas) => selectEntity(canvas, this.entityIdentifier) as T | undefined
  );

  // This must be a getter because the selector depends on the entityIdentifier, which is set in the constructor.
  get selectIsHidden() {
    if (!this._selectIsHidden) {
      this._selectIsHidden = buildEntityIsHiddenSelector(this.entityIdentifier);
  /**
   * A redux selector that selects the entity's position from the canvas slice.
   */
  selectPosition = createSelector(this.selectState, (entity) => entity?.position);

  syncIsOnscreen = () => {
    const stageRect = this.manager.stage.getScaledStageRect();
    const isOnScreen = this.checkIntersection(stageRect);
    const prevIsOnScreen = this.$isOnScreen.get();
    this.$isOnScreen.set(isOnScreen);
    if (prevIsOnScreen !== isOnScreen) {
      this.log.trace(`Moved ${isOnScreen ? 'on-screen' : 'off-screen'}`);
    }
    return this._selectIsHidden;
  }
    this.syncVisibility();
  };

  syncIntersectsBbox = () => {
    const bboxRect = this.manager.stateApi.getBbox().rect;
    const intersectsBbox = this.checkIntersection(bboxRect);
    const prevIntersectsBbox = this.$intersectsBbox.get();
    this.$intersectsBbox.set(intersectsBbox);
    if (prevIntersectsBbox !== intersectsBbox) {
      this.log.trace(`Moved ${intersectsBbox ? 'into bbox' : 'out of bbox'}`);
    }
  };

  checkIntersection = (rect: Rect): boolean => {
    const entityRect = this.transformer.$pixelRect.get();
    const position = this.manager.stateApi.runSelector(this.selectPosition);
    if (!position) {
      return false;
    }
    const entityRectRelativeToStage = {
      x: entityRect.x + position.x,
      y: entityRect.y + position.y,
      width: entityRect.width,
      height: entityRect.height,
    };
    const intersection = getRectIntersection(rect, entityRectRelativeToStage);
    const doesIntersect = intersection.width > 0 && intersection.height > 0;
    return doesIntersect;
  };
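
  // Worked example (illustrative numbers, not from this diff): with $pixelRect
  // { x: 10, y: 10, width: 100, height: 100 } and an entity position of { x: 490, y: 0 }, the
  // stage-relative rect is { x: 500, y: 10, width: 100, height: 100 }. Against a viewport rect of
  // { x: 0, y: 0, width: 512, height: 512 }, getRectIntersection yields a 12x100 overlap, so the
  // entity is considered onscreen.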

  initialize = async () => {
    this.log.debug('Initializing module');

    /**
     * When the pixel rect changes, we need to sync the onscreen state of the parent entity.
     *
     * TODO(psyche): It'd be nice to set this listener in the constructor, but the transformer is only created in the
     * concrete classes, so we have to do this here. IIRC the reason for this awkwardness was to satisfy a circular
     * dependency between the transformer and concrete adapter classes
     */
    this.subscriptions.add(this.transformer.$pixelRect.listen(this.syncIsOnscreen));

    /**
     * When the pixel rect changes, we need to sync the bbox intersection state of the parent entity.
     *
     * TODO(psyche): It'd be nice to set this listener in the constructor, but the transformer is only created in the
     * concrete classes, so we have to do this here. IIRC the reason for this awkwardness was to satisfy a circular
     * dependency between the transformer and concrete adapter classes
     */
    this.subscriptions.add(this.transformer.$pixelRect.listen(this.syncIntersectsBbox));

    await this.sync(this.manager.stateApi.runSelector(this.selectState), undefined);
    this.transformer.initialize();
    await this.renderer.initialize();
    this.syncZIndices();
    this.syncVisibility();

    // Call the init callbacks.
    // TODO(psyche): Get rid of the cast - see note in type def for `InitCallback`.
    CanvasEntityAdapterBase.runInitCallbacks(this as CanvasEntityAdapter);
  };

  syncZIndices = () => {
@@ -221,7 +397,7 @@ export abstract class CanvasEntityAdapterBase<
  syncIsEnabled = () => {
    this.log.trace('Updating visibility');
    this.konva.layer.visible(this.state.isEnabled);
    this.renderer.syncCache(this.state.isEnabled);
    this.renderer.syncKonvaCache(this.state.isEnabled);
    this.transformer.syncInteractionState();
    this.$isDisabled.set(!this.state.isEnabled);
  };
@@ -252,26 +428,57 @@ export abstract class CanvasEntityAdapterBase<
    this.renderer.updateOpacity();
  };

  syncVisibility = () => {
    let isHidden = this.manager.stateApi.runSelector(this.selectIsHidden);
  syncVisibility = rafThrottle(() => {
    // Handle the base hidden state
    if (this.manager.stateApi.runSelector(this.selectIsHidden)) {
      this.setVisibility(false);
      return;
    }

    // Handle isolated preview modes - if another entity is filtering or transforming, we may need to hide this entity.
    if (this.manager.stateApi.runSelector(selectIsolatedFilteringPreview)) {
      const filteringEntityIdentifier = this.manager.stateApi.$filteringAdapter.get()?.entityIdentifier;
      if (filteringEntityIdentifier && filteringEntityIdentifier.id !== this.id) {
        isHidden = true;
        this.setVisibility(false);
        return;
      }
    }

    if (this.manager.stateApi.runSelector(selectIsolatedTransformingPreview)) {
      const transformingEntityIdentifier = this.manager.stateApi.$transformingAdapter.get()?.entityIdentifier;
      if (transformingEntityIdentifier && transformingEntityIdentifier.id !== this.id) {
        isHidden = true;
      const transformingEntity = this.manager.stateApi.$transformingAdapter.get();
      if (
        transformingEntity &&
        transformingEntity.entityIdentifier.id !== this.id &&
        // Silent transforms should be transparent to the user, so we don't need to hide the entity.
        !transformingEntity.transformer.$silentTransform.get()
      ) {
        this.setVisibility(false);
        return;
      }
    }

    this.$isHidden.set(isHidden);
    this.konva.layer.visible(!isHidden);
    // If the entity is not selected and offscreen, we can hide it
    if (!this.$isOnScreen.get() && !this.manager.stateApi.getIsSelected(this.entityIdentifier.id)) {
      this.setVisibility(false);
      return;
    }

    this.setVisibility(true);
  });
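
  // Note: rafThrottle (from the raf-throttle package) coalesces bursts of calls into at most one
  // invocation per animation frame, so the many subscriptions above can all trigger syncVisibility
  // without stacking redundant Konva visibility updates.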

  setVisibility = (isVisible: boolean) => {
    const isHidden = this.$isHidden.get();
    const isLayerVisible = this.konva.layer.visible();

    if (isHidden === !isVisible && isLayerVisible === isVisible) {
      // No change
      return;
    }
    this.log.trace(isVisible ? 'Showing' : 'Hiding');
    this.$isHidden.set(!isVisible);
    this.konva.layer.visible(isVisible);

    this.renderer.syncKonvaCache();
  };

  /**
@@ -328,6 +535,15 @@ export abstract class CanvasEntityAdapterBase<
      renderer: this.renderer.repr(),
      bufferRenderer: this.bufferRenderer.repr(),
      filterer: this.filterer?.repr(),
      hasCache: this.$canvasCache.get() !== null,
      isLocked: this.$isLocked.get(),
      isDisabled: this.$isDisabled.get(),
      isHidden: this.$isHidden.get(),
      isEmpty: this.$isEmpty.get(),
      isInteractable: this.$isInteractable.get(),
      isOnScreen: this.$isOnScreen.get(),
      intersectsBbox: this.$intersectsBbox.get(),
      konva: getKonvaNodeDebugAttrs(this.konva.layer),
    };
  };
}

@@ -69,7 +69,7 @@ export class CanvasEntityAdapterControlLayer extends CanvasEntityAdapterBase<
    this.log.trace({ rect }, 'Getting canvas');
    // The opacity may have been changed in response to user selecting a different entity category, so we must restore
    // the original opacity before rendering the canvas
    const attrs: GroupConfig = { opacity: this.state.opacity };
    const attrs: GroupConfig = { opacity: this.state.opacity, filters: [] };
    const canvas = this.renderer.getCanvas({ rect, attrs });
    return canvas;
  };

@@ -39,6 +39,9 @@ export class CanvasEntityAdapterInpaintMask extends CanvasEntityAdapterBase<
      return;
    }

    // If prevState is undefined, this is the first render. Some logic is only needed on the first render, and some
    // updates must be forced on the first render.

    if (!prevState || this.state.isEnabled !== prevState.isEnabled) {
      this.syncIsEnabled();
    }
@@ -55,21 +58,16 @@
      this.syncOpacity();
    }
    if (!prevState || this.state.fill !== prevState.fill) {
      this.syncCompositingRectFill();
      // On first render, we must force the update
      this.renderer.updateCompositingRectFill(!prevState);
    }
    if (!prevState) {
      this.syncCompositingRectSize();
      // On first render, we must force the updates
      this.renderer.updateCompositingRectSize(true);
      this.renderer.updateCompositingRectPosition(true);
    }
  };

  syncCompositingRectSize = () => {
    this.renderer.updateCompositingRectSize();
  };

  syncCompositingRectFill = () => {
    this.renderer.updateCompositingRectFill();
  };

  getHashableState = (): SerializableObject => {
    const keysToOmit: (keyof CanvasInpaintMaskState)[] = ['fill', 'name', 'opacity'];
    return omit(this.state, keysToOmit);
@@ -78,7 +76,7 @@ export class CanvasEntityAdapterInpaintMask extends CanvasEntityAdapterBase<
  getCanvas = (rect?: Rect): HTMLCanvasElement => {
    // The opacity may have been changed in response to user selecting a different entity category, and the mask regions
    // should be fully opaque - set opacity to 1 before rendering the canvas
    const attrs: GroupConfig = { opacity: 1 };
    const attrs: GroupConfig = { opacity: 1, filters: [] };
    const canvas = this.renderer.getCanvas({ rect, attrs });
    return canvas;
  };

@@ -62,7 +62,7 @@ export class CanvasEntityAdapterRasterLayer extends CanvasEntityAdapterBase<
    this.log.trace({ rect }, 'Getting canvas');
    // The opacity may have been changed in response to user selecting a different entity category, so we must restore
    // the original opacity before rendering the canvas
    const attrs: GroupConfig = { opacity: this.state.opacity };
    const attrs: GroupConfig = { opacity: this.state.opacity, filters: [] };
    const canvas = this.renderer.getCanvas({ rect, attrs });
    return canvas;
  };

@@ -39,6 +39,9 @@ export class CanvasEntityAdapterRegionalGuidance extends CanvasEntityAdapterBase
      return;
    }

    // If prevState is undefined, this is the first render. Some logic is only needed on the first render, and some
    // updates must be forced on the first render.

    if (!prevState || this.state.isEnabled !== prevState.isEnabled) {
      this.syncIsEnabled();
    }
@@ -55,21 +58,16 @@
      this.syncOpacity();
    }
    if (!prevState || this.state.fill !== prevState.fill) {
      this.syncCompositingRectFill();
      // On first render, we must force the update
      this.renderer.updateCompositingRectFill(!prevState);
    }
    if (!prevState) {
      this.syncCompositingRectSize();
      // On first render, we must force the updates
      this.renderer.updateCompositingRectSize(true);
      this.renderer.updateCompositingRectPosition(true);
    }
  };

  syncCompositingRectSize = () => {
    this.renderer.updateCompositingRectSize();
  };

  syncCompositingRectFill = () => {
    this.renderer.updateCompositingRectFill();
  };

  getHashableState = (): SerializableObject => {
    const keysToOmit: (keyof CanvasRegionalGuidanceState)[] = ['fill', 'name', 'opacity'];
    return omit(this.state, keysToOmit);
@@ -78,7 +76,7 @@ export class CanvasEntityAdapterRegionalGuidance extends CanvasEntityAdapterBase
  getCanvas = (rect?: Rect): HTMLCanvasElement => {
    // The opacity may have been changed in response to user selecting a different entity category, and the mask regions
    // should be fully opaque - set opacity to 1 before rendering the canvas
    const attrs: GroupConfig = { opacity: 1 };
    const attrs: GroupConfig = { opacity: 1, filters: [] };
    const canvas = this.renderer.getCanvas({ rect, attrs });
    return canvas;
  };

@@ -216,7 +216,7 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
    this.abortController = null;
    this.parent.bufferRenderer.clearBuffer();
    this.parent.transformer.updatePosition();
    this.parent.renderer.syncCache(true);
    this.parent.renderer.syncKonvaCache(true);
    this.imageState = null;
    this.$hasProcessed.set(false);
  };

@@ -1,6 +1,5 @@
import { $authToken } from 'app/store/nanostores/authToken';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
import { withResult } from 'common/util/result';
import { SyncableMap } from 'common/util/SyncableMap/SyncableMap';
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
@@ -26,10 +25,8 @@ import type { Rect } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import Konva from 'konva';
import type { GroupConfig } from 'konva/lib/Group';
import { debounce } from 'lodash-es';
import { atom } from 'nanostores';
import { throttle } from 'lodash-es';
import type { Logger } from 'roarr';
import { serializeError } from 'serialize-error';
import { getImageDTOSafe, uploadImage } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { assert } from 'tsafe';
@@ -96,17 +93,6 @@ export class CanvasEntityObjectRenderer extends CanvasModuleBase {
    } | null;
  };

  /**
   * The entity's object group as a canvas element along with the pixel rect of the entity at the time the canvas was
   * drawn.
   *
   * Technically, this is an internal Konva object, created when a Konva node's `.cache()` method is called. We cache
   * the object group after every update, so we get this as a "free" side effect.
   *
   * This is used to render the entity's preview in the control layer.
   */
  $canvasCache = atom<{ canvas: HTMLCanvasElement; rect: Rect } | null>(null);

  constructor(parent: CanvasEntityAdapter) {
    super();
    this.id = getPrefixedId(this.type);
@@ -146,13 +132,22 @@
    // The compositing rect must cover the whole stage at all times. When the stage is scaled, moved or resized, we
    // need to update the compositing rect to match the stage.
    this.subscriptions.add(
      this.manager.stage.$stageAttrs.listen(() => {
      this.manager.stage.$stageAttrs.listen((stageAttrs, oldStageAttrs) => {
        if (!this.konva.compositing) {
          return;
        }

        if (
          this.konva.compositing &&
          (this.parent.type === 'inpaint_mask_adapter' || this.parent.type === 'regional_guidance_adapter')
          stageAttrs.width !== oldStageAttrs.width ||
          stageAttrs.height !== oldStageAttrs.height ||
          stageAttrs.scale !== oldStageAttrs.scale
        ) {
          this.updateCompositingRectSize();
        }

        if (stageAttrs.x !== oldStageAttrs.x || stageAttrs.y !== oldStageAttrs.y) {
          this.updateCompositingRectPosition();
        }
      })
    );
  }
@@ -184,7 +179,7 @@
      didRender = (await this.renderObject(obj)) || didRender;
    }

    this.syncCache(didRender);
    this.syncKonvaCache(didRender);

    return didRender;
  };
@@ -194,16 +189,21 @@
    renderer.konva.group.moveTo(this.konva.objectGroup);
  };

  syncCache = (force: boolean = false) => {
  syncKonvaCache = (force: boolean = false) => {
    if (this.renderers.size === 0) {
      this.log.trace('Clearing object group cache');
      this.konva.objectGroup.clearCache();
      this.$canvasCache.set(null);
    } else if (force || !this.konva.objectGroup.isCached()) {
      return;
    }

    // We should never cache the entity if it is not visible - it will cache as a transparent image.
    const isVisible = this.parent.konva.layer.visible();
    const isCached = this.konva.objectGroup.isCached();

    if (isVisible && (force || !isCached)) {
      this.log.trace('Caching object group');
      this.konva.objectGroup.clearCache();
      this.konva.objectGroup.cache({ pixelRatio: 1, imageSmoothingEnabled: false });
      this.parent.renderer.updatePreviewCanvas();
    }
  };
@@ -214,7 +214,11 @@
    }
  };

  updateCompositingRectFill = () => {
  updateCompositingRectFill = throttle((force?: boolean) => {
    if (!force && !this.hasObjects()) {
      return;
    }

    this.log.trace('Updating compositing rect fill');

    assert(this.konva.compositing, 'Missing compositing rect');
@@ -233,9 +237,13 @@
      });
      setFillPatternImage(this.konva.compositing.rect, fill.style, fill.color);
    }
  };
  }, 100);

  updateCompositingRectSize = (force?: boolean) => {
    if (!force && !this.hasObjects()) {
      return;
    }

  updateCompositingRectSize = () => {
    this.log.trace('Updating compositing rect size');

    assert(this.konva.compositing, 'Missing compositing rect');
@@ -249,7 +257,21 @@
    });
  };

  updateOpacity = () => {
  updateCompositingRectPosition = (force?: boolean) => {
    if (!force && !this.hasObjects()) {
      return;
    }

    this.log.trace('Updating compositing rect position');

    assert(this.konva.compositing, 'Missing compositing rect');

    this.konva.compositing.rect.setAttrs({
      ...this.manager.stage.getScaledStageRect(),
    });
  };

  updateOpacity = throttle(() => {
    this.log.trace('Updating opacity');

    const opacity = this.parent.state.opacity;
@@ -260,7 +282,7 @@
      this.konva.objectGroup.opacity(opacity);
    }
    this.parent.bufferRenderer.konva.group.opacity(opacity);
  };
  }, 100);

  /**
   * Renders the given object. If the object renderer does not exist, it will be created and its Konva group added to the
@@ -404,19 +426,25 @@
    replaceObjects?: boolean;
    attrs?: GroupConfig;
    bg?: string;
    ignoreCache?: boolean;
  }): Promise<ImageDTO> => {
    const rasterizingAdapter = this.manager.stateApi.$rasterizingAdapter.get();
    if (rasterizingAdapter) {
      assert(false, `Already rasterizing an entity: ${rasterizingAdapter.id}`);
    }

    const { rect, replaceObjects, attrs, bg } = { replaceObjects: false, attrs: {}, ...options };
    const { rect, replaceObjects, attrs, bg, ignoreCache } = {
      replaceObjects: false,
      ignoreCache: false,
      attrs: {},
      ...options,
    };
    let imageDTO: ImageDTO | null = null;
    const rasterizeArgs = { rect, attrs, bg };
    const hash = this.parent.hash(rasterizeArgs);
    const cachedImageName = this.manager.cache.imageNameCache.get(hash);

    if (cachedImageName) {
    if (cachedImageName && !ignoreCache) {
      imageDTO = await getImageDTOSafe(cachedImageName);
      if (imageDTO) {
        this.log.trace({ rect, cachedImageName, imageDTO }, 'Using cached rasterized image');
@@ -453,52 +481,15 @@
    return imageDTO;
  };

  updatePreviewCanvas = debounce(() => {
    if (this.parent.transformer.$isPendingRectCalculation.get()) {
      return;
    }

    const pixelRect = this.parent.transformer.$pixelRect.get();
    if (pixelRect.width === 0 || pixelRect.height === 0) {
      return;
    }

    /**
     * TODO(psyche): This is an internal Konva method, so it may break in the future. Can we make this API public?
     *
     * This method's API is unknown. It has been experimentally determined that it may throw, so we need to handle
     * errors.
     */
    const getCacheCanvasResult = withResult(
      () => this.konva.objectGroup._getCachedSceneCanvas()._canvas as HTMLCanvasElement | undefined | null
    );
    if (getCacheCanvasResult.isErr()) {
      // We are using an internal Konva method, so we need to catch any errors that may occur.
      this.log.warn({ error: serializeError(getCacheCanvasResult.error) }, 'Failed to update preview canvas');
      return;
    }

    const canvas = getCacheCanvasResult.value;

    if (canvas) {
      const nodeRect = this.parent.transformer.$nodeRect.get();
      const rect = {
        x: pixelRect.x - nodeRect.x,
        y: pixelRect.y - nodeRect.y,
        width: pixelRect.width,
        height: pixelRect.height,
      };
      this.$canvasCache.set({ rect, canvas });
    }
  }, 300);

  cloneObjectGroup = (arg: { attrs?: GroupConfig } = {}): Konva.Group => {
    const { attrs } = arg;
    const clone = this.konva.objectGroup.clone();
    if (attrs) {
      clone.setAttrs(attrs);
    }
    clone.cache({ pixelRatio: 1, imageSmoothingEnabled: false });
    if (clone.hasChildren()) {
      clone.cache({ pixelRatio: 1, imageSmoothingEnabled: false });
    }
    return clone;
  };

@@ -545,7 +536,6 @@
      konva: {
        objectGroup: getKonvaNodeDebugAttrs(this.konva.objectGroup),
      },
      hasCache: this.$canvasCache.get() !== null,
    };
  };
}

@@ -1,3 +1,4 @@
|
||||
import { Mutex } from 'async-mutex';
|
||||
import { withResultAsync } from 'common/util/result';
|
||||
import { roundToMultiple } from 'common/util/roundDownToMultiple';
|
||||
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
|
||||
@@ -13,7 +14,7 @@ import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/sel
|
||||
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
|
||||
import Konva from 'konva';
|
||||
import type { GroupConfig } from 'konva/lib/Group';
|
||||
import { debounce, get } from 'lodash-es';
|
||||
import { clamp, debounce, get } from 'lodash-es';
|
||||
import { atom } from 'nanostores';
|
||||
import type { Logger } from 'roarr';
|
||||
import { serializeError } from 'serialize-error';
|
||||
@@ -96,14 +97,21 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
|
||||
config: CanvasEntityTransformerConfig = DEFAULT_CONFIG;
|
||||
|
||||
/**
|
||||
* The rect of the parent, _including_ transparent regions.
|
||||
* The rect of the parent, _including_ transparent regions, **relative to the parent's position**. To get the rect
|
||||
* relative to the _stage_, add the parent's position.
|
||||
*
|
||||
* It is calculated via Konva's getClientRect method, which is fast but includes transparent regions.
|
||||
*
|
||||
* This rect is relative _to the parent's position_, not the stage.
|
||||
*/
|
||||
$nodeRect = atom<Rect>(getEmptyRect());
|
||||
|
||||
/**
|
||||
* The rect of the parent, _excluding_ transparent regions.
|
||||
* The rect of the parent, _excluding_ transparent regions, **relative to the parent's position**. To get the rect
|
||||
* relative to the _stage_, add the parent's position.
|
||||
*
|
||||
* If the parent's nodes have no possibility of transparent regions, this will be calculated the same way as nodeRect.
|
||||
*
|
||||
* If the parent's nodes may have transparent regions, this will be calculated manually by rasterizing the parent and
|
||||
* checking the pixel data.
|
||||
*/
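Both doc comments now stress that these rects are parent-relative. The conversion they describe is just an offset by the parent's position; a small illustrative helper (`toStageRect` is not part of the diff, but the `Rect` and `Coordinate` types are imported above):

```ts
import type { Coordinate, Rect } from 'features/controlLayers/store/types';

// Converting a parent-relative rect (like $nodeRect / $pixelRect) to stage
// coordinates, per the doc comments above: add the parent's position.
const toStageRect = (rect: Rect, parentPosition: Coordinate): Rect => ({
  x: rect.x + parentPosition.x,
  y: rect.y + parentPosition.y,
  width: rect.width,
  height: rect.height,
});
```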

@@ -147,6 +155,25 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
   */
  $isProcessing = atom(false);

+ /**
+  * Whether the transformer is currently in silent mode. In silent mode, the transform operation should not show any
+  * visual feedback.
+  *
+  * This is set every time a transform is started.
+  *
+  * This is used for transform operations like directly fitting the entity to the bbox, which should not show the
+  * transform controls, Transform react component or have any other visual feedback. The transform should just happen
+  * silently.
+  */
+ $silentTransform = atom(false);
+
+ /**
+  * A mutex to prevent concurrent operations.
+  *
+  * The mutex is locked during transformation and during rect calculations which are handled in a web worker.
+  */
+ transformMutex = new Mutex();
+
  konva: {
    transformer: Konva.Transformer;
    proxyRect: Konva.Rect;
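The new `transformMutex` comes from the `async-mutex` package imported at the top of this file. Its `acquire()` resolves only once any previous holder has released, which is what serializes transforms against the worker-based rect calculations. A minimal usage sketch (the `doExclusive` wrapper is illustrative, not part of the diff):

```ts
import { Mutex } from 'async-mutex';

// Illustrative only: how async-mutex serializes async critical sections.
const mutex = new Mutex();

const doExclusive = async () => {
  const release = await mutex.acquire(); // waits for any in-flight holder
  try {
    // ... transform or rect calculation ...
  } finally {
    release(); // always release, even if the critical section throws
  }
};
```

The class below instead holds the lock across method boundaries (`startTransform` acquires it, `applyTransform` releases it), which is why release calls appear in several places in this diff rather than in a single `finally`.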

@@ -170,7 +197,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
      name: `${this.type}:outline_rect`,
      stroke: this.config.OUTLINE_COLOR,
      perfectDrawEnabled: false,
-     strokeHitEnabled: false,
+     hitStrokeWidth: 0,
    }),
    transformer: new Konva.Transformer({
      name: `${this.type}:transformer`,
@@ -376,9 +403,15 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
  };

  /**
-  * Fits the proxy rect to the bounding box of the parent entity, then syncs the object group with the proxy rect.
+  * Fits the entity to the bbox using the "fill" strategy.
   */
- fitProxyRectToBbox = () => {
+ fitToBboxFill = () => {
+   if (!this.$isTransformEnabled.get()) {
+     this.log.warn(
+       'Cannot fit to bbox fill when transform is disabled. Did you forget to call `await adapter.transformer.startTransform()`?'
+     );
+     return;
+   }
    const { rect } = this.manager.stateApi.getBbox();
    const scaleX = rect.width / this.konva.proxyRect.width();
    const scaleY = rect.height / this.konva.proxyRect.height();
@@ -392,6 +425,74 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
    this.syncObjectGroupWithProxyRect();
  };
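With the "fill" strategy, the two axes scale independently, so the entity's aspect ratio is not preserved. For example:

```ts
// "Fill": a 100x50 entity into a 200x200 bbox stretches each axis separately.
const scaleX = 200 / 100; // 2
const scaleY = 200 / 50; // 4
// Result: 100*2 x 50*4 = 200x200 -- the bbox is filled exactly, aspect ratio lost.
```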

+ /**
+  * Fits the entity to the bbox using the "contain" strategy.
+  */
+ fitToBboxContain = () => {
+   if (!this.$isTransformEnabled.get()) {
+     this.log.warn(
+       'Cannot fit to bbox contain when transform is disabled. Did you forget to call `await adapter.transformer.startTransform()`?'
+     );
+     return;
+   }
+   const { rect } = this.manager.stateApi.getBbox();
+   const gridSize = this.manager.stateApi.getGridSize();
+   const width = this.konva.proxyRect.width();
+   const height = this.konva.proxyRect.height();
+   const scaleX = rect.width / width;
+   const scaleY = rect.height / height;
+
+   // "contain" means that the entity should be scaled to fit within the bbox, but it should not exceed the bbox.
+   const scale = Math.min(scaleX, scaleY);
+
+   // Center the shape within the bounding box
+   const offsetX = (rect.width - width * scale) / 2;
+   const offsetY = (rect.height - height * scale) / 2;
+
+   this.konva.proxyRect.setAttrs({
+     x: clamp(roundToMultiple(rect.x + offsetX, gridSize), rect.x, rect.x + rect.width),
+     y: clamp(roundToMultiple(rect.y + offsetY, gridSize), rect.y, rect.y + rect.height),
+     scaleX: scale,
+     scaleY: scale,
+     rotation: 0,
+   });
+   this.syncObjectGroupWithProxyRect();
+ };
+
+ /**
+  * Fits the entity to the bbox using the "cover" strategy.
+  */
+ fitToBboxCover = () => {
+   if (!this.$isTransformEnabled.get()) {
+     this.log.warn(
+       'Cannot fit to bbox cover when transform is disabled. Did you forget to call `await adapter.transformer.startTransform()`?'
+     );
+     return;
+   }
+   const { rect } = this.manager.stateApi.getBbox();
+   const gridSize = this.manager.stateApi.getGridSize();
+   const width = this.konva.proxyRect.width();
+   const height = this.konva.proxyRect.height();
+   const scaleX = rect.width / width;
+   const scaleY = rect.height / height;
+
+   // "cover" is the same as "contain", but we choose the larger scale to cover the shape
+   const scale = Math.max(scaleX, scaleY);
+
+   // Center the shape within the bounding box
+   const offsetX = (rect.width - width * scale) / 2;
+   const offsetY = (rect.height - height * scale) / 2;
+
+   this.konva.proxyRect.setAttrs({
+     x: roundToMultiple(rect.x + offsetX, gridSize),
+     y: roundToMultiple(rect.y + offsetY, gridSize),
+     scaleX: scale,
+     scaleY: scale,
+     rotation: 0,
+   });
+   this.syncObjectGroupWithProxyRect();
+ };
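The only difference between the two strategies is `Math.min` versus `Math.max` over the per-axis scales; the centering math is identical (for "cover" the offsets are zero or negative). Distilled into a standalone sketch (this `fitRect` helper is illustrative, not part of the diff):

```ts
// Standalone sketch of the contain/cover math used above, placing a
// (width x height) shape into the bbox `rect`.
type FitStrategy = 'contain' | 'cover';

const fitRect = (
  width: number,
  height: number,
  rect: { x: number; y: number; width: number; height: number },
  strategy: FitStrategy
) => {
  const scaleX = rect.width / width;
  const scaleY = rect.height / height;
  // contain: the largest scale that still fits entirely inside the bbox.
  // cover: the smallest scale that fully covers the bbox.
  const scale = strategy === 'contain' ? Math.min(scaleX, scaleY) : Math.max(scaleX, scaleY);
  // Center the scaled shape within the bbox; for 'cover' the offsets are <= 0.
  const offsetX = (rect.width - width * scale) / 2;
  const offsetY = (rect.height - height * scale) / 2;
  return { x: rect.x + offsetX, y: rect.y + offsetY, scale };
};

// e.g. a 100x50 entity in a 200x200 bbox:
// contain -> scale 2 (200x100), centered with offsetY = 50
// cover   -> scale 4 (400x200), centered with offsetX = -100
```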

  onDragMove = () => {
    // Snap the interaction rect to the grid
    const gridSize = this.manager.stateApi.getGridSize();
@@ -558,13 +659,31 @@ export class CanvasEntityTransformer extends CanvasModuleBase {

  /**
   * Starts the transformation of the entity.
+  *
+  * This method will asynchronously acquire a mutex to prevent concurrent operations. If you need to perform an
+  * operation after the transformation is started, you should await this method.
+  *
+  * @param arg Options for starting the transformation
+  * @param arg.silent Whether the transformation should be silent. If silent, the transform controls will not be shown,
+  * so you _must_ call `applyTransform` or `stopTransform` to complete the transformation.
+  *
+  * @example
+  * ```ts
+  * await adapter.transformer.startTransform({ silent: true });
+  * adapter.transformer.fitToBboxContain();
+  * await adapter.transformer.applyTransform();
+  * ```
   */
- startTransform = () => {
+ startTransform = async (arg?: { silent: boolean }) => {
    const transformingAdapter = this.manager.stateApi.$transformingAdapter.get();
    if (transformingAdapter) {
      assert(false, `Already transforming an entity: ${transformingAdapter.id}`);
    }
+   // This will be released when the transformation is stopped
+   await this.transformMutex.acquire();
    this.log.debug('Starting transform');
+   const { silent } = { silent: false, ...arg };
+   this.$silentTransform.set(silent);
    this.$isTransforming.set(true);
    this.manager.stateApi.$transformingAdapter.set(this.parent);
    this.syncInteractionState();
@@ -574,11 +693,23 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
   * Applies the transformation of the entity.
   */
  applyTransform = async () => {
+   if (!this.$isTransforming.get()) {
+     this.log.warn(
+       'Cannot apply transform when not transforming. Did you forget to call `await adapter.transformer.startTransform()`?'
+     );
+     return;
+   }
    this.log.debug('Applying transform');
    this.$isProcessing.set(true);
    this._setInteractionMode('off');
    const rect = this.getRelativeRect();
    const rasterizeResult = await withResultAsync(() =>
-     this.parent.renderer.rasterize({ rect, replaceObjects: true, attrs: { opacity: 1, filters: [] } })
+     this.parent.renderer.rasterize({
+       rect,
+       replaceObjects: true,
+       ignoreCache: true,
+       attrs: { opacity: 1, filters: [] },
+     })
    );
    if (rasterizeResult.isErr()) {
      this.log.error({ error: serializeError(rasterizeResult.error) }, 'Failed to rasterize entity');
@@ -608,6 +739,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
    this.syncInteractionState();
    this.manager.stateApi.$transformingAdapter.set(null);
    this.$isProcessing.set(false);
+   this.transformMutex.release();
  };
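Because `startTransform` now locks `transformMutex` and the lock is only released when the transform completes, every started transform must end in exactly one `applyTransform` or `stopTransform`, or the next caller will wait forever. A defensive calling pattern, building on the JSDoc example above (and assuming `stopTransform` releases the lock, as the inline comment in `startTransform` indicates):

```ts
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';

// Illustrative wrapper: guarantee a silent transform is always completed so
// the mutex is released even if fitting or applying fails.
const fitEntityToBboxSilently = async (adapter: CanvasEntityAdapter) => {
  await adapter.transformer.startTransform({ silent: true });
  try {
    adapter.transformer.fitToBboxContain();
    await adapter.transformer.applyTransform();
  } catch (error) {
    // Assumption: stopTransform resets transform state and releases the mutex.
    await adapter.transformer.stopTransform();
    throw error;
  }
};
```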

  /**
@@ -706,21 +838,21 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
      this.parent.renderer.konva.objectGroup.setAttrs(groupAttrs);
      this.parent.bufferRenderer.konva.group.setAttrs(groupAttrs);
    }

    this.parent.renderer.updatePreviewCanvas();
  };

  calculateRect = debounce(() => {
    this.log.debug('Calculating bbox');

    this.$isPendingRectCalculation.set(true);
+   const canvas = this.parent.getCanvas();

    if (!this.parent.renderer.hasObjects()) {
      this.log.trace('No objects, resetting bbox');
      this.$nodeRect.set(getEmptyRect());
      this.$pixelRect.set(getEmptyRect());
      this.parent.$canvasCache.set(canvas);
      this.$isPendingRectCalculation.set(false);
      this.updateBbox();
+     this.transformMutex.release();
      return;
    }

@@ -730,13 +862,14 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
      this.$nodeRect.set({ ...rect });
      this.$pixelRect.set({ ...rect });
      this.log.trace({ nodeRect: this.$nodeRect.get(), pixelRect: this.$pixelRect.get() }, 'Got bbox from client rect');
      this.parent.$canvasCache.set(canvas);
      this.$isPendingRectCalculation.set(false);
      this.updateBbox();
+     this.transformMutex.release();
      return;
    }

    // We have eraser strokes - we must calculate the bbox using pixel data
-   const canvas = this.parent.renderer.getCanvas({ attrs: { opacity: 1, filters: [] } });
    const imageData = canvasToImageData(canvas);
    this.manager.worker.requestBbox(
      { buffer: imageData.data.buffer, width: imageData.width, height: imageData.height },
@@ -758,13 +891,17 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
        { nodeRect: this.$nodeRect.get(), pixelRect: this.$pixelRect.get(), extents },
        `Got bbox from worker`
      );
      this.parent.$canvasCache.set(canvas);
      this.$isPendingRectCalculation.set(false);
      this.updateBbox();
+     this.transformMutex.release();
      }
    );
  }, this.config.RECT_CALC_DEBOUNCE_MS);
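When transparency is possible (eraser strokes), the bbox comes from pixel data: the entity is rasterized, its `ImageData` buffer is posted to a web worker, and the worker replies with the opaque extents. The worker's implementation is not part of this diff, but the scan it presumably performs looks something like:

```ts
// Sketch of the kind of alpha-channel scan the bbox worker presumably performs:
// find the extents of all pixels with non-zero alpha in an RGBA buffer.
const getPixelExtents = (data: Uint8ClampedArray, width: number, height: number) => {
  let minX = width;
  let minY = height;
  let maxX = -1;
  let maxY = -1;
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      // Alpha is every 4th byte in RGBA order.
      if (data[(y * width + x) * 4 + 3] > 0) {
        if (x < minX) minX = x;
        if (x > maxX) maxX = x;
        if (y < minY) minY = y;
        if (y > maxY) maxY = y;
      }
    }
  }
  if (maxX < minX || maxY < minY) {
    return null; // fully transparent
  }
  return { minX, minY, maxX, maxY };
};
```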

- requestRectCalculation = () => {
+ requestRectCalculation = async () => {
+   // This will be released when the rect calculation is complete
+   await this.transformMutex.acquire();
    this.$isPendingRectCalculation.set(true);
    this.syncInteractionState();
    this.calculateRect();
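Note that awaiting the new `requestRectCalculation` only guarantees the mutex has been acquired and the debounced calculation scheduled, not that the rects are ready. Since the mutex is released when the calculation completes, a caller that needs the fresh result can wait for the lock to clear; `waitForUnlock` exists on recent `async-mutex` versions. An illustrative pattern:

```ts
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';

// Illustrative only: wait for the debounced calculation to finish by waiting
// for the mutex (held for the duration of the calculation) to unlock.
const getFreshPixelRect = async (adapter: CanvasEntityAdapter) => {
  await adapter.transformer.requestRectCalculation(); // mutex now held
  await adapter.transformer.transformMutex.waitForUnlock(); // released when calc completes
  return adapter.transformer.$pixelRect.get();
};
```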

@@ -47,6 +47,7 @@ export class CanvasObjectBrushLine extends CanvasModuleBase {
      lineCap: 'round',
      lineJoin: 'round',
      globalCompositeOperation: 'source-over',
+     perfectDrawEnabled: false,
    }),
  };
  this.konva.group.add(this.konva.line);

@@ -48,6 +48,7 @@ export class CanvasObjectBrushLineWithPressure extends CanvasModuleBase {
      listening: false,
      shadowForStrokeEnabled: false,
      globalCompositeOperation: 'source-over',
+     perfectDrawEnabled: false,
    }),
  };
  this.konva.group.add(this.konva.line);

@@ -46,6 +46,7 @@ export class CanvasObjectEraserLine extends CanvasModuleBase {
      lineCap: 'round',
      lineJoin: 'round',
      globalCompositeOperation: 'destination-out',
+     perfectDrawEnabled: false,
    }),
  };
  this.konva.group.add(this.konva.line);
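These three hunks disable `perfectDrawEnabled` on the stroke shapes, a common Konva performance tweak that skips an extra buffer-canvas composite per shape. The composite operations themselves are unchanged but worth noting: brush lines paint with `source-over`, while eraser lines use `destination-out`, which erases by clearing alpha in whatever was drawn beneath them within the same group. A minimal, self-contained illustration:

```ts
import Konva from 'konva';

// Within one group, a 'destination-out' line erases the 'source-over'
// strokes underneath it.
const group = new Konva.Group();
group.add(
  new Konva.Line({
    points: [0, 0, 100, 100],
    stroke: 'black',
    strokeWidth: 20,
    lineCap: 'round',
    lineJoin: 'round',
    globalCompositeOperation: 'source-over', // brush: paints on top
    perfectDrawEnabled: false,
  })
);
group.add(
  new Konva.Line({
    points: [100, 0, 0, 100],
    stroke: 'black', // color is irrelevant when erasing; only alpha matters
    strokeWidth: 20,
    lineCap: 'round',
    lineJoin: 'round',
    globalCompositeOperation: 'destination-out', // eraser: clears pixels below
    perfectDrawEnabled: false,
  })
);
// Caching composites the strokes together, so erasing stays local to this
// group instead of cutting through layers beneath it.
group.cache();
```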
Some files were not shown because too many files have changed in this diff.