Compare commits

..

5 Commits

Mary Hipp Rogers
649596cec5 fix 1:1 ratio (#8127)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-06-25 19:39:56 -04:00
psychedelicious
45aa84c01a feat: add user_label to FieldIdentifier (#8126)
Co-authored-by: Mary Hipp Rogers <maryhipp@gmail.com>
2025-06-25 09:48:15 -04:00
Mary Hipp Rogers
064d5787c9 Flux Kontext UI support (#8111)
* add support for flux-kontext models in nodes

* flux kontext in canvas

* add aspect ratio support

* lint

* restore aspect ratio logic

* more linting

* typegen

* fix typegen

---------

Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-06-25 09:46:58 -04:00
psychedelicious
d81b23adff fix(nodes): ensure each invocation overrides _original_model_fields with own field data 2025-06-19 09:57:11 -04:00
psychedelicious
c72480fd1b fix: opencv dependency conflict (#8095)
* build: prevent `opencv-python` from being installed

Fixes this error: `AttributeError: module 'cv2.ximgproc' has no attribute 'thinning'`

`opencv-contrib-python` supersedes `opencv-python`, providing the same API + additional features. The two packages should not be installed at the same time to avoid conflicts and/or errors.

The `invisible-watermark` package requires `opencv-python`, but we require the contrib variant.

This change updates `pyproject.toml` to prevent `opencv-python` from ever being installed, using a `uv` feature called dependency overrides (a minimal sketch of the override follows the commit list below).

* feat(ui): data viewer supports disabling wrap

* feat(api): list _all_ pkgs in app deps endpoint

* chore(ui): typegen

* feat(ui): update about modal to display new full deps list

* chore: uv lock
2025-06-10 08:34:00 -04:00
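
A minimal sketch of the dependency-override approach described in commit `c72480fd1b`, assuming uv's `[tool.uv]` `override-dependencies` setting and an always-false environment marker; the exact entry in the repository's `pyproject.toml` may differ:

```toml
[tool.uv]
# Override the transitive requirement on opencv-python (pulled in by
# invisible-watermark) with an environment marker that can never be true,
# so the resolver only ever installs opencv-contrib-python.
override-dependencies = [
    "opencv-python; sys_platform == 'never'",
]
```

With an override like this in place, `uv lock` and `uv sync` resolve the project without `opencv-python`, leaving `opencv-contrib-python` as the sole provider of the `cv2` module.
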
575 changed files with 10563 additions and 15816 deletions

.gitignore (vendored)

@@ -180,7 +180,6 @@ cython_debug/
# Scratch folder
.scratch/
.vscode/
.zed/
# source installer files
installer/*zip


@@ -297,7 +297,7 @@ Migration logic is in [migrations.ts].
<!-- links -->
[pydantic]: https://github.com/pydantic/pydantic 'pydantic'
[zod]: https://github.com/colinhacks/zod 'zod/v4'
[zod]: https://github.com/colinhacks/zod 'zod'
[openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types'
[reactflow]: https://github.com/xyflow/xyflow 'reactflow'
[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions


@@ -1,12 +1,21 @@
from fastapi import Body, HTTPException
from fastapi.routing import APIRouter
from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.images.images_common import AddImagesToBoardResult, RemoveImagesFromBoardResult
board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
class AddImagesToBoardResult(BaseModel):
board_id: str = Field(description="The id of the board the images were added to")
added_image_names: list[str] = Field(description="The image names that were added to the board")
class RemoveImagesFromBoardResult(BaseModel):
removed_image_names: list[str] = Field(description="The image names that were removed from their board")
@board_images_router.post(
"/",
operation_id="add_image_to_board",
@@ -14,26 +23,17 @@ board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
201: {"description": "The image was added to a board successfully"},
},
status_code=201,
response_model=AddImagesToBoardResult,
)
async def add_image_to_board(
board_id: str = Body(description="The id of the board to add to"),
image_name: str = Body(description="The name of the image to add"),
) -> AddImagesToBoardResult:
):
"""Creates a board_image"""
try:
added_images: set[str] = set()
affected_boards: set[str] = set()
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
ApiDependencies.invoker.services.board_images.add_image_to_board(board_id=board_id, image_name=image_name)
added_images.add(image_name)
affected_boards.add(board_id)
affected_boards.add(old_board_id)
return AddImagesToBoardResult(
added_images=list(added_images),
affected_boards=list(affected_boards),
result = ApiDependencies.invoker.services.board_images.add_image_to_board(
board_id=board_id, image_name=image_name
)
return result
except Exception:
raise HTTPException(status_code=500, detail="Failed to add image to board")
@@ -45,25 +45,14 @@ async def add_image_to_board(
201: {"description": "The image was removed from the board successfully"},
},
status_code=201,
response_model=RemoveImagesFromBoardResult,
)
async def remove_image_from_board(
image_name: str = Body(description="The name of the image to remove", embed=True),
) -> RemoveImagesFromBoardResult:
):
"""Removes an image from its board, if it had one"""
try:
removed_images: set[str] = set()
affected_boards: set[str] = set()
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
removed_images.add(image_name)
affected_boards.add("none")
affected_boards.add(old_board_id)
return RemoveImagesFromBoardResult(
removed_images=list(removed_images),
affected_boards=list(affected_boards),
)
result = ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
return result
except Exception:
raise HTTPException(status_code=500, detail="Failed to remove image from board")
@@ -83,25 +72,16 @@ async def add_images_to_board(
) -> AddImagesToBoardResult:
"""Adds a list of images to a board"""
try:
added_images: set[str] = set()
affected_boards: set[str] = set()
added_image_names: list[str] = []
for image_name in image_names:
try:
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
ApiDependencies.invoker.services.board_images.add_image_to_board(
board_id=board_id,
image_name=image_name,
board_id=board_id, image_name=image_name
)
added_images.add(image_name)
affected_boards.add(board_id)
affected_boards.add(old_board_id)
added_image_names.append(image_name)
except Exception:
pass
return AddImagesToBoardResult(
added_images=list(added_images),
affected_boards=list(affected_boards),
)
return AddImagesToBoardResult(board_id=board_id, added_image_names=added_image_names)
except Exception:
raise HTTPException(status_code=500, detail="Failed to add images to board")
@@ -120,20 +100,13 @@ async def remove_images_from_board(
) -> RemoveImagesFromBoardResult:
"""Removes a list of images from their board, if they had one"""
try:
removed_images: set[str] = set()
affected_boards: set[str] = set()
removed_image_names: list[str] = []
for image_name in image_names:
try:
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
removed_images.add(image_name)
affected_boards.add("none")
affected_boards.add(old_board_id)
removed_image_names.append(image_name)
except Exception:
pass
return RemoveImagesFromBoardResult(
removed_images=list(removed_images),
affected_boards=list(affected_boards),
)
return RemoveImagesFromBoardResult(removed_image_names=removed_image_names)
except Exception:
raise HTTPException(status_code=500, detail="Failed to remove images from board")


@@ -14,17 +14,10 @@ from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_i
from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.image_records.image_records_common import (
ImageCategory,
ImageNamesResult,
ImageRecordChanges,
ResourceOrigin,
)
from invokeai.app.services.images.images_common import (
DeleteImagesResult,
ImageDTO,
ImageUrlsDTO,
StarredImagesResult,
UnstarredImagesResult,
)
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
@@ -106,9 +99,7 @@ async def upload_image(
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
try:
# heuristic_resize_fast expects an RGB or RGBA image
pil_rgba = pil_image.convert("RGBA")
np_image = pil_to_np(pil_rgba)
np_image = pil_to_np(pil_image)
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
pil_image = np_to_pil(np_image)
except Exception:
@@ -160,30 +151,18 @@ async def create_image_upload_entry(
raise HTTPException(status_code=501, detail="Not implemented")
@images_router.delete("/i/{image_name}", operation_id="delete_image", response_model=DeleteImagesResult)
@images_router.delete("/i/{image_name}", operation_id="delete_image")
async def delete_image(
image_name: str = Path(description="The name of the image to delete"),
) -> DeleteImagesResult:
) -> None:
"""Deletes an image"""
deleted_images: set[str] = set()
affected_boards: set[str] = set()
try:
image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
board_id = image_dto.board_id or "none"
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.add(image_name)
affected_boards.add(board_id)
except Exception:
# TODO: Does this need any exception handling at all?
pass
return DeleteImagesResult(
deleted_images=list(deleted_images),
affected_boards=list(affected_boards),
)
@images_router.delete("/intermediates", operation_id="clear_intermediates")
async def clear_intermediates() -> int:
@@ -395,32 +374,31 @@ async def list_image_dtos(
return image_dtos
@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesResult)
class DeleteImagesFromListResult(BaseModel):
deleted_images: list[str]
@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesFromListResult)
async def delete_images_from_list(
image_names: list[str] = Body(description="The list of names of images to delete", embed=True),
) -> DeleteImagesResult:
) -> DeleteImagesFromListResult:
try:
deleted_images: set[str] = set()
affected_boards: set[str] = set()
deleted_images: list[str] = []
for image_name in image_names:
try:
image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
board_id = image_dto.board_id or "none"
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.add(image_name)
affected_boards.add(board_id)
deleted_images.append(image_name)
except Exception:
pass
return DeleteImagesResult(
deleted_images=list(deleted_images),
affected_boards=list(affected_boards),
)
return DeleteImagesFromListResult(deleted_images=deleted_images)
except Exception:
raise HTTPException(status_code=500, detail="Failed to delete images")
@images_router.delete("/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesResult)
async def delete_uncategorized_images() -> DeleteImagesResult:
@images_router.delete(
"/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
)
async def delete_uncategorized_images() -> DeleteImagesFromListResult:
"""Deletes all images that are uncategorized"""
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
@@ -428,19 +406,14 @@ async def delete_uncategorized_images() -> DeleteImagesResult:
)
try:
deleted_images: set[str] = set()
affected_boards: set[str] = set()
deleted_images: list[str] = []
for image_name in image_names:
try:
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.add(image_name)
affected_boards.add("none")
deleted_images.append(image_name)
except Exception:
pass
return DeleteImagesResult(
deleted_images=list(deleted_images),
affected_boards=list(affected_boards),
)
return DeleteImagesFromListResult(deleted_images=deleted_images)
except Exception:
raise HTTPException(status_code=500, detail="Failed to delete images")
@@ -449,50 +422,36 @@ class ImagesUpdatedFromListResult(BaseModel):
updated_image_names: list[str] = Field(description="The image names that were updated")
@images_router.post("/star", operation_id="star_images_in_list", response_model=StarredImagesResult)
@images_router.post("/star", operation_id="star_images_in_list", response_model=ImagesUpdatedFromListResult)
async def star_images_in_list(
image_names: list[str] = Body(description="The list of names of images to star", embed=True),
) -> StarredImagesResult:
) -> ImagesUpdatedFromListResult:
try:
starred_images: set[str] = set()
affected_boards: set[str] = set()
updated_image_names: list[str] = []
for image_name in image_names:
try:
updated_image_dto = ApiDependencies.invoker.services.images.update(
image_name, changes=ImageRecordChanges(starred=True)
)
starred_images.add(image_name)
affected_boards.add(updated_image_dto.board_id or "none")
ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=True))
updated_image_names.append(image_name)
except Exception:
pass
return StarredImagesResult(
starred_images=list(starred_images),
affected_boards=list(affected_boards),
)
return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
except Exception:
raise HTTPException(status_code=500, detail="Failed to star images")
@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=UnstarredImagesResult)
@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=ImagesUpdatedFromListResult)
async def unstar_images_in_list(
image_names: list[str] = Body(description="The list of names of images to unstar", embed=True),
) -> UnstarredImagesResult:
) -> ImagesUpdatedFromListResult:
try:
unstarred_images: set[str] = set()
affected_boards: set[str] = set()
updated_image_names: list[str] = []
for image_name in image_names:
try:
updated_image_dto = ApiDependencies.invoker.services.images.update(
image_name, changes=ImageRecordChanges(starred=False)
)
unstarred_images.add(image_name)
affected_boards.add(updated_image_dto.board_id or "none")
ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=False))
updated_image_names.append(image_name)
except Exception:
pass
return UnstarredImagesResult(
unstarred_images=list(unstarred_images),
affected_boards=list(affected_boards),
)
return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
except Exception:
raise HTTPException(status_code=500, detail="Failed to unstar images")
@@ -563,61 +522,3 @@ async def get_bulk_download_item(
return response
except Exception:
raise HTTPException(status_code=404)
@images_router.get("/names", operation_id="get_image_names")
async def get_image_names(
image_origin: Optional[ResourceOrigin] = Query(default=None, description="The origin of images to list."),
categories: Optional[list[ImageCategory]] = Query(default=None, description="The categories of image to include."),
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate images."),
board_id: Optional[str] = Query(
default=None,
description="The board id to filter by. Use 'none' to find images without a board.",
),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
starred_first: bool = Query(default=True, description="Whether to sort by starred images first"),
search_term: Optional[str] = Query(default=None, description="The term to search for"),
) -> ImageNamesResult:
"""Gets ordered list of image names with metadata for optimistic updates"""
try:
result = ApiDependencies.invoker.services.images.get_image_names(
starred_first=starred_first,
order_dir=order_dir,
image_origin=image_origin,
categories=categories,
is_intermediate=is_intermediate,
board_id=board_id,
search_term=search_term,
)
return result
except Exception:
raise HTTPException(status_code=500, detail="Failed to get image names")
@images_router.post(
"/images_by_names",
operation_id="get_images_by_names",
responses={200: {"model": list[ImageDTO]}},
)
async def get_images_by_names(
image_names: list[str] = Body(embed=True, description="Object containing list of image names to fetch DTOs for"),
) -> list[ImageDTO]:
"""Gets image DTOs for the specified image names. Maintains order of input names."""
try:
image_service = ApiDependencies.invoker.services.images
# Fetch DTOs preserving the order of requested names
image_dtos: list[ImageDTO] = []
for name in image_names:
try:
dto = image_service.get_dto(name)
image_dtos.append(dto)
except Exception:
# Skip missing images - they may have been deleted between name fetch and DTO fetch
continue
return image_dtos
except Exception:
raise HTTPException(status_code=500, detail="Failed to get image DTOs")


@@ -14,14 +14,13 @@ from invokeai.app.services.session_queue.session_queue_common import (
CancelByBatchIDsResult,
CancelByDestinationResult,
ClearResult,
DeleteAllExceptCurrentResult,
DeleteByDestinationResult,
EnqueueBatchResult,
FieldIdentifier,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueStatus,
)
from invokeai.app.services.shared.pagination import CursorPaginatedResults
@@ -69,7 +68,7 @@ async def enqueue_batch(
"/{queue_id}/list",
operation_id="list_queue_items",
responses={
200: {"model": CursorPaginatedResults[SessionQueueItem]},
200: {"model": CursorPaginatedResults[SessionQueueItemDTO]},
},
)
async def list_queue_items(
@@ -78,36 +77,11 @@ async def list_queue_items(
status: Optional[QUEUE_ITEM_STATUS] = Query(default=None, description="The status of items to fetch"),
cursor: Optional[int] = Query(default=None, description="The pagination cursor"),
priority: int = Query(default=0, description="The pagination cursor priority"),
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets cursor-paginated queue items"""
) -> CursorPaginatedResults[SessionQueueItemDTO]:
"""Gets all queue items (without graphs)"""
return ApiDependencies.invoker.services.session_queue.list_queue_items(
queue_id=queue_id,
limit=limit,
status=status,
cursor=cursor,
priority=priority,
destination=destination,
)
@session_queue_router.get(
"/{queue_id}/list_all",
operation_id="list_all_queue_items",
responses={
200: {"model": list[SessionQueueItem]},
},
)
async def list_all_queue_items(
queue_id: str = Path(description="The queue id to perform this operation on"),
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
) -> list[SessionQueueItem]:
"""Gets all queue items"""
return ApiDependencies.invoker.services.session_queue.list_all_queue_items(
queue_id=queue_id,
destination=destination,
queue_id=queue_id, limit=limit, status=status, cursor=cursor, priority=priority
)
@@ -147,18 +121,6 @@ async def cancel_all_except_current(
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
@session_queue_router.put(
"/{queue_id}/delete_all_except_current",
operation_id="delete_all_except_current",
responses={200: {"model": DeleteAllExceptCurrentResult}},
)
async def delete_all_except_current(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> DeleteAllExceptCurrentResult:
"""Immediately deletes all queue items except in-processing items"""
return ApiDependencies.invoker.services.session_queue.delete_all_except_current(queue_id=queue_id)
@session_queue_router.put(
"/{queue_id}/cancel_by_batch_ids",
operation_id="cancel_by_batch_ids",
@@ -307,18 +269,6 @@ async def get_queue_item(
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
@session_queue_router.delete(
"/{queue_id}/i/{item_id}",
operation_id="delete_queue_item",
)
async def delete_queue_item(
queue_id: str = Path(description="The queue id to perform this operation on"),
item_id: int = Path(description="The queue item to delete"),
) -> None:
"""Deletes a queue item"""
ApiDependencies.invoker.services.session_queue.delete_queue_item(item_id)
@session_queue_router.put(
"/{queue_id}/i/{item_id}/cancel",
operation_id="cancel_queue_item",
@@ -348,18 +298,3 @@ async def counts_by_destination(
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
queue_id=queue_id, destination=destination
)
@session_queue_router.delete(
"/{queue_id}/d/{destination}",
operation_id="delete_by_destination",
responses={200: {"model": DeleteByDestinationResult}},
)
async def delete_by_destination(
queue_id: str = Path(description="The queue id to query"),
destination: str = Path(description="The destination to query"),
) -> DeleteByDestinationResult:
"""Deletes all items with the given destination"""
return ApiDependencies.invoker.services.session_queue.delete_by_destination(
queue_id=queue_id, destination=destination
)


@@ -158,7 +158,7 @@ web_root_path = Path(list(web_dir.__path__)[0])
try:
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
except RuntimeError:
logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
app.mount(
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
) # docs favicon is in here


@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
ui_type = field.json_schema_extra.get("ui_type", None)
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
field.json_schema_extra.pop("ui_type")
return None
@@ -615,7 +615,7 @@ def invocation(
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
uiconfig["version"] = version
else:
logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
uiconfig["version"] = "1.0.0"
cls.UIConfig = UIConfigBase(**uiconfig)


@@ -114,13 +114,6 @@ class CompelInvocation(BaseInvocation):
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
del compel
del patched_tokenizer
del tokenizer
del ti_manager
del text_encoder
del text_encoder_info
c = c.detach().to("cpu")
conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
@@ -229,10 +222,7 @@ class SDXLPromptInvocationBase:
else:
c_pooled = None
del compel
del patched_tokenizer
del tokenizer
del ti_manager
del text_encoder
del text_encoder_info


@@ -438,7 +438,7 @@ class WithWorkflow:
workflow = None
def __init_subclass__(cls) -> None:
logger.warning(
logger.warn(
f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
)
super().__init_subclass__()
@@ -579,7 +579,7 @@ def InputField(
if default_factory is not _Unset and default_factory is not None:
default = default_factory()
logger.warning('"default_factory" is not supported, calling it now to set "default"')
logger.warn('"default_factory" is not supported, calling it now to set "default"')
# These are the args we may wish pass to the pydantic `Field()` function
field_args = {


@@ -24,6 +24,7 @@ from invokeai.frontend.cli.arg_parser import InvokeAIArgs
INIT_FILE = Path("invokeai.yaml")
DB_FILE = Path("invokeai.db")
LEGACY_INIT_FILE = Path("invokeai.init")
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
@@ -92,7 +93,7 @@ class InvokeAIAppConfig(BaseSettings):
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -175,7 +176,7 @@ class InvokeAIAppConfig(BaseSettings):
pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
# DEVICE
device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
# GENERATION


@@ -5,7 +5,6 @@ from typing import Optional
from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.image_records.image_records_common import (
ImageCategory,
ImageNamesResult,
ImageRecord,
ImageRecordChanges,
ResourceOrigin,
@@ -98,17 +97,3 @@ class ImageRecordStorageBase(ABC):
def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
"""Gets the most recent image for a board."""
pass
@abstractmethod
def get_image_names(
self,
starred_first: bool = True,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
image_origin: Optional[ResourceOrigin] = None,
categories: Optional[list[ImageCategory]] = None,
is_intermediate: Optional[bool] = None,
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
"""Gets ordered list of image names with metadata for optimistic updates."""
pass


@@ -3,7 +3,7 @@ import datetime
from enum import Enum
from typing import Optional, Union
from pydantic import BaseModel, Field, StrictBool, StrictStr
from pydantic import Field, StrictBool, StrictStr
from invokeai.app.util.metaenum import MetaEnum
from invokeai.app.util.misc import get_iso_timestamp
@@ -207,16 +207,3 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
starred=starred,
has_workflow=has_workflow,
)
class ImageCollectionCounts(BaseModel):
starred_count: int = Field(description="The number of starred images in the collection.")
unstarred_count: int = Field(description="The number of unstarred images in the collection.")
class ImageNamesResult(BaseModel):
"""Response containing ordered image names with metadata for optimistic updates."""
image_names: list[str] = Field(description="Ordered list of image names")
starred_count: int = Field(description="Number of starred images (when starred_first=True)")
total_count: int = Field(description="Total number of images matching the query")


@@ -7,7 +7,6 @@ from invokeai.app.services.image_records.image_records_base import ImageRecordSt
from invokeai.app.services.image_records.image_records_common import (
IMAGE_DTO_COLS,
ImageCategory,
ImageNamesResult,
ImageRecord,
ImageRecordChanges,
ImageRecordDeleteException,
@@ -197,13 +196,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
# Search term condition
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
AND images.metadata LIKE ?
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
if starred_first:
query_pagination = f"""--sql
@@ -387,96 +382,3 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
return None
return deserialize_image_record(dict(result))
def get_image_names(
self,
starred_first: bool = True,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
image_origin: Optional[ResourceOrigin] = None,
categories: Optional[list[ImageCategory]] = None,
is_intermediate: Optional[bool] = None,
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
cursor = self._conn.cursor()
# Build query conditions (reused for both starred count and image names queries)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
if categories is not None:
category_strings = [c.value for c in set(categories)]
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
for c in category_strings:
query_params.append(c)
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
query_params.append(is_intermediate)
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
# Get starred count if starred_first is enabled
starred_count = 0
if starred_first:
starred_count_query = f"""--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE images.starred = TRUE AND (1=1{query_conditions})
"""
cursor.execute(starred_count_query, query_params)
starred_count = cast(int, cursor.fetchone()[0])
# Get all image names with proper ordering
if starred_first:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.starred DESC, images.created_at {order_dir.value}
"""
else:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.created_at {order_dir.value}
"""
cursor.execute(names_query, query_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [row[0] for row in result]
return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names))


@@ -6,7 +6,6 @@ from PIL.Image import Image as PILImageType
from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.image_records.image_records_common import (
ImageCategory,
ImageNamesResult,
ImageRecord,
ImageRecordChanges,
ResourceOrigin,
@@ -126,7 +125,7 @@ class ImageServiceABC(ABC):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> OffsetPaginatedResults[ImageDTO]:
"""Gets a paginated list of image DTOs with starred images first when starred_first=True."""
"""Gets a paginated list of image DTOs."""
pass
@abstractmethod
@@ -148,17 +147,3 @@ class ImageServiceABC(ABC):
def delete_images_on_board(self, board_id: str):
"""Deletes all images on a board."""
pass
@abstractmethod
def get_image_names(
self,
starred_first: bool = True,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
image_origin: Optional[ResourceOrigin] = None,
categories: Optional[list[ImageCategory]] = None,
is_intermediate: Optional[bool] = None,
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
"""Gets ordered list of image names with metadata for optimistic updates."""
pass


@@ -1,6 +1,6 @@
from typing import Optional
from pydantic import BaseModel, Field
from pydantic import Field
from invokeai.app.services.image_records.image_records_common import ImageRecord
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
@@ -39,27 +39,3 @@ def image_record_to_dto(
thumbnail_url=thumbnail_url,
board_id=board_id,
)
class ResultWithAffectedBoards(BaseModel):
affected_boards: list[str] = Field(description="The ids of boards affected by the delete operation")
class DeleteImagesResult(ResultWithAffectedBoards):
deleted_images: list[str] = Field(description="The names of the images that were deleted")
class StarredImagesResult(ResultWithAffectedBoards):
starred_images: list[str] = Field(description="The names of the images that were starred")
class UnstarredImagesResult(ResultWithAffectedBoards):
unstarred_images: list[str] = Field(description="The names of the images that were unstarred")
class AddImagesToBoardResult(ResultWithAffectedBoards):
added_images: list[str] = Field(description="The image names that were added to the board")
class RemoveImagesFromBoardResult(ResultWithAffectedBoards):
removed_images: list[str] = Field(description="The image names that were removed from their board")


@@ -10,7 +10,6 @@ from invokeai.app.services.image_files.image_files_common import (
)
from invokeai.app.services.image_records.image_records_common import (
ImageCategory,
ImageNamesResult,
ImageRecord,
ImageRecordChanges,
ImageRecordDeleteException,
@@ -79,7 +78,7 @@ class ImageService(ImageServiceABC):
board_id=board_id, image_name=image_name
)
except Exception as e:
self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
self.__invoker.services.image_files.save(
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
)
@@ -310,27 +309,3 @@ class ImageService(ImageServiceABC):
except Exception as e:
self.__invoker.services.logger.error("Problem getting intermediates count")
raise e
def get_image_names(
self,
starred_first: bool = True,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
image_origin: Optional[ResourceOrigin] = None,
categories: Optional[list[ImageCategory]] = None,
is_intermediate: Optional[bool] = None,
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
try:
return self.__invoker.services.image_records.get_image_names(
starred_first=starred_first,
order_dir=order_dir,
image_origin=image_origin,
categories=categories,
is_intermediate=is_intermediate,
board_id=board_id,
search_term=search_term,
)
except Exception as e:
self.__invoker.services.logger.error("Problem getting image names")
raise e


@@ -148,7 +148,7 @@ class ModelInstallService(ModelInstallServiceBase):
def _clear_pending_jobs(self) -> None:
for job in self.list_jobs():
if not job.in_terminal_state:
self._logger.warning(f"Cancelling job {job.id}")
self._logger.warning("Cancelling job {job.id}")
self.cancel_job(job)
while True:
try:


@@ -1,4 +1,3 @@
import gc
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Thread
@@ -440,12 +439,6 @@ class DefaultSessionProcessor(SessionProcessorBase):
poll_now_event.wait(self._polling_interval)
continue
# GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
# Most queue items take seconds to execute, so the relative cost of a GC is very small.
# Python will never cede allocated memory back to the OS, so anything we can do to reduce the peak
# allocation is well worth it.
gc.collect()
self._invoker.services.logger.info(
f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
)


@@ -10,8 +10,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
CancelByDestinationResult,
CancelByQueueIDResult,
ClearResult,
DeleteAllExceptCurrentResult,
DeleteByDestinationResult,
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
@@ -19,6 +17,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueStatus,
)
from invokeai.app.services.shared.graph import GraphExecutionState
@@ -93,11 +92,6 @@ class SessionQueueBase(ABC):
"""Cancels a session queue item"""
pass
@abstractmethod
def delete_queue_item(self, item_id: int) -> None:
"""Deletes a session queue item"""
pass
@abstractmethod
def fail_queue_item(
self, item_id: int, error_type: str, error_message: str, error_traceback: str
@@ -115,11 +109,6 @@ class SessionQueueBase(ABC):
"""Cancels all queue items with the given batch destination"""
pass
@abstractmethod
def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
"""Deletes all queue items with the given batch destination"""
pass
@abstractmethod
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
"""Cancels all queue items with matching queue ID"""
@@ -130,11 +119,6 @@ class SessionQueueBase(ABC):
"""Cancels all queue items except in-progress items"""
pass
@abstractmethod
def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
"""Deletes all queue items except in-progress items"""
pass
@abstractmethod
def list_queue_items(
self,
@@ -143,20 +127,10 @@ class SessionQueueBase(ABC):
priority: int,
cursor: Optional[int] = None,
status: Optional[QUEUE_ITEM_STATUS] = None,
destination: Optional[str] = None,
) -> CursorPaginatedResults[SessionQueueItem]:
) -> CursorPaginatedResults[SessionQueueItemDTO]:
"""Gets a page of session queue items"""
pass
@abstractmethod
def list_all_queue_items(
self,
queue_id: str,
destination: Optional[str] = None,
) -> list[SessionQueueItem]:
"""Gets all queue items that match the given parameters"""
pass
@abstractmethod
def get_queue_item(self, item_id: int) -> SessionQueueItem:
"""Gets a session queue item by ID"""


@@ -208,7 +208,7 @@ class FieldIdentifier(BaseModel):
user_label: str | None = Field(description="The user label of the field, if any")
class SessionQueueItem(BaseModel):
class SessionQueueItemWithoutGraph(BaseModel):
"""Session queue item without the full graph. Used for serialization."""
item_id: int = Field(description="The identifier of the session queue item")
@@ -252,7 +252,42 @@ class SessionQueueItem(BaseModel):
default=None,
description="The ID of the published workflow associated with this queue item",
)
api_input_fields: Optional[list[FieldIdentifier]] = Field(
default=None, description="The fields that were used as input to the API"
)
api_output_fields: Optional[list[FieldIdentifier]] = Field(
default=None, description="The nodes that were used as output from the API"
)
credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")
@classmethod
def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
# must parse these manually
queue_item_dict["field_values"] = get_field_values(queue_item_dict)
return SessionQueueItemDTO(**queue_item_dict)
model_config = ConfigDict(
json_schema_extra={
"required": [
"item_id",
"status",
"batch_id",
"queue_id",
"session_id",
"priority",
"session_id",
"created_at",
"updated_at",
]
}
)
class SessionQueueItemDTO(SessionQueueItemWithoutGraph):
pass
class SessionQueueItem(SessionQueueItemWithoutGraph):
session: GraphExecutionState = Field(description="The fully-populated session to be executed")
workflow: Optional[WorkflowWithoutID] = Field(
default=None, description="The workflow associated with this queue item"
@@ -363,18 +398,6 @@ class CancelByDestinationResult(CancelByBatchIDsResult):
pass
class DeleteByDestinationResult(BaseModel):
"""Result of deleting by a destination"""
deleted: int = Field(..., description="Number of queue items deleted")
class DeleteAllExceptCurrentResult(DeleteByDestinationResult):
"""Result of deleting all except current"""
pass
class CancelByQueueIDResult(CancelByBatchIDsResult):
"""Result of canceling by queue id"""


@@ -17,8 +17,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
CancelByDestinationResult,
CancelByQueueIDResult,
ClearResult,
DeleteAllExceptCurrentResult,
DeleteByDestinationResult,
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
@@ -26,6 +24,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemDTO,
SessionQueueItemNotFoundError,
SessionQueueStatus,
ValueToInsertTuple,
@@ -47,6 +46,10 @@ class SqliteSessionQueue(SessionQueueBase):
clear_result = self.clear(DEFAULT_QUEUE_ID)
if clear_result.deleted > 0:
self.__invoker.services.logger.info(f"Cleared all {clear_result.deleted} queue items")
else:
prune_result = self.prune(DEFAULT_QUEUE_ID)
if prune_result.deleted > 0:
self.__invoker.services.logger.info(f"Pruned {prune_result.deleted} finished queue items")
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
@@ -101,7 +104,11 @@ class SqliteSessionQueue(SessionQueueBase):
return cast(Union[int, None], cursor.fetchone()[0]) or 0
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
try:
cursor = self._conn.cursor()
# TODO: how does this work in a multi-user scenario?
current_queue_size = self._get_current_queue_size(queue_id)
max_queue_size = self.__invoker.services.configuration.max_queue_size
@@ -111,12 +118,8 @@ class SqliteSessionQueue(SessionQueueBase):
if prepend:
priority = self._get_highest_priority(queue_id) + 1
requested_count = await asyncio.to_thread(
calc_session_count,
batch=batch,
)
values_to_insert = await asyncio.to_thread(
prepare_values_to_insert,
requested_count = calc_session_count(batch)
values_to_insert = prepare_values_to_insert(
queue_id=queue_id,
batch=batch,
priority=priority,
@@ -124,16 +127,19 @@ class SqliteSessionQueue(SessionQueueBase):
)
enqueued_count = len(values_to_insert)
with self._conn:
cursor = self._conn.cursor()
cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
if requested_count > enqueued_count:
values_to_insert = values_to_insert[:max_new_queue_items]
cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
enqueue_result = EnqueueBatchResult(
queue_id=queue_id,
@@ -214,19 +220,6 @@ class SqliteSessionQueue(SessionQueueBase):
) -> SessionQueueItem:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status FROM session_queue WHERE item_id = ?
""",
(item_id,),
)
row = cursor.fetchone()
if row is None:
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
current_status = row[0]
# Only update if not already finished (completed, failed or canceled)
if current_status in ("completed", "failed", "canceled"):
return self.get_queue_item(item_id)
cursor.execute(
"""--sql
UPDATE session_queue
@@ -338,27 +331,6 @@ class SqliteSessionQueue(SessionQueueBase):
queue_item = self._set_queue_item_status(item_id=item_id, status="canceled")
return queue_item
def delete_queue_item(self, item_id: int) -> None:
"""Deletes a session queue item"""
try:
self.cancel_queue_item(item_id)
except SessionQueueItemNotFoundError:
pass
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
DELETE
FROM session_queue
WHERE item_id = ?
""",
(item_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
def complete_queue_item(self, item_id: int) -> SessionQueueItem:
queue_item = self._set_queue_item_status(item_id=item_id, status="completed")
return queue_item
@@ -456,71 +428,6 @@ class SqliteSessionQueue(SessionQueueBase):
raise
return CancelByDestinationResult(canceled=count)
def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
try:
cursor = self._conn.cursor()
current_queue_item = self.get_current(queue_id)
if current_queue_item is not None and current_queue_item.destination == destination:
self.cancel_queue_item(current_queue_item.item_id)
params = (queue_id, destination)
cursor.execute(
"""--sql
SELECT COUNT(*)
FROM session_queue
WHERE
queue_id = ?
AND destination = ?;
""",
params,
)
count = cursor.fetchone()[0]
cursor.execute(
"""--sql
DELETE
FROM session_queue
WHERE
queue_id = ?
AND destination = ?;
""",
params,
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return DeleteByDestinationResult(deleted=count)
def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
try:
cursor = self._conn.cursor()
where = """--sql
WHERE
queue_id == ?
AND status == 'pending'
"""
cursor.execute(
f"""--sql
SELECT COUNT(*)
FROM session_queue
{where};
""",
(queue_id,),
)
count = cursor.fetchone()[0]
cursor.execute(
f"""--sql
DELETE
FROM session_queue
{where};
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return DeleteAllExceptCurrentResult(deleted=count)
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
try:
cursor = self._conn.cursor()
@@ -636,12 +543,26 @@ class SqliteSessionQueue(SessionQueueBase):
priority: int,
cursor: Optional[int] = None,
status: Optional[QUEUE_ITEM_STATUS] = None,
destination: Optional[str] = None,
) -> CursorPaginatedResults[SessionQueueItem]:
) -> CursorPaginatedResults[SessionQueueItemDTO]:
cursor_ = self._conn.cursor()
item_id = cursor
query = """--sql
SELECT *
SELECT item_id,
status,
priority,
field_values,
error_type,
error_message,
error_traceback,
created_at,
updated_at,
completed_at,
started_at,
session_id,
batch_id,
queue_id,
origin,
destination
FROM session_queue
WHERE queue_id = ?
"""
@@ -653,12 +574,6 @@ class SqliteSessionQueue(SessionQueueBase):
"""
params.append(status)
if destination is not None:
query += """---sql
AND destination = ?
"""
params.append(destination)
if item_id is not None:
query += """--sql
AND (priority < ?) OR (priority = ? AND item_id > ?)
@@ -674,7 +589,7 @@ class SqliteSessionQueue(SessionQueueBase):
params.append(limit + 1)
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
has_more = False
if len(items) > limit:
# remove the extra item
@@ -682,37 +597,6 @@ class SqliteSessionQueue(SessionQueueBase):
has_more = True
return CursorPaginatedResults(items=items, limit=limit, has_more=has_more)
def list_all_queue_items(
self,
queue_id: str,
destination: Optional[str] = None,
) -> list[SessionQueueItem]:
"""Gets all queue items that match the given parameters"""
cursor_ = self._conn.cursor()
query = """--sql
SELECT *
FROM session_queue
WHERE queue_id = ?
"""
params: list[Union[str, int]] = [queue_id]
if destination is not None:
query += """---sql
AND destination = ?
"""
params.append(destination)
query += """--sql
ORDER BY
priority DESC,
item_id ASC
;
"""
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
return items
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
cursor = self._conn.cursor()
cursor.execute(


@@ -7,7 +7,6 @@ from typing import Any, Optional, TypeVar, Union, get_args, get_origin, get_type
import networkx as nx
from pydantic import (
BaseModel,
ConfigDict,
GetCoreSchemaHandler,
GetJsonSchemaHandler,
ValidationError,
@@ -788,22 +787,6 @@ class GraphExecutionState(BaseModel):
default_factory=dict,
)
model_config = ConfigDict(
json_schema_extra={
"required": [
"id",
"graph",
"execution_graph",
"executed",
"executed_history",
"results",
"errors",
"prepared_source_mapping",
"source_prepared_mapping",
]
}
)
@field_validator("graph")
def graph_is_valid(cls, v: Graph):
"""Validates that the graph is valid"""


@@ -42,5 +42,4 @@ IP-Adapters:
- [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
- [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
- [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)


@@ -37,7 +37,6 @@ from invokeai.app.util.misc import uuid_string
from invokeai.backend.model_hash.hash_validator import validate_hash
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
from invokeai.backend.model_manager.omi import flux_dev_1_lora, stable_diffusion_xl_1_lora
from invokeai.backend.model_manager.taxonomy import (
AnyVariant,
BaseModelType,
@@ -297,7 +296,7 @@ class LoRAConfigBase(ABC, BaseModel):
from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict
sd = mod.load_state_dict(mod.path)
value = flux_format_from_state_dict(sd, mod.metadata())
value = flux_format_from_state_dict(sd)
mod.cache[key] = value
return value
@@ -335,36 +334,6 @@ class T5EncoderBnbQuantizedLlmInt8bConfig(T5EncoderConfigBase, LegacyProbeMixin,
format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = ModelFormat.BnbQuantizedLlmInt8b
class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
format: Literal[ModelFormat.OMI] = ModelFormat.OMI
@classmethod
def matches(cls, mod: ModelOnDisk) -> bool:
if mod.path.is_dir():
return False
metadata = mod.metadata()
return (
metadata.get("modelspec.sai_model_spec")
and metadata.get("ot_branch") == "omi_format"
and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
)
@classmethod
def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
metadata = mod.metadata()
architecture = metadata["modelspec.architecture"]
if architecture == stable_diffusion_xl_1_lora:
base = BaseModelType.StableDiffusionXL
elif architecture == flux_dev_1_lora:
base = BaseModelType.Flux
else:
raise InvalidModelConfigException(f"Unrecognised/unsupported architecture for OMI LoRA: {architecture}")
return {"base": base}
class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):
"""Model config for LoRA/Lycoris models."""
@@ -381,7 +350,7 @@ class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):
state_dict = mod.load_state_dict()
for key in state_dict.keys():
if isinstance(key, int):
if type(key) is int:
continue
if key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")):
@@ -699,7 +668,6 @@ AnyModelConfig = Annotated[
Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
Annotated[LoRAOmiConfig, LoRAOmiConfig.get_tag()],
Annotated[ControlLoRALyCORISConfig, ControlLoRALyCORISConfig.get_tag()],
Annotated[ControlLoRADiffusersConfig, ControlLoRADiffusersConfig.get_tag()],
Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],


@@ -13,7 +13,6 @@ from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.omi.omi import convert_from_omi
from invokeai.backend.model_manager.taxonomy import (
AnyModel,
BaseModelType,
@@ -21,10 +20,6 @@ from invokeai.backend.model_manager.taxonomy import (
ModelType,
SubModelType,
)
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
is_state_dict_likely_in_flux_aitoolkit_format,
lora_model_from_flux_aitoolkit_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
is_state_dict_likely_flux_control,
lora_model_from_flux_control_state_dict,
@@ -44,8 +39,6 @@ from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import l
from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.LoRA, format=ModelFormat.OMI)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.LoRA, format=ModelFormat.OMI)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.LyCORIS)
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlLoRa, format=ModelFormat.LyCORIS)
@@ -80,23 +73,12 @@ class LoRALoader(ModelLoader):
else:
state_dict = torch.load(model_path, map_location="cpu")
# Strip 'bundle_emb' keys - these are unused and currently cause downstream errors.
# To revisit later to determine if they're needed/useful.
state_dict = {k: v for k, v in state_dict.items() if not k.startswith("bundle_emb")}
# At the time of writing, we support the OMI standard for base models Flux and SDXL
if config.format == ModelFormat.OMI and self._model_base in [
BaseModelType.StableDiffusionXL,
BaseModelType.Flux,
]:
state_dict = convert_from_omi(state_dict, config.base) # type: ignore
# Apply state_dict key conversions, if necessary.
if self._model_base == BaseModelType.StableDiffusionXL:
state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
model = lora_model_from_sd_state_dict(state_dict=state_dict)
elif self._model_base == BaseModelType.Flux:
if config.format in [ModelFormat.Diffusers, ModelFormat.OMI]:
if config.format == ModelFormat.Diffusers:
# HACK(ryand): We set alpha=None for diffusers PEFT format models. These models are typically
# distributed as a single file without the associated metadata containing the alpha value. We chose
# alpha=None, because this is treated as alpha=rank internally in `LoRALayerBase.scale()`. alpha=rank
@@ -110,10 +92,8 @@ class LoRALoader(ModelLoader):
model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
elif is_state_dict_likely_flux_control(state_dict=state_dict):
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
else:
raise ValueError("LoRA model is in unsupported FLUX format")
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:


@@ -1,7 +0,0 @@
from invokeai.backend.model_manager.omi.omi import convert_from_omi
from invokeai.backend.model_manager.omi.vendor.model_spec.architecture import (
flux_dev_1_lora,
stable_diffusion_xl_1_lora,
)
__all__ = ["flux_dev_1_lora", "stable_diffusion_xl_1_lora", "convert_from_omi"]


@@ -1,21 +0,0 @@
from invokeai.backend.model_manager.model_on_disk import StateDict
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
convert_flux_lora as omi_flux,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
convert_lora_util as lora_util,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
convert_sdxl_lora as omi_sdxl,
)
from invokeai.backend.model_manager.taxonomy import BaseModelType
def convert_from_omi(weights_sd: StateDict, base: BaseModelType):
keyset = {
BaseModelType.Flux: omi_flux.convert_flux_lora_key_sets(),
BaseModelType.StableDiffusionXL: omi_sdxl.convert_sdxl_lora_key_sets(),
}[base]
source = "omi"
target = "legacy_diffusers"
return lora_util.__convert(weights_sd, keyset, source, target) # type: ignore


@@ -1,20 +0,0 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
LoraConversionKeySet,
map_prefix_range,
)
def map_clip(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("text_projection", "text_projection", parent=key_prefix)]
for k in map_prefix_range("text_model.encoder.layers", "text_model.encoder.layers", parent=key_prefix):
keys += [LoraConversionKeySet("mlp.fc1", "mlp.fc1", parent=k)]
keys += [LoraConversionKeySet("mlp.fc2", "mlp.fc2", parent=k)]
keys += [LoraConversionKeySet("self_attn.k_proj", "self_attn.k_proj", parent=k)]
keys += [LoraConversionKeySet("self_attn.out_proj", "self_attn.out_proj", parent=k)]
keys += [LoraConversionKeySet("self_attn.q_proj", "self_attn.q_proj", parent=k)]
keys += [LoraConversionKeySet("self_attn.v_proj", "self_attn.v_proj", parent=k)]
return keys


@@ -1,84 +0,0 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_clip import map_clip
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
LoraConversionKeySet,
map_prefix_range,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_t5 import map_t5
def __map_double_transformer_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("img_attn.qkv.0", "attn.to_q", parent=key_prefix)]
keys += [LoraConversionKeySet("img_attn.qkv.1", "attn.to_k", parent=key_prefix)]
keys += [LoraConversionKeySet("img_attn.qkv.2", "attn.to_v", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_attn.qkv.0", "attn.add_q_proj", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_attn.qkv.1", "attn.add_k_proj", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_attn.qkv.2", "attn.add_v_proj", parent=key_prefix)]
keys += [LoraConversionKeySet("img_attn.proj", "attn.to_out.0", parent=key_prefix)]
keys += [LoraConversionKeySet("img_mlp.0", "ff.net.0.proj", parent=key_prefix)]
keys += [LoraConversionKeySet("img_mlp.2", "ff.net.2", parent=key_prefix)]
keys += [LoraConversionKeySet("img_mod.lin", "norm1.linear", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_attn.proj", "attn.to_add_out", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_mlp.0", "ff_context.net.0.proj", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_mlp.2", "ff_context.net.2", parent=key_prefix)]
keys += [LoraConversionKeySet("txt_mod.lin", "norm1_context.linear", parent=key_prefix)]
return keys
def __map_single_transformer_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("linear1.0", "attn.to_q", parent=key_prefix)]
keys += [LoraConversionKeySet("linear1.1", "attn.to_k", parent=key_prefix)]
keys += [LoraConversionKeySet("linear1.2", "attn.to_v", parent=key_prefix)]
keys += [LoraConversionKeySet("linear1.3", "proj_mlp", parent=key_prefix)]
keys += [LoraConversionKeySet("linear2", "proj_out", parent=key_prefix)]
keys += [LoraConversionKeySet("modulation.lin", "norm.linear", parent=key_prefix)]
return keys
def __map_transformer(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("txt_in", "context_embedder", parent=key_prefix)]
keys += [
LoraConversionKeySet("final_layer.adaLN_modulation.1", "norm_out.linear", parent=key_prefix, swap_chunks=True)
]
keys += [LoraConversionKeySet("final_layer.linear", "proj_out", parent=key_prefix)]
keys += [
LoraConversionKeySet("guidance_in.in_layer", "time_text_embed.guidance_embedder.linear_1", parent=key_prefix)
]
keys += [
LoraConversionKeySet("guidance_in.out_layer", "time_text_embed.guidance_embedder.linear_2", parent=key_prefix)
]
keys += [LoraConversionKeySet("vector_in.in_layer", "time_text_embed.text_embedder.linear_1", parent=key_prefix)]
keys += [LoraConversionKeySet("vector_in.out_layer", "time_text_embed.text_embedder.linear_2", parent=key_prefix)]
keys += [LoraConversionKeySet("time_in.in_layer", "time_text_embed.timestep_embedder.linear_1", parent=key_prefix)]
keys += [LoraConversionKeySet("time_in.out_layer", "time_text_embed.timestep_embedder.linear_2", parent=key_prefix)]
keys += [LoraConversionKeySet("img_in.proj", "x_embedder", parent=key_prefix)]
for k in map_prefix_range("double_blocks", "transformer_blocks", parent=key_prefix):
keys += __map_double_transformer_block(k)
for k in map_prefix_range("single_blocks", "single_transformer_blocks", parent=key_prefix):
keys += __map_single_transformer_block(k)
return keys
def convert_flux_lora_key_sets() -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("bundle_emb", "bundle_emb")]
keys += __map_transformer(LoraConversionKeySet("transformer", "lora_transformer"))
keys += map_clip(LoraConversionKeySet("clip_l", "lora_te1"))
keys += map_t5(LoraConversionKeySet("t5", "lora_te2"))
return keys


@@ -1,217 +0,0 @@
import torch
from torch import Tensor
from typing_extensions import Self
class LoraConversionKeySet:
def __init__(
self,
omi_prefix: str,
diffusers_prefix: str,
legacy_diffusers_prefix: str | None = None,
parent: Self | None = None,
swap_chunks: bool = False,
filter_is_last: bool | None = None,
next_omi_prefix: str | None = None,
next_diffusers_prefix: str | None = None,
):
if parent is not None:
self.omi_prefix = combine(parent.omi_prefix, omi_prefix)
self.diffusers_prefix = combine(parent.diffusers_prefix, diffusers_prefix)
else:
self.omi_prefix = omi_prefix
self.diffusers_prefix = diffusers_prefix
if legacy_diffusers_prefix is None:
self.legacy_diffusers_prefix = self.diffusers_prefix.replace(".", "_")
elif parent is not None:
self.legacy_diffusers_prefix = combine(parent.legacy_diffusers_prefix, legacy_diffusers_prefix).replace(
".", "_"
)
else:
self.legacy_diffusers_prefix = legacy_diffusers_prefix
self.parent = parent
self.swap_chunks = swap_chunks
self.filter_is_last = filter_is_last
self.prefix = parent
if next_omi_prefix is None and parent is not None:
self.next_omi_prefix = parent.next_omi_prefix
self.next_diffusers_prefix = parent.next_diffusers_prefix
self.next_legacy_diffusers_prefix = parent.next_legacy_diffusers_prefix
elif next_omi_prefix is not None and parent is not None:
self.next_omi_prefix = combine(parent.omi_prefix, next_omi_prefix)
self.next_diffusers_prefix = combine(parent.diffusers_prefix, next_diffusers_prefix)
self.next_legacy_diffusers_prefix = combine(parent.legacy_diffusers_prefix, next_diffusers_prefix).replace(
".", "_"
)
elif next_omi_prefix is not None and parent is None:
self.next_omi_prefix = next_omi_prefix
self.next_diffusers_prefix = next_diffusers_prefix
self.next_legacy_diffusers_prefix = next_diffusers_prefix.replace(".", "_")
else:
self.next_omi_prefix = None
self.next_diffusers_prefix = None
self.next_legacy_diffusers_prefix = None
def __get_omi(self, in_prefix: str, key: str) -> str:
return self.omi_prefix + key.removeprefix(in_prefix)
def __get_diffusers(self, in_prefix: str, key: str) -> str:
return self.diffusers_prefix + key.removeprefix(in_prefix)
def __get_legacy_diffusers(self, in_prefix: str, key: str) -> str:
key = self.legacy_diffusers_prefix + key.removeprefix(in_prefix)
suffix = key[key.rfind(".") :]
if suffix not in [".alpha", ".dora_scale"]: # some keys only have a single . in the suffix
suffix = key[key.removesuffix(suffix).rfind(".") :]
key = key.removesuffix(suffix)
return key.replace(".", "_") + suffix
def get_key(self, in_prefix: str, key: str, target: str) -> str:
if target == "omi":
return self.__get_omi(in_prefix, key)
elif target == "diffusers":
return self.__get_diffusers(in_prefix, key)
elif target == "legacy_diffusers":
return self.__get_legacy_diffusers(in_prefix, key)
return key
def __str__(self) -> str:
return f"omi: {self.omi_prefix}, diffusers: {self.diffusers_prefix}, legacy: {self.legacy_diffusers_prefix}"
def combine(left: str, right: str) -> str:
left = left.rstrip(".")
right = right.lstrip(".")
if left == "" or left is None:
return right
elif right == "" or right is None:
return left
else:
return left + "." + right
def map_prefix_range(
omi_prefix: str,
diffusers_prefix: str,
parent: LoraConversionKeySet,
) -> list[LoraConversionKeySet]:
# 100 should be a safe upper bound. increase if it's not enough in the future
return [
LoraConversionKeySet(
omi_prefix=f"{omi_prefix}.{i}",
diffusers_prefix=f"{diffusers_prefix}.{i}",
parent=parent,
next_omi_prefix=f"{omi_prefix}.{i + 1}",
next_diffusers_prefix=f"{diffusers_prefix}.{i + 1}",
)
for i in range(100)
]
def __convert(
state_dict: dict[str, Tensor],
key_sets: list[LoraConversionKeySet],
source: str,
target: str,
) -> dict[str, Tensor]:
out_states = {}
if source == target:
return dict(state_dict)
# TODO: maybe replace with a non O(n^2) algorithm
for key, tensor in state_dict.items():
for key_set in key_sets:
in_prefix = ""
if source == "omi":
in_prefix = key_set.omi_prefix
elif source == "diffusers":
in_prefix = key_set.diffusers_prefix
elif source == "legacy_diffusers":
in_prefix = key_set.legacy_diffusers_prefix
if not key.startswith(in_prefix):
continue
if key_set.filter_is_last is not None:
next_prefix = None
if source == "omi":
next_prefix = key_set.next_omi_prefix
elif source == "diffusers":
next_prefix = key_set.next_diffusers_prefix
elif source == "legacy_diffusers":
next_prefix = key_set.next_legacy_diffusers_prefix
is_last = not any(k.startswith(next_prefix) for k in state_dict)
if key_set.filter_is_last != is_last:
continue
name = key_set.get_key(in_prefix, key, target)
can_swap_chunks = target == "omi" or source == "omi"
if key_set.swap_chunks and name.endswith(".lora_up.weight") and can_swap_chunks:
chunk_0, chunk_1 = tensor.chunk(2, dim=0)
tensor = torch.cat([chunk_1, chunk_0], dim=0)
out_states[name] = tensor
break # only map the first matching key set
return out_states
def __detect_source(
state_dict: dict[str, Tensor],
key_sets: list[LoraConversionKeySet],
) -> str:
omi_count = 0
diffusers_count = 0
legacy_diffusers_count = 0
for key in state_dict:
for key_set in key_sets:
if key.startswith(key_set.omi_prefix):
omi_count += 1
if key.startswith(key_set.diffusers_prefix):
diffusers_count += 1
if key.startswith(key_set.legacy_diffusers_prefix):
legacy_diffusers_count += 1
if omi_count > diffusers_count and omi_count > legacy_diffusers_count:
return "omi"
if diffusers_count > omi_count and diffusers_count > legacy_diffusers_count:
return "diffusers"
if legacy_diffusers_count > omi_count and legacy_diffusers_count > diffusers_count:
return "legacy_diffusers"
return ""
def convert_to_omi(
state_dict: dict[str, Tensor],
key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
source = __detect_source(state_dict, key_sets)
return __convert(state_dict, key_sets, source, "omi")
def convert_to_diffusers(
state_dict: dict[str, Tensor],
key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
source = __detect_source(state_dict, key_sets)
return __convert(state_dict, key_sets, source, "diffusers")
def convert_to_legacy_diffusers(
state_dict: dict[str, Tensor],
key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
source = __detect_source(state_dict, key_sets)
return __convert(state_dict, key_sets, source, "legacy_diffusers")
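Since the legacy-diffusers naming is the least obvious of the three targets handled above, here is a self-contained sketch of the suffix-preserving rewrite performed by `__get_legacy_diffusers`, using the `clip_l` → `lora_te1` prefixes from `map_clip` elsewhere in this diff; treat it as an illustration rather than a drop-in replacement:

```python
def to_legacy_diffusers_key(legacy_prefix: str, in_prefix: str, key: str) -> str:
    # Everything except the trailing ".lora_down.weight" / ".lora_up.weight" /
    # ".alpha" style suffix has its dots replaced by underscores.
    key = legacy_prefix + key.removeprefix(in_prefix)
    suffix = key[key.rfind(".") :]
    if suffix not in [".alpha", ".dora_scale"]:  # these suffixes contain a single "."
        suffix = key[key.removesuffix(suffix).rfind(".") :]
    key = key.removesuffix(suffix)
    return key.replace(".", "_") + suffix


print(
    to_legacy_diffusers_key(
        "lora_te1_text_model_encoder_layers_0_mlp_fc1",
        "clip_l.text_model.encoder.layers.0.mlp.fc1",
        "clip_l.text_model.encoder.layers.0.mlp.fc1.lora_down.weight",
    )
)
# -> lora_te1_text_model_encoder_layers_0_mlp_fc1.lora_down.weight
```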


@@ -1,125 +0,0 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_clip import map_clip
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
LoraConversionKeySet,
map_prefix_range,
)
def __map_unet_resnet_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("emb_layers.1", "time_emb_proj", parent=key_prefix)]
keys += [LoraConversionKeySet("in_layers.2", "conv1", parent=key_prefix)]
keys += [LoraConversionKeySet("out_layers.3", "conv2", parent=key_prefix)]
keys += [LoraConversionKeySet("skip_connection", "conv_shortcut", parent=key_prefix)]
return keys
def __map_unet_attention_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("proj_in", "proj_in", parent=key_prefix)]
keys += [LoraConversionKeySet("proj_out", "proj_out", parent=key_prefix)]
for k in map_prefix_range("transformer_blocks", "transformer_blocks", parent=key_prefix):
keys += [LoraConversionKeySet("attn1.to_q", "attn1.to_q", parent=k)]
keys += [LoraConversionKeySet("attn1.to_k", "attn1.to_k", parent=k)]
keys += [LoraConversionKeySet("attn1.to_v", "attn1.to_v", parent=k)]
keys += [LoraConversionKeySet("attn1.to_out.0", "attn1.to_out.0", parent=k)]
keys += [LoraConversionKeySet("attn2.to_q", "attn2.to_q", parent=k)]
keys += [LoraConversionKeySet("attn2.to_k", "attn2.to_k", parent=k)]
keys += [LoraConversionKeySet("attn2.to_v", "attn2.to_v", parent=k)]
keys += [LoraConversionKeySet("attn2.to_out.0", "attn2.to_out.0", parent=k)]
keys += [LoraConversionKeySet("ff.net.0.proj", "ff.net.0.proj", parent=k)]
keys += [LoraConversionKeySet("ff.net.2", "ff.net.2", parent=k)]
return keys
def __map_unet_down_blocks(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += __map_unet_resnet_block(LoraConversionKeySet("1.0", "0.resnets.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("2.0", "0.resnets.1", parent=key_prefix))
keys += [LoraConversionKeySet("3.0.op", "0.downsamplers.0.conv", parent=key_prefix)]
keys += __map_unet_resnet_block(LoraConversionKeySet("4.0", "1.resnets.0", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("4.1", "1.attentions.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("5.0", "1.resnets.1", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("5.1", "1.attentions.1", parent=key_prefix))
keys += [LoraConversionKeySet("6.0.op", "1.downsamplers.0.conv", parent=key_prefix)]
keys += __map_unet_resnet_block(LoraConversionKeySet("7.0", "2.resnets.0", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("7.1", "2.attentions.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("8.0", "2.resnets.1", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("8.1", "2.attentions.1", parent=key_prefix))
return keys
def __map_unet_mid_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += __map_unet_resnet_block(LoraConversionKeySet("0", "resnets.0", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("1", "attentions.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("2", "resnets.1", parent=key_prefix))
return keys
def __map_unet_up_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += __map_unet_resnet_block(LoraConversionKeySet("0.0", "0.resnets.0", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("0.1", "0.attentions.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("1.0", "0.resnets.1", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("1.1", "0.attentions.1", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("2.0", "0.resnets.2", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("2.1", "0.attentions.2", parent=key_prefix))
keys += [LoraConversionKeySet("2.2.conv", "0.upsamplers.0.conv", parent=key_prefix)]
keys += __map_unet_resnet_block(LoraConversionKeySet("3.0", "1.resnets.0", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("3.1", "1.attentions.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("4.0", "1.resnets.1", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("4.1", "1.attentions.1", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("5.0", "1.resnets.2", parent=key_prefix))
keys += __map_unet_attention_block(LoraConversionKeySet("5.1", "1.attentions.2", parent=key_prefix))
keys += [LoraConversionKeySet("5.2.conv", "1.upsamplers.0.conv", parent=key_prefix)]
keys += __map_unet_resnet_block(LoraConversionKeySet("6.0", "2.resnets.0", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("7.0", "2.resnets.1", parent=key_prefix))
keys += __map_unet_resnet_block(LoraConversionKeySet("8.0", "2.resnets.2", parent=key_prefix))
return keys
def __map_unet(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("input_blocks.0.0", "conv_in", parent=key_prefix)]
keys += [LoraConversionKeySet("time_embed.0", "time_embedding.linear_1", parent=key_prefix)]
keys += [LoraConversionKeySet("time_embed.2", "time_embedding.linear_2", parent=key_prefix)]
keys += [LoraConversionKeySet("label_emb.0.0", "add_embedding.linear_1", parent=key_prefix)]
keys += [LoraConversionKeySet("label_emb.0.2", "add_embedding.linear_2", parent=key_prefix)]
keys += __map_unet_down_blocks(LoraConversionKeySet("input_blocks", "down_blocks", parent=key_prefix))
keys += __map_unet_mid_block(LoraConversionKeySet("middle_block", "mid_block", parent=key_prefix))
keys += __map_unet_up_block(LoraConversionKeySet("output_blocks", "up_blocks", parent=key_prefix))
keys += [LoraConversionKeySet("out.0", "conv_norm_out", parent=key_prefix)]
keys += [LoraConversionKeySet("out.2", "conv_out", parent=key_prefix)]
return keys
def convert_sdxl_lora_key_sets() -> list[LoraConversionKeySet]:
keys = []
keys += [LoraConversionKeySet("bundle_emb", "bundle_emb")]
keys += __map_unet(LoraConversionKeySet("unet", "lora_unet"))
keys += map_clip(LoraConversionKeySet("clip_l", "lora_te1"))
keys += map_clip(LoraConversionKeySet("clip_g", "lora_te2"))
return keys


@@ -1,19 +0,0 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
LoraConversionKeySet,
map_prefix_range,
)
def map_t5(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
keys = []
for k in map_prefix_range("encoder.block", "encoder.block", parent=key_prefix):
keys += [LoraConversionKeySet("layer.0.SelfAttention.k", "layer.0.SelfAttention.k", parent=k)]
keys += [LoraConversionKeySet("layer.0.SelfAttention.o", "layer.0.SelfAttention.o", parent=k)]
keys += [LoraConversionKeySet("layer.0.SelfAttention.q", "layer.0.SelfAttention.q", parent=k)]
keys += [LoraConversionKeySet("layer.0.SelfAttention.v", "layer.0.SelfAttention.v", parent=k)]
keys += [LoraConversionKeySet("layer.1.DenseReluDense.wi_0", "layer.1.DenseReluDense.wi_0", parent=k)]
keys += [LoraConversionKeySet("layer.1.DenseReluDense.wi_1", "layer.1.DenseReluDense.wi_1", parent=k)]
keys += [LoraConversionKeySet("layer.1.DenseReluDense.wo", "layer.1.DenseReluDense.wo", parent=k)]
return keys


@@ -1,31 +0,0 @@
stable_diffusion_1_lora = "stable-diffusion-v1/lora"
stable_diffusion_1_inpainting_lora = "stable-diffusion-v1-inpainting/lora"
stable_diffusion_2_512_lora = "stable-diffusion-v2-512/lora"
stable_diffusion_2_768_v_lora = "stable-diffusion-v2-768-v/lora"
stable_diffusion_2_depth_lora = "stable-diffusion-v2-depth/lora"
stable_diffusion_2_inpainting_lora = "stable-diffusion-v2-inpainting/lora"
stable_diffusion_3_medium_lora = "stable-diffusion-v3-medium/lora"
stable_diffusion_35_medium_lora = "stable-diffusion-v3.5-medium/lora"
stable_diffusion_35_large_lora = "stable-diffusion-v3.5-large/lora"
stable_diffusion_xl_1_lora = "stable-diffusion-xl-v1-base/lora"
stable_diffusion_xl_1_inpainting_lora = "stable-diffusion-xl-v1-base-inpainting/lora"
wuerstchen_2_lora = "wuerstchen-v2-prior/lora"
stable_cascade_1_stage_a_lora = "stable-cascade-v1-stage-a/lora"
stable_cascade_1_stage_b_lora = "stable-cascade-v1-stage-b/lora"
stable_cascade_1_stage_c_lora = "stable-cascade-v1-stage-c/lora"
pixart_alpha_lora = "pixart-alpha/lora"
pixart_sigma_lora = "pixart-sigma/lora"
flux_dev_1_lora = "Flux.1-dev/lora"
flux_fill_dev_1_lora = "Flux.1-fill-dev/lora"
sana_lora = "sana/lora"
hunyuan_video_lora = "hunyuan-video/lora"
hi_dream_i1_lora = "hidream-i1/lora"


@@ -297,15 +297,6 @@ ip_adapter_sdxl = StarterModel(
dependencies=[ip_adapter_sdxl_image_encoder],
previous_names=["IP Adapter SDXL"],
)
ip_adapter_plus_sdxl = StarterModel(
name="Precise Reference (IP Adapter Plus ViT-H)",
base=BaseModelType.StableDiffusionXL,
source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
description="References images with a higher degree of precision.",
type=ModelType.IPAdapter,
dependencies=[ip_adapter_sdxl_image_encoder],
previous_names=["IP Adapter Plus SDXL"],
)
ip_adapter_flux = StarterModel(
name="Standard Reference (XLabs FLUX IP-Adapter v2)",
base=BaseModelType.Flux,
@@ -681,7 +672,6 @@ STARTER_MODELS: list[StarterModel] = [
ip_adapter_plus_sd1,
ip_adapter_plus_face_sd1,
ip_adapter_sdxl,
ip_adapter_plus_sdxl,
ip_adapter_flux,
qr_code_cnet_sd1,
qr_code_cnet_sdxl,
@@ -754,7 +744,6 @@ sdxl_bundle: list[StarterModel] = [
juggernaut_sdxl,
sdxl_fp16_vae_fix,
ip_adapter_sdxl,
ip_adapter_plus_sdxl,
canny_sdxl,
depth_sdxl,
softedge_sdxl,


@@ -89,7 +89,6 @@ class ModelVariantType(str, Enum):
class ModelFormat(str, Enum):
"""Storage format of model."""
OMI = "omi"
Diffusers = "diffusers"
Checkpoint = "checkpoint"
LyCORIS = "lycoris"
@@ -139,7 +138,6 @@ class FluxLoRAFormat(str, Enum):
Kohya = "flux.kohya"
OneTrainer = "flux.onetrainer"
Control = "flux.control"
AIToolkit = "flux.aitoolkit"
AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]


@@ -46,10 +46,6 @@ class ModelPatcher:
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
ti_list: List[Tuple[str, TextualInversionModelRaw]],
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
if len(ti_list) == 0:
yield tokenizer, TextualInversionManager(tokenizer)
return
init_tokens_count = None
new_tokens_added = None


@@ -1,63 +0,0 @@
import json
from dataclasses import dataclass, field
from typing import Any
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.util import InvokeAILogger
def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] | None = None) -> bool:
if metadata:
try:
software = json.loads(metadata.get("software", "{}"))
except json.JSONDecodeError:
return False
return software.get("name") == "ai-toolkit"
    # No metadata was provided; fall back to checking the state dict key prefixes.
return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())
@dataclass
class GroupedStateDict:
transformer: dict[str, Any] = field(default_factory=dict)
# might also grow CLIP and T5 submodels
def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
logger = InvokeAILogger.get_logger()
grouped = GroupedStateDict()
for key, value in state_dict.items():
submodel_name, param_name = key.split(".", 1)
match submodel_name:
case "diffusion_model":
grouped.transformer[param_name] = value
case _:
logger.warning(f"Unexpected submodel name: {submodel_name}")
return grouped
def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Renames keys from the PEFT LoRA format to the InvokeAI format."""
renamed_state_dict = {}
for key, value in state_dict.items():
renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
renamed_state_dict[renamed_key] = value
return renamed_state_dict
def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
state_dict = _rename_peft_lora_keys(state_dict)
by_layer = _group_by_layer(state_dict)
by_model = _group_state_by_submodel(by_layer)
layers: dict[str, BaseLayerPatch] = {}
for layer_key, layer_state_dict in by_model.transformer.items():
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
return ModelPatchRaw(layers=layers)
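For context on the deleted detector above, a hedged, self-contained sketch that mirrors its two code paths — safetensors header metadata first, key-prefix fallback second; the example metadata values are illustrative:

```python
import json
from typing import Any


def looks_like_aitoolkit(state_dict: dict[str, Any], metadata: dict[str, Any] | None = None) -> bool:
    # Prefer the "software" entry of the safetensors header metadata, if present.
    if metadata:
        try:
            software = json.loads(metadata.get("software", "{}"))
        except json.JSONDecodeError:
            return False
        return software.get("name") == "ai-toolkit"
    # Otherwise fall back to the top-level "diffusion_model" key prefix.
    return any(k.split(".", 1)[0] == "diffusion_model" for k in state_dict)


print(looks_like_aitoolkit({}, {"software": json.dumps({"name": "ai-toolkit"})}))  # True
print(looks_like_aitoolkit({"diffusion_model.x.lora_A.weight": 0}))                # True
print(looks_like_aitoolkit({"lora_unet_x.lora_down.weight": 0}))                   # False
```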


@@ -1,7 +1,4 @@
from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
is_state_dict_likely_in_flux_aitoolkit_format,
)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
is_state_dict_likely_in_flux_diffusers_format,
@@ -14,7 +11,7 @@ from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_u
)
def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
def flux_format_from_state_dict(state_dict):
if is_state_dict_likely_in_flux_kohya_format(state_dict):
return FluxLoRAFormat.Kohya
elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):
@@ -23,7 +20,5 @@ def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None)
return FluxLoRAFormat.Diffusers
elif is_state_dict_likely_flux_control(state_dict):
return FluxLoRAFormat.Control
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
return FluxLoRAFormat.AIToolkit
else:
return None


@@ -9,16 +9,13 @@ module.exports = {
// https://github.com/qdanik/eslint-plugin-path
'path/no-relative-imports': ['error', { maxDepth: 0 }],
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
// TODO: ENABLE THIS RULE BEFORE v6.0.0
// 'i18next/no-literal-string': 'error',
'i18next/no-literal-string': 'error',
// https://eslint.org/docs/latest/rules/no-console
'no-console': 'warn',
'no-console': 'error',
// https://eslint.org/docs/latest/rules/no-promise-executor-return
'no-promise-executor-return': 'error',
// https://eslint.org/docs/latest/rules/require-await
'require-await': 'error',
// TODO: ENABLE THIS RULE BEFORE v6.0.0
'react/display-name': 'off',
'no-restricted-properties': [
'error',
{
@@ -33,27 +30,6 @@ module.exports = {
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],
'no-restricted-imports': [
'error',
{
paths: [
{
name: 'lodash-es',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'lodash-es',
message: 'Please use es-toolkit instead.',
},
{
name: 'es-toolkit',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
],
},
],
},
overrides: [
/**


@@ -3,8 +3,6 @@ import type { KnipConfig } from 'knip';
const config: KnipConfig = {
project: ['src/**/*.{ts,tsx}!'],
ignore: [
// TODO(psyche): temporarily ignored all files for test build purposes
'src/**',
// This file is only used during debugging
'src/app/store/middleware/debugLoggerMiddleware.ts',
// Autogenerated types - shouldn't ever touch these


@@ -52,58 +52,56 @@
}
},
"dependencies": {
"@atlaskit/pragmatic-drag-and-drop": "^1.7.4",
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.1",
"@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.1.0",
"@dagrejs/dagre": "^1.1.5",
"@atlaskit/pragmatic-drag-and-drop": "^1.5.3",
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.0",
"@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.0.3",
"@dagrejs/dagre": "^1.1.4",
"@dagrejs/graphlib": "^2.2.4",
"@fontsource-variable/inter": "^5.2.6",
"@fontsource-variable/inter": "^5.2.5",
"@invoke-ai/ui-library": "^0.0.46",
"@nanostores/react": "^1.0.0",
"@observ33r/object-equals": "^1.1.4",
"@reduxjs/toolkit": "2.8.2",
"@reduxjs/toolkit": "2.7.0",
"@roarr/browser-log-writer": "^1.3.0",
"@xyflow/react": "^12.7.1",
"@xyflow/react": "^12.6.0",
"async-mutex": "^0.5.0",
"chakra-react-select": "^4.9.2",
"cmdk": "^1.1.1",
"compare-versions": "^6.1.1",
"dockview": "^4.4.0",
"es-toolkit": "^1.39.5",
"filesize": "^10.1.6",
"fracturedjsonjs": "^4.1.0",
"fracturedjsonjs": "^4.0.2",
"framer-motion": "^11.10.0",
"i18next": "^25.2.1",
"i18next": "^25.0.1",
"i18next-http-backend": "^3.0.2",
"idb-keyval": "^6.2.2",
"idb-keyval": "^6.2.1",
"jsondiffpatch": "^0.7.3",
"konva": "^9.3.20",
"linkify-react": "^4.3.1",
"linkifyjs": "^4.3.1",
"linkify-react": "^4.2.0",
"linkifyjs": "^4.2.0",
"lodash-es": "^4.17.21",
"lru-cache": "^11.1.0",
"mtwist": "^1.0.2",
"nanoid": "^5.1.5",
"nanostores": "^1.0.1",
"new-github-issue-url": "^1.1.0",
"overlayscrollbars": "^2.11.4",
"overlayscrollbars": "^2.11.1",
"overlayscrollbars-react": "^0.5.6",
"perfect-freehand": "^1.2.2",
"query-string": "^9.2.1",
"query-string": "^9.1.1",
"raf-throttle": "^2.0.6",
"react": "^18.3.1",
"react-colorful": "^5.6.1",
"react-dom": "^18.3.1",
"react-dropzone": "^14.3.8",
"react-error-boundary": "^5.0.0",
"react-hook-form": "^7.58.1",
"react-hook-form": "^7.56.1",
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^15.5.3",
"react-i18next": "^15.5.1",
"react-icons": "^5.5.0",
"react-redux": "9.2.0",
"react-resizable-panels": "^3.0.3",
"react-resizable-panels": "^2.1.8",
"react-textarea-autosize": "^8.5.9",
"react-use": "^17.6.0",
"react-virtuoso": "^4.13.0",
"react-virtuoso": "^4.12.6",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^5.2.0",
"redux-undo": "^1.1.0",
@@ -111,12 +109,12 @@
"roarr": "^7.21.1",
"serialize-error": "^12.0.0",
"socket.io-client": "^4.8.1",
"stable-hash": "^0.0.6",
"use-debounce": "^10.0.5",
"stable-hash": "^0.0.5",
"use-debounce": "^10.0.4",
"use-device-pixel-ratio": "^1.1.2",
"uuid": "^11.1.0",
"zod": "^3.25.67",
"zod-validation-error": "^3.5.2"
"zod": "^3.24.3",
"zod-validation-error": "^3.4.0"
},
"peerDependencies": {
"react": "^18.2.0",
@@ -133,6 +131,7 @@
"@storybook/react": "^8.6.12",
"@storybook/react-vite": "^8.6.12",
"@storybook/theming": "^8.6.12",
"@types/lodash-es": "^4.17.12",
"@types/node": "^22.15.1",
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",


@@ -6,41 +6,38 @@ settings:
dependencies:
'@atlaskit/pragmatic-drag-and-drop':
specifier: ^1.7.4
version: 1.7.4
specifier: ^1.5.3
version: 1.5.3
'@atlaskit/pragmatic-drag-and-drop-auto-scroll':
specifier: ^2.1.1
version: 2.1.1
specifier: ^2.1.0
version: 2.1.0
'@atlaskit/pragmatic-drag-and-drop-hitbox':
specifier: ^1.1.0
version: 1.1.0
specifier: ^1.0.3
version: 1.0.3
'@dagrejs/dagre':
specifier: ^1.1.5
version: 1.1.5
specifier: ^1.1.4
version: 1.1.4
'@dagrejs/graphlib':
specifier: ^2.2.4
version: 2.2.4
'@fontsource-variable/inter':
specifier: ^5.2.6
version: 5.2.6
specifier: ^5.2.5
version: 5.2.5
'@invoke-ai/ui-library':
specifier: ^0.0.46
version: 0.0.46(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.2.6)(@types/react@18.3.11)(i18next@25.2.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
version: 0.0.46(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.2.5)(@types/react@18.3.11)(i18next@25.0.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
'@nanostores/react':
specifier: ^1.0.0
version: 1.0.0(nanostores@1.0.1)(react@18.3.1)
'@observ33r/object-equals':
specifier: ^1.1.4
version: 1.1.4
'@reduxjs/toolkit':
specifier: 2.8.2
version: 2.8.2(react-redux@9.2.0)(react@18.3.1)
specifier: 2.7.0
version: 2.7.0(react-redux@9.2.0)(react@18.3.1)
'@roarr/browser-log-writer':
specifier: ^1.3.0
version: 1.3.0
'@xyflow/react':
specifier: ^12.7.1
version: 12.7.1(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)
specifier: ^12.6.0
version: 12.6.0(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)
async-mutex:
specifier: ^0.5.0
version: 0.5.0
@@ -53,30 +50,24 @@ dependencies:
compare-versions:
specifier: ^6.1.1
version: 6.1.1
dockview:
specifier: ^4.4.0
version: 4.4.0(react@18.3.1)
es-toolkit:
specifier: ^1.39.5
version: 1.39.5
filesize:
specifier: ^10.1.6
version: 10.1.6
fracturedjsonjs:
specifier: ^4.1.0
version: 4.1.0
specifier: ^4.0.2
version: 4.0.2
framer-motion:
specifier: ^11.10.0
version: 11.10.0(react-dom@18.3.1)(react@18.3.1)
i18next:
specifier: ^25.2.1
version: 25.2.1(typescript@5.8.3)
specifier: ^25.0.1
version: 25.0.1(typescript@5.8.3)
i18next-http-backend:
specifier: ^3.0.2
version: 3.0.2
idb-keyval:
specifier: ^6.2.2
version: 6.2.2
specifier: ^6.2.1
version: 6.2.1
jsondiffpatch:
specifier: ^0.7.3
version: 0.7.3
@@ -84,11 +75,14 @@ dependencies:
specifier: ^9.3.20
version: 9.3.20
linkify-react:
specifier: ^4.3.1
version: 4.3.1(linkifyjs@4.3.1)(react@18.3.1)
specifier: ^4.2.0
version: 4.2.0(linkifyjs@4.2.0)(react@18.3.1)
linkifyjs:
specifier: ^4.3.1
version: 4.3.1
specifier: ^4.2.0
version: 4.2.0
lodash-es:
specifier: ^4.17.21
version: 4.17.21
lru-cache:
specifier: ^11.1.0
version: 11.1.0
@@ -105,17 +99,17 @@ dependencies:
specifier: ^1.1.0
version: 1.1.0
overlayscrollbars:
specifier: ^2.11.4
version: 2.11.4
specifier: ^2.11.1
version: 2.11.1
overlayscrollbars-react:
specifier: ^0.5.6
version: 0.5.6(overlayscrollbars@2.11.4)(react@18.3.1)
version: 0.5.6(overlayscrollbars@2.11.1)(react@18.3.1)
perfect-freehand:
specifier: ^1.2.2
version: 1.2.2
query-string:
specifier: ^9.2.1
version: 9.2.1
specifier: ^9.1.1
version: 9.1.1
raf-throttle:
specifier: ^2.0.6
version: 2.0.6
@@ -135,14 +129,14 @@ dependencies:
specifier: ^5.0.0
version: 5.0.0(react@18.3.1)
react-hook-form:
specifier: ^7.58.1
version: 7.58.1(react@18.3.1)
specifier: ^7.56.1
version: 7.56.1(react@18.3.1)
react-hotkeys-hook:
specifier: 4.5.0
version: 4.5.0(react-dom@18.3.1)(react@18.3.1)
react-i18next:
specifier: ^15.5.3
version: 15.5.3(i18next@25.2.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
specifier: ^15.5.1
version: 15.5.1(i18next@25.0.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
react-icons:
specifier: ^5.5.0
version: 5.5.0(react@18.3.1)
@@ -150,8 +144,8 @@ dependencies:
specifier: 9.2.0
version: 9.2.0(@types/react@18.3.11)(react@18.3.1)(redux@5.0.1)
react-resizable-panels:
specifier: ^3.0.3
version: 3.0.3(react-dom@18.3.1)(react@18.3.1)
specifier: ^2.1.8
version: 2.1.8(react-dom@18.3.1)(react@18.3.1)
react-textarea-autosize:
specifier: ^8.5.9
version: 8.5.9(@types/react@18.3.11)(react@18.3.1)
@@ -159,8 +153,8 @@ dependencies:
specifier: ^17.6.0
version: 17.6.0(react-dom@18.3.1)(react@18.3.1)
react-virtuoso:
specifier: ^4.13.0
version: 4.13.0(react-dom@18.3.1)(react@18.3.1)
specifier: ^4.12.6
version: 4.12.6(react-dom@18.3.1)(react@18.3.1)
redux-dynamic-middlewares:
specifier: ^2.2.0
version: 2.2.0
@@ -183,11 +177,11 @@ dependencies:
specifier: ^4.8.1
version: 4.8.1
stable-hash:
specifier: ^0.0.6
version: 0.0.6
specifier: ^0.0.5
version: 0.0.5
use-debounce:
specifier: ^10.0.5
version: 10.0.5(react@18.3.1)
specifier: ^10.0.4
version: 10.0.4(react@18.3.1)
use-device-pixel-ratio:
specifier: ^1.1.2
version: 1.1.2(react@18.3.1)
@@ -195,11 +189,11 @@ dependencies:
specifier: ^11.1.0
version: 11.1.0
zod:
specifier: ^3.25.67
version: 3.25.67
specifier: ^3.24.3
version: 3.24.3
zod-validation-error:
specifier: ^3.5.2
version: 3.5.2(zod@3.25.67)
specifier: ^3.4.0
version: 3.4.0(zod@3.24.3)
devDependencies:
'@invoke-ai/eslint-config-react':
@@ -232,6 +226,9 @@ devDependencies:
'@storybook/theming':
specifier: ^8.6.12
version: 8.6.12(storybook@8.6.12)
'@types/lodash-es':
specifier: ^4.17.12
version: 4.17.12
'@types/node':
specifier: ^22.15.1
version: 22.15.1
@@ -331,24 +328,24 @@ packages:
'@jridgewell/trace-mapping': 0.3.25
dev: true
/@atlaskit/pragmatic-drag-and-drop-auto-scroll@2.1.1:
resolution: {integrity: sha512-VAQEb3NVLY9Q5ZgC5Eiws9Uf6xOINY9/pAZMdbOVlF90uRXEkmpYqdTL+zeyZ8U8deuqYCmXr7oWIEnxpNQVzA==}
/@atlaskit/pragmatic-drag-and-drop-auto-scroll@2.1.0:
resolution: {integrity: sha512-E52y8/0BTTf4ai6BJyFYgdVHFgQ1AES33KvAVQpZ41jMkoukLIq6UoCudOXku7xs3qoPygQdpC+vitVUuEFJXw==}
dependencies:
'@atlaskit/pragmatic-drag-and-drop': 1.7.4
'@babel/runtime': 7.27.6
'@atlaskit/pragmatic-drag-and-drop': 1.5.3
'@babel/runtime': 7.27.0
dev: false
/@atlaskit/pragmatic-drag-and-drop-hitbox@1.1.0:
resolution: {integrity: sha512-JWt6eVp6Br2FPHRM8s0dUIHQk/jFInGP1f3ti5CdtM1Ji5/pt8Akm44wDC063Gv2i5RGseixtbW0z/t6RYtbdg==}
/@atlaskit/pragmatic-drag-and-drop-hitbox@1.0.3:
resolution: {integrity: sha512-/Sbu/HqN2VGLYBhnsG7SbRNg98XKkbF6L7XDdBi+izRybfaK1FeMfodPpm/xnBHPJzwYMdkE0qtLyv6afhgMUA==}
dependencies:
'@atlaskit/pragmatic-drag-and-drop': 1.7.4
'@babel/runtime': 7.27.6
'@atlaskit/pragmatic-drag-and-drop': 1.5.3
'@babel/runtime': 7.25.7
dev: false
/@atlaskit/pragmatic-drag-and-drop@1.7.4:
resolution: {integrity: sha512-lZHnO9BJdHPKnwB0uvVUCyDnIhL+WAHzXQ2EXX0qacogOsnvIUiCgY0BLKhBqTCWln3/f/Ox5jU54MKO6ayh9A==}
/@atlaskit/pragmatic-drag-and-drop@1.5.3:
resolution: {integrity: sha512-YFhSvBJjnPbYuZrCDaiTLwdJ+zxgS7uBaBObzB8Io77DFnGi3M4IV87Jwc1dwb6RS4BszslPOrX0axz0pODCsw==}
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.27.0
bind-event-listener: 3.0.0
raf-schd: 4.0.3
dev: false
@@ -536,11 +533,6 @@ packages:
dependencies:
regenerator-runtime: 0.14.1
/@babel/runtime@7.27.6:
resolution: {integrity: sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==}
engines: {node: '>=6.9.0'}
dev: false
/@babel/template@7.25.7:
resolution: {integrity: sha512-wRwtAgI3bAS+JGU2upWNL9lSlDcRCqD05BZ1n3X2ONLH1WilFP6O1otQjeMK/1g0pvYcXC7b/qVUB1keofjtZA==}
engines: {node: '>=6.9.0'}
@@ -1091,8 +1083,8 @@ packages:
react: 18.3.1
dev: false
/@dagrejs/dagre@1.1.5:
resolution: {integrity: sha512-Ghgrh08s12DCL5SeiR6AoyE80mQELTWhJBRmXfFoqDiFkR458vPEdgTbbjA0T+9ETNxUblnD0QW55tfdvi5pjQ==}
/@dagrejs/dagre@1.1.4:
resolution: {integrity: sha512-QUTc54Cg/wvmlEUxB+uvoPVKFazM1H18kVHBQNmK2NbrDR5ihOCR6CXLnDSZzMcSQKJtabPUWridBOlJM3WkDg==}
dependencies:
'@dagrejs/graphlib': 2.2.4
dev: false
@@ -1594,8 +1586,8 @@ packages:
resolution: {integrity: sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==}
dev: false
/@fontsource-variable/inter@5.2.6:
resolution: {integrity: sha512-jks/bficUPQ9nn7GvXvHtlQIPudW7Wx8CrlZoY8bhxgeobNxlQan8DclUJuYF2loYRrGpfrhCIZZspXYysiVGg==}
/@fontsource-variable/inter@5.2.5:
resolution: {integrity: sha512-TrWffUAFOnT8zroE9YmGybagoOgM/HjRqMQ8k9R0vVgXlnUh/vnpbGPAS/Caz1KIlOPnPGh6fvJbb7DHbFCncA==}
dev: false
/@humanwhocodes/config-array@0.13.0:
@@ -1654,7 +1646,7 @@ packages:
prettier: 3.5.3
dev: true
/@invoke-ai/ui-library@0.0.46(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.2.6)(@types/react@18.3.11)(i18next@25.2.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3):
/@invoke-ai/ui-library@0.0.46(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.2.5)(@types/react@18.3.11)(i18next@25.0.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3):
resolution: {integrity: sha512-3YBuWWhRbTUHi0RZKeyvDEvweoyZmeBdUGJIhemjdAgGx6l98rAMeCs8IQH+SYjSAIhiGRGf45fQ33PDK8Jkmw==}
peerDependencies:
'@fontsource-variable/inter': ^5.0.16
@@ -1670,7 +1662,7 @@ packages:
'@chakra-ui/theme-tools': 2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
'@emotion/react': 11.14.0(@types/react@18.3.11)(react@18.3.1)
'@emotion/styled': 11.14.0(@emotion/react@11.14.0)(@types/react@18.3.11)(react@18.3.1)
'@fontsource-variable/inter': 5.2.6
'@fontsource-variable/inter': 5.2.5
'@nanostores/react': 0.7.3(nanostores@0.11.4)(react@18.3.1)
chakra-react-select: 4.10.1(@chakra-ui/react@2.10.4)(@emotion/react@11.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
@@ -1680,7 +1672,7 @@ packages:
overlayscrollbars-react: 0.5.6(overlayscrollbars@2.10.0)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-i18next: 15.5.3(i18next@25.2.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
react-i18next: 15.5.1(i18next@25.0.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3)
react-icons: 5.5.0(react@18.3.1)
react-select: 5.10.0(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1)
transitivePeerDependencies:
@@ -1858,10 +1850,6 @@ packages:
fastq: 1.17.1
dev: true
/@observ33r/object-equals@1.1.4:
resolution: {integrity: sha512-a46ys2Zvyyu1NPo8C8mF6FLztVxxaBtXpZwxlQutaaRtQFcD71yTMwyPY4DOuHsz//YEZjLkCw+mJoKDiG/CgA==}
dev: false
/@pkgjs/parseargs@0.11.0:
resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
engines: {node: '>=14'}
@@ -2173,8 +2161,8 @@ packages:
- supports-color
dev: true
/@reduxjs/toolkit@2.8.2(react-redux@9.2.0)(react@18.3.1):
resolution: {integrity: sha512-MYlOhQ0sLdw4ud48FoC5w0dH9VfWQjtCjreKwYTT3l+r427qYC5Y8PihNutepr8XrNaBUDQo9khWUwQxZaqt5A==}
/@reduxjs/toolkit@2.7.0(react-redux@9.2.0)(react@18.3.1):
resolution: {integrity: sha512-XVwolG6eTqwV0N8z/oDlN93ITCIGIop6leXlGJI/4EKy+0POYkR+ABHRSdGXY+0MQvJBP8yAzh+EYFxTuvmBiQ==}
peerDependencies:
react: ^16.9.0 || ^17.0.0 || ^18 || ^19
react-redux: ^7.2.1 || ^8.1.3 || ^9.0.0
@@ -2807,7 +2795,7 @@ packages:
peerDependencies:
storybook: ^8.6.12
dependencies:
es-toolkit: 1.39.5
es-toolkit: 1.36.0
estraverse: 5.3.0
prettier: 3.5.3
storybook: 8.6.12(prettier@3.5.3)
@@ -3098,10 +3086,16 @@ packages:
resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==}
dev: true
/@types/lodash-es@4.17.12:
resolution: {integrity: sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==}
dependencies:
'@types/lodash': 4.17.10
dev: true
/@types/lodash.mergewith@4.6.7:
resolution: {integrity: sha512-3m+lkO5CLRRYU0fhGRp7zbsGi6+BZj0uTVSwvcKU+nSlhjA9/QRNfuSGnD2mX6hQA7ZbmcCkzk5h4ZYGOtk14A==}
dependencies:
'@types/lodash': 4.17.18
'@types/lodash': 4.17.16
dev: false
/@types/lodash.mergewith@4.6.9:
@@ -3112,10 +3106,9 @@ packages:
/@types/lodash@4.17.10:
resolution: {integrity: sha512-YpS0zzoduEhuOWjAotS6A5AVCva7X4lVlYLF0FYHAY9sdraBfnatttHItlWeZdGhuEkf+OzMNg2ZYAx8t+52uQ==}
dev: false
/@types/lodash@4.17.18:
resolution: {integrity: sha512-KJ65INaxqxmU6EoCiJmRPZC9H9RVWCRd349tXM2M3O5NA7cY6YL7c0bHAHQ93NOfTObEQ004kd2QVHs/r0+m4g==}
/@types/lodash@4.17.16:
resolution: {integrity: sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==}
dev: false
/@types/mdx@2.0.13:
@@ -3591,32 +3584,30 @@ packages:
resolution: {integrity: sha512-N8tkAACJx2ww8vFMneJmaAgmjAG1tnVBZJRLRcx061tmsLRZHSEZSLuGWnwPtunsSLvSqXQ2wfp7Mgqg1I+2dQ==}
dev: false
/@xyflow/react@12.7.1(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-uvIPQIZdf8tt0mDWvhkEpg/7t5E/e/KE4RWjNczAEhEYA+uvLc+4A5kIPJqCjJJbVHfMiAojT5JOB5mB7/EgFw==}
/@xyflow/react@12.6.0(@types/react@18.3.11)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-YzsSK4SlpC6e9Ki1g6O9B1UH7xvz/bzWF+tJ+vWDD8Am5xJmFn0jYnCEuqvzvH8dRKb1NFBmyuqEGqWN39xXsA==}
peerDependencies:
react: '>=17'
react-dom: '>=17'
dependencies:
'@xyflow/system': 0.0.63
'@xyflow/system': 0.0.57
classcat: 5.0.5
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
zustand: 4.5.7(@types/react@18.3.11)(react@18.3.1)
zustand: 4.5.6(@types/react@18.3.11)(react@18.3.1)
transitivePeerDependencies:
- '@types/react'
- immer
dev: false
/@xyflow/system@0.0.63:
resolution: {integrity: sha512-lCZRh5o7RCPE7iNe3yKzV8UuS4hijVIWJ9nbQh9eowsRJOwgy5KlUnZ3Q43SOlRsZnOht8px5phpsjBHPRn+oQ==}
/@xyflow/system@0.0.57:
resolution: {integrity: sha512-1YpBo0WgmZLR5wQw9Jvk3Tu0gISi/oYc4uSimrDuAsA/G2rGleulLrKkM59uuT/QU5m6DYC2VdBDAzjSNMGuBA==}
dependencies:
'@types/d3-drag': 3.0.7
'@types/d3-interpolate': 3.0.4
'@types/d3-selection': 3.0.11
'@types/d3-transition': 3.0.9
'@types/d3-zoom': 3.0.8
d3-drag: 3.0.0
d3-interpolate: 3.0.1
d3-selection: 3.0.0
d3-zoom: 3.0.0
dev: false
@@ -3908,7 +3899,7 @@ packages:
resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==}
engines: {node: '>=10', npm: '>=6'}
dependencies:
'@babel/runtime': 7.27.0
'@babel/runtime': 7.25.7
cosmiconfig: 7.1.0
resolve: 1.22.8
dev: false
@@ -4501,19 +4492,6 @@ packages:
resolution: {integrity: sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==}
dev: false
/dockview-core@4.4.0:
resolution: {integrity: sha512-UsBJwS3lfZXM+gaTA+bJs8rAxLd7ZEmNcUf5CbKKhiPeKIPJrNCxXxTLcnQb3IXMJUGkE0aX1ZJ4BDaZGMtzlA==}
dev: false
/dockview@4.4.0(react@18.3.1):
resolution: {integrity: sha512-cWi5R40R5kDky69vAqsKGznRx5tA0gk3Mdqe5aS2r4ollK951mWNJ/EeMmac+UP/juw4cbl0/APhXTV+EMnAbg==}
peerDependencies:
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
dependencies:
dockview-core: 4.4.0
react: 18.3.1
dev: false
/doctrine@2.1.0:
resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==}
engines: {node: '>=0.10.0'}
@@ -4539,7 +4517,7 @@ packages:
/dom-helpers@5.2.1:
resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==}
dependencies:
'@babel/runtime': 7.27.0
'@babel/runtime': 7.25.7
csstype: 3.1.3
dev: false
@@ -4762,8 +4740,9 @@ packages:
is-symbol: 1.0.4
dev: true
/es-toolkit@1.39.5:
resolution: {integrity: sha512-z9V0qU4lx1TBXDNFWfAASWk6RNU6c6+TJBKE+FLIg8u0XJ6Yw58Hi0yX8ftEouj6p1QARRlXLFfHbIli93BdQQ==}
/es-toolkit@1.36.0:
resolution: {integrity: sha512-5lpkRpDELuTSeAL//Rcg5urg+K/yOD1BobJSiNeCc89snMqgrhckmj8jdljqraDbpREiXTNW311RN518eVHBng==}
dev: true
/esbuild-register@3.6.0(esbuild@0.25.3):
resolution: {integrity: sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg==}
@@ -5301,8 +5280,8 @@ packages:
signal-exit: 4.1.0
dev: true
/fracturedjsonjs@4.1.0:
resolution: {integrity: sha512-qy6LPA8OOiiyRHt5/sNKDayD7h5r3uHmHxSOLbBsgtU/hkt5vOVWOR51MdfDbeCNfj7k/dKCRbXYm8FBAJcgWQ==}
/fracturedjsonjs@4.0.2:
resolution: {integrity: sha512-+vGJH9wK0EEhbbn50V2sOebLRaar1VL3EXr02kxchIwpkhQk0ItrPjIOtYPYuU9hNFpVzxjrPgzjtMJih+ae4A==}
dev: false
/framer-motion@10.18.0(react-dom@18.3.1)(react@18.3.1):
@@ -5631,20 +5610,20 @@ packages:
- encoding
dev: false
/i18next@25.2.1(typescript@5.8.3):
resolution: {integrity: sha512-+UoXK5wh+VlE1Zy5p6MjcvctHXAhRwQKCxiJD8noKZzIXmnAX8gdHX5fLPA3MEVxEN4vbZkQFy8N0LyD9tUqPw==}
/i18next@25.0.1(typescript@5.8.3):
resolution: {integrity: sha512-8S8PyZbrymJZn3DaN70/34JYWNhsqrU6yA4MuzcygJBv+41dgNMocEA8h+kV1P7MCc1ll03lOTOIXE7mpNCicw==}
peerDependencies:
typescript: ^5
peerDependenciesMeta:
typescript:
optional: true
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.27.0
typescript: 5.8.3
dev: false
/idb-keyval@6.2.2:
resolution: {integrity: sha512-yjD9nARJ/jb1g+CvD0tlhUHOrJ9Sy0P8T9MF3YaLlHnSRpwPfpTX0XIvpmw3gAJUmEu3FiICLBDPXVwyEvrleg==}
/idb-keyval@6.2.1:
resolution: {integrity: sha512-8Sb3veuYCyrZL+VBt9LJfZjLUPWVvqn8tG28VqYNFCo43KHcKuq+b4EiXGeuaLAQWL2YmyDgMp2aSpH9JHsEQg==}
dev: false
/ieee754@1.2.1:
@@ -6151,8 +6130,8 @@ packages:
smol-toml: 1.3.4
strip-json-comments: 5.0.1
typescript: 5.8.3
zod: 3.25.67
zod-validation-error: 3.5.2(zod@3.25.67)
zod: 3.24.3
zod-validation-error: 3.4.0(zod@3.24.3)
dev: true
/kolorist@1.8.0:
@@ -6175,18 +6154,18 @@ packages:
resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
dev: false
/linkify-react@4.3.1(linkifyjs@4.3.1)(react@18.3.1):
resolution: {integrity: sha512-w8ahBdCwF9C/doS4V3nE93QF1oyORmosvi8UEUbpHYws077eGzhkxUzJQcE2/SU5Q2K7SD80M4ybwwZGHErx5Q==}
/linkify-react@4.2.0(linkifyjs@4.2.0)(react@18.3.1):
resolution: {integrity: sha512-dIcDGo+n4FP2FPIHDcqB7cUE+omkcEgQJpc7sNNP4+XZ9FUhFAkKjGnHMzsZM+B4yF93sK166z9K5cKTe/JpzA==}
peerDependencies:
linkifyjs: ^4.0.0
react: '>= 15.0.0'
dependencies:
linkifyjs: 4.3.1
linkifyjs: 4.2.0
react: 18.3.1
dev: false
/linkifyjs@4.3.1:
resolution: {integrity: sha512-DRSlB9DKVW04c4SUdGvKK5FR6be45lTU9M76JnngqPeeGDqPwYc0zdUErtsNVMtxPXgUWV4HbXbnC4sNyBxkYg==}
/linkifyjs@4.2.0:
resolution: {integrity: sha512-pCj3PrQyATaoTYKHrgWRF3SJwsm61udVh+vuls/Rl6SptiDhgE7ziUIudAedRY9QEfynmM7/RmLEfPUyw1HPCw==}
dev: false
/liqe@3.8.0:
@@ -6630,13 +6609,13 @@ packages:
react: 18.3.1
dev: false
/overlayscrollbars-react@0.5.6(overlayscrollbars@2.11.4)(react@18.3.1):
/overlayscrollbars-react@0.5.6(overlayscrollbars@2.11.1)(react@18.3.1):
resolution: {integrity: sha512-E5To04bL5brn9GVCZ36SnfGanxa2I2MDkWoa4Cjo5wol7l+diAgi4DBc983V7l2nOk/OLJ6Feg4kySspQEGDBw==}
peerDependencies:
overlayscrollbars: ^2.0.0
react: '>=16.8.0'
dependencies:
overlayscrollbars: 2.11.4
overlayscrollbars: 2.11.1
react: 18.3.1
dev: false
@@ -6644,8 +6623,8 @@ packages:
resolution: {integrity: sha512-diNMeEafWTE0A4GJfwRpdBp2rE/BEvrhptBdBcDu8/UeytWcdCy9Td8tZWnztJeJ26f8/uHCWfPnPUC/dtgJdw==}
dev: false
/overlayscrollbars@2.11.4:
resolution: {integrity: sha512-GKYQo3OZ1QWnppNjQVv5hfpn+glYUxc6+ufW+ivdXUyLWFNc01XoH2Z36KGM4I8e5pXYeA3ElNItcXiLvmUhnQ==}
/overlayscrollbars@2.11.1:
resolution: {integrity: sha512-kogaNaBTIizRenQ2GTzt2cpkEH9B0nUBXseRxqQblH/YicJ3TaWuvn8E5TXPPfJCVoHYSgBYZzzva40kCERKHg==}
dev: false
/p-limit@3.1.0:
@@ -6851,8 +6830,8 @@ packages:
resolution: {integrity: sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==}
dev: true
/query-string@9.2.1:
resolution: {integrity: sha512-3jTGGLRzlhu/1ws2zlr4Q+GVMLCQTLFOj8CMX5x44cdZG9FQE07x2mQhaNxaKVPNmIDu0mvJ/cEwtY7Pim7hqA==}
/query-string@9.1.1:
resolution: {integrity: sha512-MWkCOVIcJP9QSKU52Ngow6bsAWAPlPK2MludXvcrS2bGZSl+T1qX9MZvRIkqUIkGLJquMJHWfsT6eRqUpp4aWg==}
engines: {node: '>=18'}
dependencies:
decode-uri-component: 0.4.1
@@ -6982,8 +6961,8 @@ packages:
use-sidecar: 1.1.2(@types/react@18.3.11)(react@18.3.1)
dev: false
/react-hook-form@7.58.1(react@18.3.1):
resolution: {integrity: sha512-Lml/KZYEEFfPhUVgE0RdCVpnC4yhW+PndRhbiTtdvSlQTL8IfVR+iQkBjLIvmmc6+GGoVeM11z37ktKFPAb0FA==}
/react-hook-form@7.56.1(react@18.3.1):
resolution: {integrity: sha512-qWAVokhSpshhcEuQDSANHx3jiAEFzu2HAaaQIzi/r9FNPm1ioAvuJSD4EuZzWd7Al7nTRKcKPnBKO7sRn+zavQ==}
engines: {node: '>=18.0.0'}
peerDependencies:
react: ^16.8.0 || ^17 || ^18 || ^19
@@ -7001,8 +6980,8 @@ packages:
react-dom: 18.3.1(react@18.3.1)
dev: false
/react-i18next@15.5.3(i18next@25.2.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3):
resolution: {integrity: sha512-ypYmOKOnjqPEJZO4m1BI0kS8kWqkBNsKYyhVUfij0gvjy9xJNoG/VcGkxq5dRlVwzmrmY1BQMAmpbbUBLwC4Kw==}
/react-i18next@15.5.1(i18next@25.0.1)(react-dom@18.3.1)(react@18.3.1)(typescript@5.8.3):
resolution: {integrity: sha512-C8RZ7N7H0L+flitiX6ASjq9p5puVJU1Z8VyL3OgM/QOMRf40BMZX+5TkpxzZVcTmOLPX5zlti4InEX5pFyiVeA==}
peerDependencies:
i18next: '>= 23.2.3'
react: '>= 16.8.0'
@@ -7017,9 +6996,9 @@ packages:
typescript:
optional: true
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.27.0
html-parse-stringify: 3.0.1
i18next: 25.2.1(typescript@5.8.3)
i18next: 25.0.1(typescript@5.8.3)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
typescript: 5.8.3
@@ -7129,8 +7108,8 @@ packages:
use-sidecar: 1.1.3(@types/react@18.3.11)(react@18.3.1)
dev: false
/react-resizable-panels@3.0.3(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-7HA8THVBHTzhDK4ON0tvlGXyMAJN1zBeRpuyyremSikgYh2ku6ltD7tsGQOcXx4NKPrZtYCm/5CBr+dkruTGQw==}
/react-resizable-panels@2.1.8(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-oDvD0sw34Ecx00cQFLiRJpAE2fCgNLBr8DMrBzkrsaUiLpAycIQoY3eAWfMblDql3pTIMZ60wJ/P89RO1htM2w==}
peerDependencies:
react: ^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
react-dom: ^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
@@ -7278,8 +7257,8 @@ packages:
tslib: 2.8.1
dev: false
/react-virtuoso@4.13.0(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-XHv2Fglpx80yFPdjZkV9d1baACKghg/ucpDFEXwaix7z0AfVQj+mF6lM+YQR6UC/TwzXG2rJKydRMb3+7iV3PA==}
/react-virtuoso@4.12.6(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-bfvS6aCL1ehXmq39KRiz/vxznGUbtA27I5I24TYCe1DhMf84O3aVNCIwrSjYQjkJGJGzY46ihdN8WkYlemuhMQ==}
peerDependencies:
react: '>=16 || >=17 || >= 18 || >= 19'
react-dom: '>=16 || >=17 || >= 18 || >=19'
@@ -7775,8 +7754,8 @@ packages:
resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==}
dev: true
/stable-hash@0.0.6:
resolution: {integrity: sha512-0afH4mobqTybYZsXImQRLOjHV4gvOW+92HdUIax9t7a8d9v54KWykEuMVIcXhD9BCi+w3kS4x7O6fmZQ3JlG/g==}
/stable-hash@0.0.5:
resolution: {integrity: sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==}
dev: false
/stack-generator@2.0.10:
@@ -8329,8 +8308,8 @@ packages:
react: 18.3.1
dev: false
/use-debounce@10.0.5(react@18.3.1):
resolution: {integrity: sha512-Q76E3lnIV+4YT9AHcrHEHYmAd9LKwUAbPXDm7FlqVGDHiSOhX3RDjT8dm0AxbJup6WgOb1YEcKyCr11kBJR5KQ==}
/use-debounce@10.0.4(react@18.3.1):
resolution: {integrity: sha512-6Cf7Yr7Wk7Kdv77nnJMf6de4HuDE4dTxKij+RqE9rufDsI6zsbjyAxcH5y2ueJCQAnfgKbzXbZHYlkFwmBlWkw==}
engines: {node: '>= 16.0.0'}
peerDependencies:
react: '*'
@@ -8868,19 +8847,19 @@ packages:
engines: {node: '>=10'}
dev: true
/zod-validation-error@3.5.2(zod@3.25.67):
resolution: {integrity: sha512-mdi7YOLtram5dzJ5aDtm1AG9+mxRma1iaMrZdYIpFO7epdKBUwLHIxTF8CPDeCQ828zAXYtizrKlEJAtzgfgrw==}
/zod-validation-error@3.4.0(zod@3.24.3):
resolution: {integrity: sha512-ZOPR9SVY6Pb2qqO5XHt+MkkTRxGXb4EVtnjc9JpXUOtUB1T9Ru7mZOT361AN3MsetVe7R0a1KZshJDZdgp9miQ==}
engines: {node: '>=18.0.0'}
peerDependencies:
zod: ^3.25.0
zod: ^3.18.0
dependencies:
zod: 3.25.67
zod: 3.24.3
/zod@3.25.67:
resolution: {integrity: sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==}
/zod@3.24.3:
resolution: {integrity: sha512-HhY1oqzWCQWuUqvBFnsyrtZRhyPeR7SUGv+C4+MsisMuVfSPx8HpwWqH8tRahSlt6M3PiFAcoeFhZAqIXTxoSg==}
/zustand@4.5.7(@types/react@18.3.11)(react@18.3.1):
resolution: {integrity: sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==}
/zustand@4.5.6(@types/react@18.3.11)(react@18.3.1):
resolution: {integrity: sha512-ibr/n1hBzLLj5Y+yUcU7dYw8p6WnIVzdJbnX+1YpaScvZVF2ziugqHs+LAmHw4lWO9c/zRj+K1ncgWDQuthEdQ==}
engines: {node: '>=12.7.0'}
peerDependencies:
'@types/react': '>=16.8'


@@ -2017,9 +2017,7 @@
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image to get started.",
"uploadOrDragAnImage": "Drag an image from the gallery or <UploadButton>upload an image</UploadButton>.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",
"warnings": {


@@ -3,7 +3,7 @@ import { useStore } from '@nanostores/react';
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $globalIsLoading } from 'app/store/nanostores/globalIsLoading';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import { useClearStorage } from 'common/hooks/useClearStorage';
@@ -20,7 +20,7 @@ interface Props {
}
const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
const globalIsLoading = useStore($globalIsLoading);
const didStudioInit = useStore($didStudioInit);
const clearStorage = useClearStorage();
const handleReset = useCallback(() => {
@@ -33,7 +33,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
<AppContent />
{globalIsLoading && <Loading />}
{!didStudioInit && <Loading />}
</Box>
<GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
<GlobalModalIsolator />

View File

@@ -8,9 +8,7 @@ import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/ap
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import type { PartialAppConfig } from 'app/types/invokeai';
import { useFocusRegionWatcher } from 'common/hooks/focus';
import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
import { size } from 'es-toolkit/compat';
import { useDynamicPromptsWatcher } from 'features/dynamicPrompts/hooks/useDynamicPromptsWatcher';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
@@ -18,13 +16,11 @@ import { useReadinessWatcher } from 'features/queue/store/readiness';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import i18n from 'i18n';
import { size } from 'lodash-es';
import { memo, useEffect } from 'react';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
import { useGetQueueCountsByDestinationQuery } from 'services/api/endpoints/queue';
import { useSocketIO } from 'services/events/useSocketIO';
const queueCountArg = { destination: 'canvas' };
/**
* GlobalHookIsolator is a logical component that runs global hooks in an isolated component, so that they do not
* cause needless re-renders of any other components.
@@ -42,11 +38,6 @@ export const GlobalHookIsolator = memo(
useGlobalHotkeys();
useGetOpenAPISchemaQuery();
useSyncLoggingConfig();
useCloseChakraTooltipsOnDragFix();
// Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
// and/or in progress canvas sessions.
useGetQueueCountsByDestinationQuery(queueCountArg);
useEffect(() => {
i18n.changeLanguage(language);
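
The comment above describes the hook-isolation pattern. A minimal sketch of the idea, with invented hook and component names rather than the real InvokeAI code:

```ts
import { memo, useEffect } from 'react';

// Stand-in for a real global hook (e.g. hotkeys or socket subscriptions).
const useExampleGlobalHook = () => {
  useEffect(() => {
    // subscribe to something app-wide here
    return () => {
      // clean up the subscription
    };
  }, []);
};

// Runs the global hooks but renders nothing, so any state they own only
// re-renders this leaf component instead of the whole app tree.
export const HookIsolatorSketch = memo(() => {
  useExampleGlobalHook();
  return null;
});
HookIsolatorSketch.displayName = 'HookIsolatorSketch';
```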

View File

@@ -7,13 +7,11 @@ import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo } from 'react';
import { useImageDTO } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
export const GlobalImageHotkeys = memo(() => {
useAssertSingleton('GlobalImageHotkeys');
const imageName = useAppSelector(selectLastSelectedImage);
const imageDTO = useImageDTO(imageName);
const imageDTO = useAppSelector(selectLastSelectedImage);
if (!imageDTO) {
return null;

View File

@@ -6,7 +6,7 @@ import {
NewGallerySessionDialog,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
@@ -15,7 +15,6 @@ import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { DeleteAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/DeleteAllExceptCurrentQueueItemConfirmationAlertDialog';
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
@@ -40,7 +39,6 @@ export const GlobalModalIsolator = memo(() => {
<StylePresetModal />
<WorkflowLibraryModal />
<CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
<DeleteAllExceptCurrentQueueItemConfirmationAlertDialog />
<ClearQueueConfirmationsAlertDialog />
<NewWorkflowConfirmationAlertDialog />
<LoadWorkflowConfirmationAlertDialog />

View File

@@ -1,7 +1,6 @@
import '@fontsource-variable/inter';
import 'overlayscrollbars/overlayscrollbars.css';
import '@xyflow/react/dist/base.css';
import 'common/components/OverlayScrollbars/overlayscrollbars.css';
import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import type { ReactNode } from 'react';

View File

@@ -3,10 +3,11 @@ import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
import { canvasReset } from 'features/controlLayers/store/actions';
import { settingsSendToCanvasChanged } from 'features/controlLayers/store/canvasSettingsSlice';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
@@ -92,7 +93,10 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
};
store.dispatch(canvasReset());
store.dispatch(rasterLayerAdded({ overrides, isSelected: true }));
store.dispatch(settingsSendToCanvasChanged(true));
store.dispatch(setActiveTab('canvas'));
store.dispatch(sentImageToCanvas());
$imageViewer.set(false);
toast({
title: t('toast.sentToCanvas'),
status: 'info',
@@ -114,9 +118,9 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
return;
}
const metadata = getImageMetadataResult.value;
store.dispatch(canvasReset());
// This shows a toast
await parseAndRecallAllMetadata(metadata, true);
store.dispatch(setActiveTab('canvas'));
},
[store, t]
);
@@ -160,12 +164,16 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
switch (destination) {
case 'generation':
// Go to the canvas tab, open the image viewer, and enable send-to-gallery mode
store.dispatch(paramsReset());
store.dispatch(setActiveTab('canvas'));
store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
store.dispatch(settingsSendToCanvasChanged(false));
$imageViewer.set(true);
break;
case 'canvas':
// Go to the canvas tab, close the image viewer, and disable send-to-gallery mode
store.dispatch(canvasReset());
store.dispatch(setActiveTab('canvas'));
store.dispatch(settingsSendToCanvasChanged(true));
$imageViewer.set(false);
break;
case 'workflows':
// Go to the workflows tab

View File

@@ -2,7 +2,7 @@ import { createLogWriter } from '@roarr/browser-log-writer';
import { atom } from 'nanostores';
import type { Logger, MessageSerializer } from 'roarr';
import { ROARR, Roarr } from 'roarr';
import { z } from 'zod/v4';
import { z } from 'zod';
const serializeMessage: MessageSerializer = (message) => {
return JSON.stringify(message);

View File

@@ -1,13 +1,13 @@
import { objectEquals } from '@observ33r/object-equals';
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
import { isEqual } from 'lodash-es';
/**
* A memoized selector creator that uses LRU cache and @observ33r/object-equals's objectEquals for equality check.
* A memoized selector creator that uses LRU cache and lodash's isEqual for equality check.
*/
export const createMemoizedSelector = createSelectorCreator({
memoize: lruMemoize,
memoizeOptions: {
resultEqualityCheck: objectEquals,
resultEqualityCheck: isEqual,
},
argsMemoize: lruMemoize,
});
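
A hedged usage sketch for the selector creator above (the import path is assumed and the state shape is invented for illustration):

```ts
// Path assumed for illustration.
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';

// Hypothetical state shape.
type ExampleState = { gallery: { selection: { image_name: string }[] } };

// The result function builds a new array on every recomputation, but because
// results are compared structurally, consumers only receive a new reference
// when the list of names actually changes.
export const selectSelectedImageNames = createMemoizedSelector(
  (state: ExampleState) => state.gallery.selection,
  (selection) => selection.map((image) => image.image_name)
);
```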

View File

@@ -8,13 +8,10 @@ import { diff } from 'jsondiffpatch';
* Super simple logger middleware. Useful for debugging when the redux devtools are awkward.
*/
export const getDebugLoggerMiddleware =
(options?: { filter?: (action: unknown) => boolean; withDiff?: boolean; withNextState?: boolean }): Middleware =>
(options?: { withDiff?: boolean; withNextState?: boolean }): Middleware =>
(api: MiddlewareAPI) =>
(next) =>
(action) => {
if (options?.filter?.(action)) {
return next(action);
}
const originalState = api.getState();
console.log('REDUX: dispatching', action);
const result = next(action);
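
A minimal sketch of wiring this middleware into a store, using only the options visible in this diff (the import path is assumed):

```ts
import { configureStore } from '@reduxjs/toolkit';
// Path assumed for illustration.
import { getDebugLoggerMiddleware } from 'app/store/middleware/debugLoggerMiddleware';

const exampleReducer = (state: { count: number } = { count: 0 }) => state;

export const store = configureStore({
  reducer: { example: exampleReducer },
  middleware: (getDefaultMiddleware) =>
    // Logs every dispatched action; `withDiff` additionally logs a state diff.
    getDefaultMiddleware().concat(getDebugLoggerMiddleware({ withDiff: true })),
});
```

In the app's own store setup the call appears commented out (see the store.ts diff further down in this compare), so it is opt-in for local debugging.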

View File

@@ -1,6 +1,7 @@
import type { TypedStartListening } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addStagingListeners } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
@@ -9,9 +10,15 @@ import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/l
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
import { addGalleryOffsetChangedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryOffsetChanged';
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
import { addImageDeletionListeners } from 'app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners';
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred';
import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred';
import { addImageToDeleteSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected';
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
@@ -40,7 +47,17 @@ export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
addImageUploadedFulfilledListener(startAppListening);
// Image deleted
addImageDeletionListeners(startAppListening);
addDeleteBoardAndImagesFulfilledListener(startAppListening);
addImageToDeleteSelectedListener(startAppListening);
// Image starred
addImagesStarredListener(startAppListening);
addImagesUnstarredListener(startAppListening);
// Gallery
addGalleryImageClickedListener(startAppListening);
addGalleryOffsetChangedListener(startAppListening);
// User Invoked
addEnqueueRequestedLinear(startAppListening);
@@ -48,6 +65,9 @@ addEnqueueRequestedUpscale(startAppListening);
addAnyEnqueuedListener(startAppListening);
addBatchEnqueuedListener(startAppListening);
// Canvas actions
addStagingListeners(startAppListening);
// Socket.IO
addSocketConnectedEventListener(startAppListening);

View File

@@ -25,7 +25,7 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis
matcher: matchAnyBoardDeleted,
effect: (action, { dispatch, getState }) => {
const state = getState();
const deletedBoardId = action.meta.arg.originalArgs.board_id;
const deletedBoardId = action.meta.arg.originalArgs;
const { autoAddBoardId, selectedBoardId } = state.gallery;
// If the deleted board was currently selected, we should reset the selected board to uncategorized

View File

@@ -0,0 +1,46 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasReset, newSessionRequested } from 'features/controlLayers/store/actions';
import { stagingAreaReset } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
const log = logger('canvas');
const matchCanvasOrStagingAreaReset = isAnyOf(stagingAreaReset, canvasReset, newSessionRequested);
export const addStagingListeners = (startAppListening: AppStartListening) => {
startAppListening({
matcher: matchCanvasOrStagingAreaReset,
effect: async (_, { dispatch }) => {
try {
const req = dispatch(
queueApi.endpoints.cancelByBatchDestination.initiate(
{ destination: 'canvas' },
{ fixedCacheKey: 'cancelByBatchOrigin' }
)
);
const { canceled } = await req.unwrap();
req.reset();
if (canceled > 0) {
log.debug(`Canceled ${canceled} canvas batches`);
toast({
id: 'CANCEL_BATCH_SUCCEEDED',
title: t('queue.cancelBatchSucceeded'),
status: 'success',
});
}
} catch {
log.error('Failed to cancel canvas batches');
toast({
id: 'CANCEL_BATCH_FAILED',
title: t('queue.cancelBatchFailed'),
status: 'error',
});
}
},
});
};

View File

@@ -1,29 +1,15 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
export const appStarted = createAction('app/appStarted');
export const addAppStartedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: appStarted,
effect: async (action, { unsubscribe, cancelActiveListeners, take, getState, dispatch }) => {
effect: (action, { unsubscribe, cancelActiveListeners }) => {
// this should only run once
cancelActiveListeners();
unsubscribe();
// ensure an image is selected when we load the first board
const firstImageLoad = await take(imagesApi.endpoints.getImageNames.matchFulfilled);
if (firstImageLoad !== null) {
const [{ payload }] = firstImageLoad;
const selectedImage = selectLastSelectedImage(getState());
if (selectedImage) {
return;
}
dispatch(imageSelected(payload.image_names.at(0) ?? null));
}
},
});
};

View File

@@ -1,9 +1,9 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { truncate } from 'es-toolkit/compat';
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { truncate } from 'lodash-es';
import { serializeError } from 'serialize-error';
import { queueApi } from 'services/api/endpoints/queue';
import type { JsonObject } from 'type-fest';

View File

@@ -1,7 +1,6 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getImageUsage } from 'features/deleteImageModal/store/state';
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
@@ -21,10 +20,9 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
const nodes = selectNodesSlice(state);
const canvas = selectCanvasSlice(state);
const upscale = selectUpscaleSlice(state);
const refImages = selectRefImagesSlice(state);
deleted_images.forEach((image_name) => {
const imageUsage = getImageUsage(nodes, canvas, upscale, refImages, image_name);
const imageUsage = getImageUsage(nodes, canvas, upscale, image_name);
if (imageUsage.isNodesImage && !wasNodeEditorReset) {
dispatch(nodeEditorReset());

View File

@@ -1,6 +1,6 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectListImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
@@ -11,35 +11,36 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening)
// Cancel any in-progress instances of this listener - we don't want to select an image from a previous board
cancelActiveListeners();
if (boardIdSelected.match(action) && action.payload.selectedImageName) {
// This action already has a selected image name, we trust it is valid
return;
}
const state = getState();
const board_id = selectSelectedBoardId(state);
const queryArgs = { ...selectListImageNamesQueryArgs(state), board_id };
const queryArgs = selectListImagesQueryArgs(state);
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
() => imagesApi.endpoints.listImages.select(queryArgs)(getState()).isSuccess,
5000
);
if (!isSuccess) {
if (isSuccess) {
// the board was just changed - we can select the first image
const { data: boardImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(getState());
if (boardImagesData && boardIdSelected.match(action) && action.payload.selectedImageName) {
const selectedImage = boardImagesData.items.find(
(item) => item.image_name === action.payload.selectedImageName
);
dispatch(imageSelected(selectedImage || null));
} else if (boardImagesData) {
dispatch(imageSelected(boardImagesData.items[0] || null));
} else {
// board has no images - deselect
dispatch(imageSelected(null));
}
} else {
// fallback - deselect
dispatch(imageSelected(null));
return;
}
// the board was just changed - we can select the first image
const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;
const imageToSelect = imageNames?.at(0) ?? null;
dispatch(imageSelected(imageToSelect));
},
});
};

View File

@@ -5,12 +5,6 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import { extractMessageFromAssertionError } from 'common/util/extractMessageFromAssertionError';
import { withResult, withResultAsync } from 'common/util/result';
import { parseify } from 'common/util/serialize';
import {
canvasSessionIdCreated,
generateSessionIdCreated,
selectCanvasSessionId,
selectGenerateSessionId,
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph';
@@ -24,7 +18,6 @@ import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Grap
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import { assert, AssertionError } from 'tsafe';
@@ -38,34 +31,11 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
actionCreator: enqueueRequestedCanvas,
effect: async (action, { getState, dispatch }) => {
log.debug('Enqueue requested');
const tab = selectActiveTab(getState());
let sessionId = null;
if (tab === 'generate') {
sessionId = selectGenerateSessionId(getState());
if (!sessionId) {
dispatch(generateSessionIdCreated());
sessionId = selectGenerateSessionId(getState());
}
} else if (tab === 'canvas') {
sessionId = selectCanvasSessionId(getState());
if (!sessionId) {
dispatch(canvasSessionIdCreated());
sessionId = selectCanvasSessionId(getState());
}
} else {
log.warn(`Enqueue requested in unsupported tab ${tab}`);
return;
}
const state = getState();
const destination = sessionId;
assert(destination !== null);
const { prepend } = action.payload;
const manager = $canvasManager.get();
// assert(manager, 'No canvas manager');
assert(manager, 'No canvas manager');
const model = state.params.model;
assert(model, 'No model found in state');
@@ -120,6 +90,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = buildGraphResult.value;
const destination = state.canvasSettings.sendToCanvas ? 'canvas' : 'gallery';
const prepareBatchResult = withResult(() =>
prepareLinearUIBatch({
state,
@@ -127,7 +99,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
prepend,
seedFieldIdentifier,
positivePromptFieldIdentifier,
origin: tab,
origin: 'canvas',
destination,
})
);

View File

@@ -0,0 +1,73 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
export const galleryImageClicked = createAction<{
imageDTO: ImageDTO;
shiftKey: boolean;
ctrlKey: boolean;
metaKey: boolean;
altKey: boolean;
}>('gallery/imageClicked');
/**
* This listener handles the logic for selecting images in the gallery.
*
* Previously, this logic was in a `useCallback` with the whole gallery selection as a dependency. Every time
* the selection changed, the callback got recreated and all images rerendered. This could easily block for
* hundreds of ms, more for lower end devices.
*
* Moving this logic into a listener means we don't need to recalculate anything dynamically and the gallery
* is much more responsive.
*/
export const addGalleryImageClickedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: galleryImageClicked,
effect: (action, { dispatch, getState }) => {
const { imageDTO, shiftKey, ctrlKey, metaKey, altKey } = action.payload;
const state = getState();
const queryArgs = selectListImagesQueryArgs(state);
const queryResult = imagesApi.endpoints.listImages.select(queryArgs)(state);
if (!queryResult.data) {
// Should never happen if we have clicked a gallery image
return;
}
const imageDTOs = queryResult.data.items;
const selection = state.gallery.selection;
if (altKey) {
if (state.gallery.imageToCompare?.image_name === imageDTO.image_name) {
dispatch(imageToCompareChanged(null));
} else {
dispatch(imageToCompareChanged(imageDTO));
}
} else if (shiftKey) {
const rangeEndImageName = imageDTO.image_name;
const lastSelectedImage = selection[selection.length - 1]?.image_name;
const lastClickedIndex = imageDTOs.findIndex((n) => n.image_name === lastSelectedImage);
const currentClickedIndex = imageDTOs.findIndex((n) => n.image_name === rangeEndImageName);
if (lastClickedIndex > -1 && currentClickedIndex > -1) {
// We have a valid range!
const start = Math.min(lastClickedIndex, currentClickedIndex);
const end = Math.max(lastClickedIndex, currentClickedIndex);
const imagesToSelect = imageDTOs.slice(start, end + 1);
dispatch(selectionChanged(selection.concat(imagesToSelect)));
}
} else if (ctrlKey || metaKey) {
if (selection.some((i) => i.image_name === imageDTO.image_name) && selection.length > 1) {
dispatch(selectionChanged(selection.filter((n) => n.image_name !== imageDTO.image_name)));
} else {
dispatch(selectionChanged(selection.concat(imageDTO)));
}
} else {
dispatch(selectionChanged([imageDTO]));
}
},
});
};
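
For context, a hedged sketch of how a gallery thumbnail's click handler might dispatch this action (the hook name is invented; the import paths are taken from elsewhere in this compare):

```ts
import { galleryImageClicked } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
import { useAppDispatch } from 'app/store/storeHooks';
import type { MouseEvent } from 'react';
import { useCallback } from 'react';
import type { ImageDTO } from 'services/api/types';

// The handler only forwards the clicked image and the modifier keys, so the
// component never needs the current selection as a dependency.
export const useExampleGalleryImageClick = (imageDTO: ImageDTO) => {
  const dispatch = useAppDispatch();
  return useCallback(
    (e: MouseEvent) => {
      dispatch(
        galleryImageClicked({
          imageDTO,
          shiftKey: e.shiftKey,
          ctrlKey: e.ctrlKey,
          metaKey: e.metaKey,
          altKey: e.altKey,
        })
      );
    },
    [dispatch, imageDTO]
  );
};
```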

View File

@@ -0,0 +1,119 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, offsetChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
export const addGalleryOffsetChangedListener = (startAppListening: AppStartListening) => {
/**
* When the user changes pages in the gallery, we need to wait until the next page of images is loaded, then maybe
* update the selection.
*
* There are three scenarios:
*
* 1. The page is changed by clicking the pagination buttons. No changes to selection are needed.
*
* 2. The page is changed by using the arrow keys (without alt).
* - When going backwards, select the last image.
* - When going forwards, select the first image.
*
* 3. The page is changed by using the arrow keys with alt. This means the user is changing the comparison image.
* - When going backwards, select the last image _as the comparison image_.
* - When going forwards, select the first image _as the comparison image_.
*/
startAppListening({
actionCreator: offsetChanged,
effect: async (action, { dispatch, getState, getOriginalState, take, cancelActiveListeners }) => {
// Cancel any active listeners to prevent the selection from changing without user input
cancelActiveListeners();
const { withHotkey } = action.payload;
if (!withHotkey) {
// User changed pages by clicking the pagination buttons - no changes to selection
return;
}
const originalState = getOriginalState();
const prevOffset = originalState.gallery.offset;
const offset = getState().gallery.offset;
if (offset === prevOffset) {
// The page didn't change - bail
return;
}
/**
* We need to wait until the next page of images is loaded before updating the selection, so we use the correct
* page of images.
*
* The simplest way to do it would be to use `take` to wait for the next fulfilled action, but RTK-Q doesn't
* dispatch an action on cache hits. This means the `take` will only return if the cache is empty. If the user
* changes to a cached page - a common situation - the `take` will never resolve.
*
* So we need to take a two-step approach. First, check if we have data in the cache for the page of images. If
* we have data cached, use it to update the selection. If we don't have data cached, wait for the next fulfilled
* action, which updates the cache, then use the cache to update the selection.
*/
// Check if we have data in the cache for the page of images
const queryArgs = selectListImagesQueryArgs(getState());
let { data } = imagesApi.endpoints.listImages.select(queryArgs)(getState());
// No data yet - wait for the network request to complete
if (!data) {
const takeResult = await take(imagesApi.endpoints.listImages.matchFulfilled, 5000);
if (!takeResult) {
// The request didn't complete in time - bail
return;
}
data = takeResult[0].payload;
}
// We awaited a network request - state could have changed, get fresh state
const state = getState();
const { selection, imageToCompare } = state.gallery;
const imageDTOs = data?.items;
if (!imageDTOs) {
// The page didn't load - bail
return;
}
if (withHotkey === 'arrow') {
// User changed pages by using the arrow keys - selection changes to the first or last image depending on direction
if (offset < prevOffset) {
// We've gone backwards
const lastImage = imageDTOs[imageDTOs.length - 1];
if (!selection.some((selectedImage) => selectedImage.image_name === lastImage?.image_name)) {
dispatch(selectionChanged(lastImage ? [lastImage] : []));
}
} else {
// We've gone forwards
const firstImage = imageDTOs[0];
if (!selection.some((selectedImage) => selectedImage.image_name === firstImage?.image_name)) {
dispatch(selectionChanged(firstImage ? [firstImage] : []));
}
}
return;
}
if (withHotkey === 'alt+arrow') {
// User changed pages by using the arrow keys with alt - comparison image changes to the first or last image depending on direction
if (offset < prevOffset) {
// We've gone backwards
const lastImage = imageDTOs[imageDTOs.length - 1];
if (lastImage && imageToCompare?.image_name !== lastImage.image_name) {
dispatch(imageToCompareChanged(lastImage));
}
} else {
// We've gone forwards
const firstImage = imageDTOs[0];
if (firstImage && imageToCompare?.image_name !== firstImage.image_name) {
dispatch(imageToCompareChanged(firstImage));
}
}
return;
}
},
});
};
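
A sketch of a page-change hotkey handler that would trigger the listener above. It assumes `offsetChanged` accepts an `{ offset, withHotkey }` payload; only `withHotkey` is confirmed by this diff:

```ts
import type { AppDispatch } from 'app/store/store';
import { offsetChanged } from 'features/gallery/store/gallerySlice';

// `offset` in the payload is an assumption; the `withHotkey` values come from the listener above.
export const goToNextPageViaHotkey = (
  dispatch: AppDispatch,
  currentOffset: number,
  pageSize: number,
  alt: boolean
) => {
  dispatch(
    offsetChanged({
      offset: currentOffset + pageSize,
      // 'alt+arrow' moves the comparison image; 'arrow' moves the selection.
      withHotkey: alt ? 'alt+arrow' : 'arrow',
    })
  );
};
```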

View File

@@ -1,9 +1,9 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { size } from 'es-toolkit/compat';
import { $templates } from 'features/nodes/store/nodesSlice';
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
import { size } from 'lodash-es';
import { serializeError } from 'serialize-error';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import type { JsonObject } from 'type-fest';

View File

@@ -8,16 +8,16 @@ export const addImageAddedToBoardFulfilledListener = (startAppListening: AppStar
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchFulfilled,
effect: (action) => {
const { board_id, image_name } = action.meta.arg.originalArgs;
log.debug({ board_id, image_name }, 'Image added to board');
const { board_id, imageDTO } = action.meta.arg.originalArgs;
log.debug({ board_id, imageDTO }, 'Image added to board');
},
});
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchRejected,
effect: (action) => {
const { board_id, image_name } = action.meta.arg.originalArgs;
log.debug({ board_id, image_name }, 'Problem adding image to board');
const { board_id, imageDTO } = action.meta.arg.originalArgs;
log.debug({ board_id, imageDTO }, 'Problem adding image to board');
},
});
};

View File

@@ -0,0 +1,221 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch, RootState } from 'app/store/store';
import { entityDeleted, referenceImageIPAdapterImageChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { fieldImageCollectionValueChanged, fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { isImageFieldCollectionInputInstance, isImageFieldInputInstance } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
import { forEach, intersectionBy } from 'lodash-es';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import type { Param0 } from 'tsafe';
const log = logger('gallery');
//TODO(psyche): handle image deletion (canvas staging area?)
// Some utils to delete images from different parts of the app
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
const actions: Param0<typeof dispatch>[] = [];
state.nodes.present.nodes.forEach((node) => {
if (!isInvocationNode(node)) {
return;
}
forEach(node.data.inputs, (input) => {
if (isImageFieldInputInstance(input) && input.value?.image_name === imageDTO.image_name) {
actions.push(
fieldImageValueChanged({
nodeId: node.data.id,
fieldName: input.name,
value: undefined,
})
);
return;
}
if (isImageFieldCollectionInputInstance(input)) {
actions.push(
fieldImageCollectionValueChanged({
nodeId: node.data.id,
fieldName: input.name,
value: input.value?.filter((value) => value?.image_name !== imageDTO.image_name),
})
);
}
});
});
actions.forEach(dispatch);
};
const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).controlLayers.entities.forEach(({ id, objects }) => {
let shouldDelete = false;
for (const obj of objects) {
if (obj.type === 'image' && obj.image.image_name === imageDTO.image_name) {
shouldDelete = true;
break;
}
}
if (shouldDelete) {
dispatch(entityDeleted({ entityIdentifier: { id, type: 'control_layer' } }));
}
});
};
const deleteReferenceImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
if (entity.ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(referenceImageIPAdapterImageChanged({ entityIdentifier: getEntityIdentifier(entity), imageDTO: null }));
}
});
};
const deleteRasterLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).rasterLayers.entities.forEach(({ id, objects }) => {
let shouldDelete = false;
for (const obj of objects) {
if (obj.type === 'image' && obj.image.image_name === imageDTO.image_name) {
shouldDelete = true;
break;
}
}
if (shouldDelete) {
dispatch(entityDeleted({ entityIdentifier: { id, type: 'raster_layer' } }));
}
});
};
export const addImageDeletionListeners = (startAppListening: AppStartListening) => {
// Handle single image deletion
startAppListening({
actionCreator: imageDeletionConfirmed,
effect: async (action, { dispatch, getState }) => {
const { imageDTOs, imagesUsage } = action.payload;
if (imageDTOs.length !== 1 || imagesUsage.length !== 1) {
// handle multiples in separate listener
return;
}
const imageDTO = imageDTOs[0];
const imageUsage = imagesUsage[0];
if (!imageDTO || !imageUsage) {
// satisfy noUncheckedIndexedAccess
return;
}
try {
const state = getState();
await dispatch(imagesApi.endpoints.deleteImage.initiate(imageDTO)).unwrap();
if (state.gallery.selection.some((i) => i.image_name === imageDTO.image_name)) {
// The deleted image was a selected image, we need to select the next image
const newSelection = state.gallery.selection.filter((i) => i.image_name !== imageDTO.image_name);
if (newSelection.length > 0) {
return;
}
// Get the current list of images and select the same index
const baseQueryArgs = selectListImagesQueryArgs(state);
const data = imagesApi.endpoints.listImages.select(baseQueryArgs)(state).data;
if (data) {
const deletedImageIndex = data.items.findIndex((i) => i.image_name === imageDTO.image_name);
const nextImage = data.items[deletedImageIndex + 1] ?? data.items[0] ?? null;
if (nextImage?.image_name === imageDTO.image_name) {
// If the next image is the same as the deleted one, it means it was the last image, reset selection
dispatch(imageSelected(null));
} else {
dispatch(imageSelected(nextImage));
}
}
}
deleteNodesImages(state, dispatch, imageDTO);
deleteReferenceImages(state, dispatch, imageDTO);
deleteRasterLayerImages(state, dispatch, imageDTO);
deleteControlLayerImages(state, dispatch, imageDTO);
} catch {
// no-op
} finally {
dispatch(isModalOpenChanged(false));
}
},
});
// Handle multiple image deletion
startAppListening({
actionCreator: imageDeletionConfirmed,
effect: async (action, { dispatch, getState }) => {
const { imageDTOs, imagesUsage } = action.payload;
if (imageDTOs.length <= 1 || imagesUsage.length <= 1) {
// handle singles in separate listener
return;
}
try {
const state = getState();
await dispatch(imagesApi.endpoints.deleteImages.initiate({ imageDTOs })).unwrap();
if (intersectionBy(state.gallery.selection, imageDTOs, 'image_name').length > 0) {
// Some selected images were deleted, need to select the next image
const queryArgs = selectListImagesQueryArgs(state);
const { data } = imagesApi.endpoints.listImages.select(queryArgs)(state);
if (data) {
// When we delete multiple images, we clear the selection. Then, the next time we load images, we will
// select the first one. This is handled below in the listener for `imagesApi.endpoints.listImages.matchFulfilled`.
dispatch(imageSelected(null));
}
}
// We need to reset the features where the image is in use - none of these work if their image(s) don't exist
imageDTOs.forEach((imageDTO) => {
deleteNodesImages(state, dispatch, imageDTO);
deleteControlLayerImages(state, dispatch, imageDTO);
deleteReferenceImages(state, dispatch, imageDTO);
deleteRasterLayerImages(state, dispatch, imageDTO);
});
} catch {
// no-op
} finally {
dispatch(isModalOpenChanged(false));
}
},
});
// When we list images, if no image is selected, select the first one.
startAppListening({
matcher: imagesApi.endpoints.listImages.matchFulfilled,
effect: (action, { dispatch, getState }) => {
const selection = getState().gallery.selection;
if (selection.length === 0) {
dispatch(imageSelected(action.payload.items[0] ?? null));
}
},
});
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchFulfilled,
effect: (action) => {
log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Image deleted');
},
});
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchRejected,
effect: (action) => {
log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Unable to delete image');
},
});
};

View File

@@ -0,0 +1,32 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { selectImageUsage } from 'features/deleteImageModal/store/selectors';
import { imagesToDeleteSelected, isModalOpenChanged } from 'features/deleteImageModal/store/slice';
export const addImageToDeleteSelectedListener = (startAppListening: AppStartListening) => {
startAppListening({
actionCreator: imagesToDeleteSelected,
effect: (action, { dispatch, getState }) => {
const imageDTOs = action.payload;
const state = getState();
const { shouldConfirmOnDelete } = state.system;
const imagesUsage = selectImageUsage(getState());
const isImageInUse =
imagesUsage.some((i) => i.isRasterLayerImage) ||
imagesUsage.some((i) => i.isControlLayerImage) ||
imagesUsage.some((i) => i.isReferenceImage) ||
imagesUsage.some((i) => i.isInpaintMaskImage) ||
imagesUsage.some((i) => i.isUpscaleImage) ||
imagesUsage.some((i) => i.isNodesImage) ||
imagesUsage.some((i) => i.isRegionalGuidanceImage);
if (shouldConfirmOnDelete || isImageInUse) {
dispatch(isModalOpenChanged(true));
return;
}
dispatch(imageDeletionConfirmed({ imageDTOs, imagesUsage }));
},
});
};

View File

@@ -2,12 +2,12 @@ import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { RootState } from 'app/store/store';
import { omit } from 'es-toolkit/compat';
import { imageUploadedClientSide } from 'features/gallery/store/actions';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { omit } from 'lodash-es';
import { boardsApi } from 'services/api/endpoints/boards';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';

View File

@@ -0,0 +1,30 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
export const addImagesStarredListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: imagesApi.endpoints.starImages.matchFulfilled,
effect: (action, { dispatch, getState }) => {
const { updated_image_names: starredImages } = action.payload;
const state = getState();
const { selection } = state.gallery;
const updatedSelection: ImageDTO[] = [];
selection.forEach((selectedImageDTO) => {
if (starredImages.includes(selectedImageDTO.image_name)) {
updatedSelection.push({
...selectedImageDTO,
starred: true,
});
} else {
updatedSelection.push(selectedImageDTO);
}
});
dispatch(selectionChanged(updatedSelection));
},
});
};

View File

@@ -0,0 +1,30 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
export const addImagesUnstarredListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: imagesApi.endpoints.unstarImages.matchFulfilled,
effect: (action, { dispatch, getState }) => {
const { updated_image_names: unstarredImages } = action.payload;
const state = getState();
const { selection } = state.gallery;
const updatedSelection: ImageDTO[] = [];
selection.forEach((selectedImageDTO) => {
if (unstarredImages.includes(selectedImageDTO.image_name)) {
updatedSelection.push({
...selectedImageDTO,
starred: false,
});
} else {
updatedSelection.push(selectedImageDTO);
}
});
dispatch(selectionChanged(updatedSelection));
},
});
};

View File

@@ -1,7 +1,11 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch, RootState } from 'app/store/store';
import { controlLayerModelChanged, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
import {
controlLayerModelChanged,
referenceImageIPAdapterModelChanged,
rgIPAdapterModelChanged,
} from 'features/controlLayers/store/canvasSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import {
clipEmbedModelSelected,
@@ -11,9 +15,8 @@ import {
t5EncoderModelSelected,
vaeSelected,
} from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { modelSelected } from 'features/parameters/store/actions';
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
import {
@@ -207,12 +210,12 @@ const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log)
const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const ipaModels = models.filter(isIPAdapterModelConfig);
selectRefImagesSlice(state).entities.forEach((entity) => {
if (!isIPAdapterConfig(entity.config)) {
selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
const selectedIPAdapterModel = entity.config.model;
const selectedIPAdapterModel = entity.ipAdapter.model;
// `null` is a valid IP adapter model - no need to do anything.
if (!selectedIPAdapterModel) {
return;
@@ -222,16 +225,16 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
return;
}
log.debug({ selectedIPAdapterModel }, 'Selected IP adapter model is not available, clearing');
dispatch(refImageModelChanged({ id: entity.id, modelConfig: null }));
dispatch(referenceImageIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
});
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isIPAdapterConfig(config)) {
entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
if (ipAdapter.type !== 'ip_adapter') {
return;
}
const selectedIPAdapterModel = config.model;
const selectedIPAdapterModel = ipAdapter.model;
// `null` is a valid IP adapter model - no need to do anything.
if (!selectedIPAdapterModel) {
return;
@@ -242,7 +245,7 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
}
log.debug({ selectedIPAdapterModel }, 'Selected IP adapter model is not available, clearing');
dispatch(
rgRefImageModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
);
});
});
@@ -251,11 +254,11 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
const fluxReduxModels = models.filter(isFluxReduxModelConfig);
selectRefImagesSlice(state).entities.forEach((entity) => {
if (!isFLUXReduxConfig(entity.config)) {
selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
if (entity.ipAdapter.type !== 'flux_redux') {
return;
}
const selectedFLUXReduxModel = entity.config.model;
const selectedFLUXReduxModel = entity.ipAdapter.model;
// `null` is a valid FLUX Redux model - no need to do anything.
if (!selectedFLUXReduxModel) {
return;
@@ -265,16 +268,16 @@ const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
return;
}
log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
dispatch(refImageModelChanged({ id: entity.id, modelConfig: null }));
dispatch(referenceImageIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
});
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isFLUXReduxConfig(config)) {
entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
if (ipAdapter.type !== 'flux_redux') {
return;
}
const selectedFLUXReduxModel = config.model;
const selectedFLUXReduxModel = ipAdapter.model;
// `null` is a valid FLUX Redux model - no need to do anything.
if (!selectedFLUXReduxModel) {
return;
@@ -285,7 +288,7 @@ const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
}
log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
dispatch(
rgRefImageModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
);
});
});

View File

@@ -1,8 +1,8 @@
import { objectEquals } from '@observ33r/object-equals';
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { isEqual } from 'lodash-es';
import { atom } from 'nanostores';
import { api } from 'services/api';
import { modelsApi } from 'services/api/endpoints/models';
@@ -64,7 +64,7 @@ export const addSocketConnectedEventListener = (startAppListening: AppStartListe
const nextQueueStatusData = await queueStatusRequest.unwrap();
// If the queue hasn't changed, we don't need to do anything.
if (objectEquals(prevQueueStatusData?.queue, nextQueueStatusData.queue)) {
if (isEqual(prevQueueStatusData?.queue, nextQueueStatusData.queue)) {
return;
}

View File

@@ -1,13 +0,0 @@
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import { atom, computed } from 'nanostores';
import { flushSync } from 'react-dom';
export const $isLayoutLoading = atom(false);
export const setIsLayoutLoading = (isLoading: boolean) => {
flushSync(() => {
$isLayoutLoading.set(isLoading);
});
};
export const $globalIsLoading = computed([$didStudioInit, $isLayoutLoading], (didStudioInit, isLayoutLoading) => {
return !didStudioInit || isLayoutLoading;
});

View File

@@ -4,17 +4,16 @@ import { logger } from 'app/logging/logger';
import { idbKeyValDriver } from 'app/store/enhancers/reduxRemember/driver';
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
import { deepClone } from 'common/util/deepClone';
import { keys, mergeWith, omit, pick } from 'es-toolkit/compat';
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
import {
canvasSessionSlice,
canvasStagingAreaPersistConfig,
canvasStagingAreaSlice,
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
import { refImagesPersistConfig, refImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
import { hrfPersistConfig, hrfSlice } from 'features/hrf/store/hrfSlice';
@@ -29,6 +28,7 @@ import { configSlice } from 'features/system/store/configSlice';
import { systemPersistConfig, systemSlice } from 'features/system/store/systemSlice';
import { uiPersistConfig, uiSlice } from 'features/ui/store/uiSlice';
import { diff } from 'jsondiffpatch';
import { keys, mergeWith, omit, pick } from 'lodash-es';
import dynamicMiddlewares from 'redux-dynamic-middlewares';
import type { SerializeFunction, UnserializeFunction } from 'redux-remember';
import { rememberEnhancer, rememberReducer } from 'redux-remember';
@@ -54,6 +54,7 @@ const allReducers = {
[configSlice.name]: configSlice.reducer,
[uiSlice.name]: uiSlice.reducer,
[dynamicPromptsSlice.name]: dynamicPromptsSlice.reducer,
[deleteImageModalSlice.name]: deleteImageModalSlice.reducer,
[changeBoardModalSlice.name]: changeBoardModalSlice.reducer,
[modelManagerV2Slice.name]: modelManagerV2Slice.reducer,
[queueSlice.name]: queueSlice.reducer,
@@ -64,10 +65,9 @@ const allReducers = {
[stylePresetSlice.name]: stylePresetSlice.reducer,
[paramsSlice.name]: paramsSlice.reducer,
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
[canvasSessionSlice.name]: canvasSessionSlice.reducer,
[canvasStagingAreaSlice.name]: canvasStagingAreaSlice.reducer,
[lorasSlice.name]: lorasSlice.reducer,
[workflowLibrarySlice.name]: workflowLibrarySlice.reducer,
[refImagesSlice.name]: refImagesSlice.reducer,
};
const rootReducer = combineReducers(allReducers);
@@ -113,7 +113,6 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
[canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
[lorasPersistConfig.name]: lorasPersistConfig,
[workflowLibraryPersistConfig.name]: workflowLibraryPersistConfig,
[refImagesSlice.name]: refImagesPersistConfig,
};
const unserialize: UnserializeFunction = (data, key) => {
@@ -176,7 +175,6 @@ export const createStore = (uniqueStoreKey?: string, persist = true) =>
.concat(api.middleware)
.concat(dynamicMiddlewares)
.concat(authToastMiddleware)
// .concat(getDebugLoggerMiddleware())
.prepend(listenerMiddleware.middleware),
enhancers: (getDefaultEnhancers) => {
const _enhancers = getDefaultEnhancers().concat(autoBatchEnhancer());
@@ -211,4 +209,3 @@ export type RootState = ReturnType<AppStore['getState']>;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>;
export type AppDispatch = ReturnType<typeof createStore>['dispatch'];
export type AppGetState = ReturnType<typeof createStore>['getState'];

View File

@@ -15,9 +15,9 @@ import {
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { merge, omit } from 'es-toolkit/compat';
import { selectSystemSlice, setShouldEnableInformationalPopovers } from 'features/system/store/systemSlice';
import { toast } from 'features/toast/toast';
import { merge, omit } from 'lodash-es';
import type { ReactElement } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

View File

@@ -17,7 +17,6 @@ const Loading = () => {
right={0}
bottom={0}
left={0}
zIndex={99999}
>
<Image src={InvokeLogoWhite} w="8rem" h="8rem" />
<Spinner

View File

@@ -11,14 +11,13 @@ import { memo, useEffect, useMemo, useState } from 'react';
type Props = PropsWithChildren & {
maxHeight?: ChakraProps['maxHeight'];
maxWidth?: ChakraProps['maxWidth'];
overflowX?: 'hidden' | 'scroll';
overflowY?: 'hidden' | 'scroll';
};
const styles: CSSProperties = { position: 'absolute', top: 0, left: 0, right: 0, bottom: 0 };
const ScrollableContent = ({ children, maxHeight, maxWidth, overflowX = 'hidden', overflowY = 'scroll' }: Props) => {
const ScrollableContent = ({ children, maxHeight, overflowX = 'hidden', overflowY = 'scroll' }: Props) => {
const overlayscrollbarsOptions = useMemo(
() => getOverlayScrollbarsParams({ overflowX, overflowY }).options,
[overflowX, overflowY]
@@ -45,7 +44,7 @@ const ScrollableContent = ({ children, maxHeight, maxWidth, overflowX = 'hidden'
}, [os]);
return (
<Flex w="full" h="full" maxHeight={maxHeight} maxWidth={maxWidth} position="relative">
<Flex w="full" h="full" maxHeight={maxHeight} position="relative">
<Box position="absolute" top={0} left={0} right={0} bottom={0}>
<OverlayScrollbarsComponent ref={osRef} style={styles} options={overlayscrollbarsOptions}>
{children}

View File

@@ -1,5 +1,5 @@
import { deepClone } from 'common/util/deepClone';
import { merge } from 'es-toolkit/compat';
import { merge } from 'lodash-es';
import { ClickScrollPlugin, OverlayScrollbars } from 'overlayscrollbars';
import type { UseOverlayScrollbarsParams } from 'overlayscrollbars-react';
import type { CSSProperties } from 'react';

View File

@@ -1,6 +1,6 @@
.os-scrollbar {
/* The size of the scrollbar */
--os-size: 8px;
--os-size: 9px;
/* The axis-perpendicular padding of the scrollbar (horizontal: padding-y, vertical: padding-x) */
/* --os-padding-perpendicular: 0; */
/* The axis padding of the scrollbar (horizontal: padding-x, vertical: padding-y) */
@@ -8,11 +8,11 @@
/* The border radius of the scrollbar track */
/* --os-track-border-radius: 0; */
/* The background of the scrollbar track */
--os-track-bg: rgba(0, 0, 0, 0.5);
/* --os-track-bg: rgba(0, 0, 0, 0.3); */
/* The :hover background of the scrollbar track */
--os-track-bg-hover: rgba(0, 0, 0, 0.5);
/* --os-track-bg-hover: rgba(0, 0, 0, 0.3); */
/* The :active background of the scrollbar track */
--os-track-bg-active: rgba(0, 0, 0, 0.6);
/* --os-track-bg-active: rgba(0, 0, 0, 0.3); */
/* The border of the scrollbar track */
/* --os-track-border: none; */
/* The :hover background of the scrollbar track */
@@ -22,11 +22,11 @@
/* The border radius of the scrollbar handle */
/* --os-handle-border-radius: 2px; */
/* The background of the scrollbar handle */
--os-handle-bg: var(--invoke-colors-base-400);
/* --os-handle-bg: var(--invokeai-colors-accentAlpha-500); */
/* The :hover background of the scrollbar handle */
--os-handle-bg-hover: var(--invoke-colors-base-300);
/* --os-handle-bg-hover: var(--invokeai-colors-accentAlpha-700); */
/* The :active background of the scrollbar handle */
--os-handle-bg-active: var(--invoke-colors-base-250);
/* --os-handle-bg-active: var(--invokeai-colors-accentAlpha-800); */
/* The border of the scrollbar handle */
/* --os-handle-border: none; */
/* The :hover border of the scrollbar handle */
@@ -34,23 +34,23 @@
/* The :active border of the scrollbar handle */
/* --os-handle-border-active: none; */
/* The min size of the scrollbar handle */
--os-handle-min-size: 32px;
--os-handle-min-size: 50px;
/* The max size of the scrollbar handle */
/* --os-handle-max-size: none; */
/* The axis-perpendicular size of the scrollbar handle (horizontal: height, vertical: width) */
/* --os-handle-perpendicular-size: 100%; */
/* The :hover axis-perpendicular size of the scrollbar handle (horizontal: height, vertical: width) */
--os-handle-perpendicular-size-hover: 100%;
/* --os-handle-perpendicular-size-hover: 100%; */
/* The :active axis-perpedicular size of the scrollbar handle (horizontal: height, vertical: width) */
/* --os-handle-perpendicular-size-active: 100%; */
/* Increases the interactive area of the scrollbar handle. */
--os-handle-interactive-area-offset: -1px;
/* --os-handle-interactive-area-offset: 0; */
}
.os-scrollbar-handle {
/* cursor: grab; */
cursor: grab;
}
.os-scrollbar-handle:active {
/* cursor: grabbing; */
cursor: grabbing;
}

View File

@@ -73,7 +73,7 @@ export const useBoolean = (initialValue: boolean): UseBoolean => {
};
};
export type UseDisclosure = {
type UseDisclosure = {
isOpen: boolean;
open: () => void;
close: () => void;

View File

@@ -1,29 +0,0 @@
import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine';
import { dropTargetForElements } from '@atlaskit/pragmatic-drag-and-drop/element/adapter';
import { dropTargetForExternal } from '@atlaskit/pragmatic-drag-and-drop/external/adapter';
import { useTimeoutCallback } from 'common/hooks/useTimeoutCallback';
import type { RefObject } from 'react';
import { useEffect } from 'react';
export const useCallbackOnDragEnter = (cb: () => void, ref: RefObject<HTMLElement>, delay = 300) => {
const [run, cancel] = useTimeoutCallback(cb, delay);
useEffect(() => {
const element = ref.current;
if (!element) {
return;
}
return combine(
dropTargetForElements({
element,
onDragEnter: run,
onDragLeave: cancel,
}),
dropTargetForExternal({
element,
onDragEnter: run,
onDragLeave: cancel,
})
);
}, [cancel, ref, run]);
};
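For context, a minimal usage sketch of the removed useCallbackOnDragEnter hook, assuming a board row that should open after a dragged item hovers over it; the component name, import path, and open handler are illustrative, not taken from the codebase.
import { useCallbackOnDragEnter } from 'common/hooks/useCallbackOnDragEnter'; // path assumed
import { useCallback, useRef, useState } from 'react';
const BoardRow = () => {
  const ref = useRef<HTMLDivElement>(null);
  const [isOpen, setIsOpen] = useState(false);
  const open = useCallback(() => setIsOpen(true), []);
  // Runs `open` 300ms after a drag enters the row; dragging back out before then cancels the timer.
  useCallbackOnDragEnter(open, ref, 300);
  return <div ref={ref}>{isOpen ? 'open' : 'closed'}</div>;
};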

View File

@@ -1,19 +0,0 @@
import { useEffect } from 'react';
// Chakra tooltips sometimes open during a drag operation. We can fix it by dispatching an event that chakra listens
// for to close tooltips. It's reaching into the internals but it seems to work.
const closeEventName = 'chakra-ui:close-tooltip';
export const useCloseChakraTooltipsOnDragFix = () => {
useEffect(() => {
const closeTooltips = () => {
document.dispatchEvent(new window.CustomEvent(closeEventName));
};
document.addEventListener('drag', closeTooltips);
return () => {
document.removeEventListener('drag', closeTooltips);
};
}, []);
};
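A sketch of how this fix might be mounted, assuming it is called once near the app root; the component name and import path are illustrative.
import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix'; // path assumed
const AppRoot = () => {
  // Registers a document-level 'drag' listener that dispatches Chakra's internal
  // 'chakra-ui:close-tooltip' event, closing any open tooltips while a drag is in progress.
  useCloseChakraTooltipsOnDragFix();
  return null; // real app content would render here
};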

View File

@@ -1,165 +0,0 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
/**
* Adapted from https://github.com/chakra-ui/chakra-ui/blob/v2/packages/hooks/src/use-outside-click.ts
*
* The main change here is to support filtering of outside clicks via a `filter` function.
*
* This lets us work around issues with portals and components like popovers, which typically close on an outside click.
*
* For example, consider a popover that has a custom drop-down component inside it, which uses a portal to render
* the drop-down options. The original outside click handler would close the popover when clicking on the drop-down options,
* because the click is outside the popover - but we expect the popover to stay open in this case.
*
* A filter function like this can fix that:
*
* ```ts
* const filter = (el: HTMLElement) => el.className.includes('chakra-portal') || el.id.includes('react-select')
* ```
*
* This ignores clicks on react-select-based drop-downs and Chakra UI portals and is used as the default filter.
*/
import { useCallback, useEffect, useRef } from 'react';
type FilterFunction = (el: HTMLElement | SVGElement) => boolean;
export function useCallbackRef<T extends (...args: any[]) => any>(
callback: T | undefined,
deps: React.DependencyList = []
) {
const callbackRef = useRef(callback);
useEffect(() => {
callbackRef.current = callback;
});
// eslint-disable-next-line react-hooks/exhaustive-deps
return useCallback(((...args) => callbackRef.current?.(...args)) as T, deps);
}
export interface UseOutsideClickProps {
/**
* Whether the hook is enabled
*/
enabled?: boolean;
/**
* The reference to a DOM element.
*/
ref: React.RefObject<HTMLElement | null>;
/**
* Function invoked when a click is triggered outside the referenced element.
*/
handler?: (e: Event) => void;
/**
* A function that filters the elements that should be considered as outside clicks.
*
* If omitted, a default filter function that ignores clicks in Chakra UI portals and react-select components is used.
*/
filter?: FilterFunction;
}
export const DEFAULT_FILTER: FilterFunction = (el) => {
if (el instanceof SVGElement) {
// SVGElement's type appears to be incorrect. Its className is not a string, which causes `includes` to fail.
// Let's assume that SVG elements with a class name are not part of the portal and should not be filtered.
return false;
}
return el.className.includes('chakra-portal') || el.id.includes('react-select');
};
/**
* Example, used in components like Dialogs and Popovers, so they can close
* when a user clicks outside them.
*/
export function useFilterableOutsideClick(props: UseOutsideClickProps) {
const { ref, handler, enabled = true, filter = DEFAULT_FILTER } = props;
const savedHandler = useCallbackRef(handler);
const stateRef = useRef({
isPointerDown: false,
ignoreEmulatedMouseEvents: false,
});
const state = stateRef.current;
useEffect(() => {
if (!enabled) {
return;
}
const onPointerDown: any = (e: PointerEvent) => {
if (isValidEvent(e, ref, filter)) {
state.isPointerDown = true;
}
};
const onMouseUp: any = (event: MouseEvent) => {
if (state.ignoreEmulatedMouseEvents) {
state.ignoreEmulatedMouseEvents = false;
return;
}
if (state.isPointerDown && handler && isValidEvent(event, ref)) {
state.isPointerDown = false;
savedHandler(event);
}
};
const onTouchEnd = (event: TouchEvent) => {
state.ignoreEmulatedMouseEvents = true;
if (handler && state.isPointerDown && isValidEvent(event, ref)) {
state.isPointerDown = false;
savedHandler(event);
}
};
const doc = getOwnerDocument(ref.current);
doc.addEventListener('mousedown', onPointerDown, true);
doc.addEventListener('mouseup', onMouseUp, true);
doc.addEventListener('touchstart', onPointerDown, true);
doc.addEventListener('touchend', onTouchEnd, true);
return () => {
doc.removeEventListener('mousedown', onPointerDown, true);
doc.removeEventListener('mouseup', onMouseUp, true);
doc.removeEventListener('touchstart', onPointerDown, true);
doc.removeEventListener('touchend', onTouchEnd, true);
};
}, [handler, ref, savedHandler, state, enabled, filter]);
}
function isValidEvent(event: Event, ref: React.RefObject<HTMLElement | null>, filter?: FilterFunction): boolean {
const target = (event.composedPath?.()[0] ?? event.target) as HTMLElement;
if (target) {
const doc = getOwnerDocument(target);
if (!doc.contains(target)) {
return false;
}
}
if (ref.current?.contains(target)) {
return false;
}
// This is the main logic change from the original hook.
if (filter) {
// Check if the click is inside an element matching the filter.
// This is used for portal-awareness or other general exclusion cases.
let currentElement: HTMLElement | null = target;
// Traverse up the DOM tree from the target element.
while (currentElement && currentElement !== document.body) {
if (filter(currentElement)) {
return false;
}
currentElement = currentElement.parentElement;
}
}
// If the click is not inside the ref and not inside a portal, it's a valid outside click.
return true;
}
function getOwnerDocument(node?: Element | null): Document {
return node?.ownerDocument ?? document;
}
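A usage sketch for the removed useFilterableOutsideClick hook, assuming a popover-style panel that should close on outside clicks but ignore clicks inside Chakra portals or react-select menus (the default filter); the Panel component, onClose prop, and import path are illustrative.
import { useRef } from 'react';
import { DEFAULT_FILTER, useFilterableOutsideClick } from 'common/hooks/useFilterableOutsideClick'; // path assumed
const Panel = ({ onClose }: { onClose: () => void }) => {
  const ref = useRef<HTMLDivElement>(null);
  useFilterableOutsideClick({
    ref,
    handler: onClose,
    // DEFAULT_FILTER is already the fallback; pass a custom FilterFunction to ignore other containers too.
    filter: DEFAULT_FILTER,
  });
  return <div ref={ref}>{/* panel content */}</div>;
};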

View File

@@ -1,6 +1,6 @@
import { useAppDispatch } from 'app/store/storeHooks';
import { useCancelCurrentQueueItem } from 'features/queue/hooks/useCancelCurrentQueueItem';
import { useClearQueue } from 'features/queue/hooks/useClearQueue';
import { useDeleteCurrentQueueItem } from 'features/queue/hooks/useDeleteCurrentQueueItem';
import { useInvoke } from 'features/queue/hooks/useInvoke';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
@@ -35,39 +35,34 @@ export const useGlobalHotkeys = () => {
dependencies: [queue],
});
const deleteCurrentQueueItem = useDeleteCurrentQueueItem();
const {
cancelQueueItem,
isDisabled: isDisabledCancelQueueItem,
isLoading: isLoadingCancelQueueItem,
} = useCancelCurrentQueueItem();
useRegisteredHotkeys({
id: 'cancelQueueItem',
category: 'app',
callback: deleteCurrentQueueItem.trigger,
callback: cancelQueueItem,
options: {
enabled: !deleteCurrentQueueItem.isDisabled && !deleteCurrentQueueItem.isLoading,
enabled: !isDisabledCancelQueueItem && !isLoadingCancelQueueItem,
preventDefault: true,
},
dependencies: [deleteCurrentQueueItem],
dependencies: [cancelQueueItem, isDisabledCancelQueueItem, isLoadingCancelQueueItem],
});
const clearQueue = useClearQueue();
const { clearQueue, isDisabled: isDisabledClearQueue, isLoading: isLoadingClearQueue } = useClearQueue();
useRegisteredHotkeys({
id: 'clearQueue',
category: 'app',
callback: clearQueue.trigger,
callback: clearQueue,
options: {
enabled: !clearQueue.isDisabled && !clearQueue.isLoading,
enabled: !isDisabledClearQueue && !isLoadingClearQueue,
preventDefault: true,
},
dependencies: [clearQueue],
});
useRegisteredHotkeys({
id: 'selectGenerateTab',
category: 'app',
callback: () => {
dispatch(setActiveTab('generate'));
},
dependencies: [dispatch],
dependencies: [clearQueue, isDisabledClearQueue, isLoadingClearQueue],
});
useRegisteredHotkeys({
@@ -117,20 +112,4 @@ export const useGlobalHotkeys = () => {
},
dependencies: [dispatch, isModelManagerEnabled],
});
// TODO: implement delete - needs to handle gallery focus, which has changed w/ dockview
// useRegisteredHotkeys({
// id: 'deleteSelection',
// category: 'gallery',
// callback: () => {
// if (!selection.length) {
// return;
// }
// deleteImageModal.delete(selection);
// },
// options: {
// enabled: (isGalleryFocused || isImageViewerFocused) && isDeleteEnabledByTab && !isWorkflowsFocused,
// },
// dependencies: [isWorkflowsFocused, isDeleteEnabledByTab, selection, isWorkflowsFocused],
// });
};

View File

@@ -2,10 +2,10 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import type { GroupBase } from 'chakra-react-select';
import { groupBy, reduce } from 'es-toolkit/compat';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { groupBy, reduce } from 'lodash-es';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import type { AnyModelConfig } from 'services/api/types';

View File

@@ -1,11 +1,11 @@
import type { ButtonProps, IconButtonProps, SystemStyleObject } from '@invoke-ai/ui-library';
import { Button, IconButton } from '@invoke-ai/ui-library';
import type { IconButtonProps, SystemStyleObject } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { selectIsClientSideUploadEnabled } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { memo, useCallback } from 'react';
import { useCallback } from 'react';
import type { FileRejection } from 'react-dropzone';
import { useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next';
@@ -163,63 +163,32 @@ const sx = {
},
} satisfies SystemStyleObject;
export const UploadImageIconButton = memo(
({
isDisabled = false,
onUpload,
isError = false,
...rest
}: {
onUpload?: (imageDTO: ImageDTO) => void;
isError?: boolean;
} & SetOptional<IconButtonProps, 'aria-label'>) => {
const uploadApi = useImageUploadButton({ isDisabled, allowMultiple: false, onUpload });
return (
<>
<IconButton
aria-label="Upload image"
variant="outline"
sx={sx}
data-error={isError}
icon={<PiUploadBold />}
isLoading={uploadApi.request.isLoading}
{...rest}
{...uploadApi.getUploadButtonProps()}
/>
<input {...uploadApi.getUploadInputProps()} />
</>
);
}
);
UploadImageIconButton.displayName = 'UploadImageIconButton';
type UploadImageButtonProps = {
export const UploadImageButton = ({
isDisabled = false,
onUpload,
isError = false,
...rest
}: {
onUpload?: (imageDTO: ImageDTO) => void;
isError?: boolean;
} & ButtonProps;
const UploadImageButton = memo((props: UploadImageButtonProps) => {
const { children, isDisabled = false, onUpload, isError = false, ...rest } = props;
} & SetOptional<IconButtonProps, 'aria-label'>) => {
const uploadApi = useImageUploadButton({ isDisabled, allowMultiple: false, onUpload });
return (
<>
<Button
<IconButton
aria-label="Upload image"
variant="outline"
sx={sx}
data-error={isError}
rightIcon={<PiUploadBold />}
icon={<PiUploadBold />}
isLoading={uploadApi.request.isLoading}
{...rest}
{...uploadApi.getUploadButtonProps()}
>
{children ?? 'Upload'}
</Button>
/>
<input {...uploadApi.getUploadInputProps()} />
</>
);
});
UploadImageButton.displayName = 'UploadImageButton';
};
export const UploadMultipleImageButton = ({
isDisabled = false,

View File

@@ -1,7 +1,7 @@
import { useAppStore } from 'app/store/nanostores/store';
import { debounce } from 'es-toolkit/compat';
import type { Dimensions } from 'features/controlLayers/store/types';
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
import { debounce } from 'lodash-es';
import { type RefObject, useCallback, useEffect, useMemo } from 'react';
type Options = {

View File

@@ -1,18 +1,12 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { EMPTY_ARRAY } from 'app/store/constants';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import type { GroupBase } from 'chakra-react-select';
import { uniq } from 'es-toolkit/compat';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';
import type { AnyModelConfig } from 'services/api/types';
import { useGroupedModelCombobox } from './useGroupedModelCombobox';
import { useRelatedModelKeys } from './useRelatedModelKeys';
import { useSelectedModelKeys } from './useSelectedModelKeys';
type UseRelatedGroupedModelComboboxArg<T extends AnyModelConfig> = {
modelConfigs: T[];
@@ -35,32 +29,6 @@ type UseRelatedGroupedModelComboboxReturn = {
noOptionsMessage: () => string;
};
const selectSelectedModelKeys = createMemoizedSelector(selectParamsSlice, selectLoRAsSlice, (params, loras) => {
const keys: string[] = [];
const main = params.model;
const vae = params.vae;
const refiner = params.refinerModel;
const controlnet = params.controlLora;
if (main) {
keys.push(main.key);
}
if (vae) {
keys.push(vae.key);
}
if (refiner) {
keys.push(refiner.key);
}
if (controlnet) {
keys.push(controlnet.key);
}
for (const { model } of loras.loras) {
keys.push(model.key);
}
return uniq(keys);
});
export function useRelatedGroupedModelCombobox<T extends AnyModelConfig>({
modelConfigs,
selectedModel,
@@ -71,15 +39,9 @@ export function useRelatedGroupedModelCombobox<T extends AnyModelConfig>({
}: UseRelatedGroupedModelComboboxArg<T>): UseRelatedGroupedModelComboboxReturn {
const { t } = useTranslation();
const selectedKeys = useAppSelector(selectSelectedModelKeys);
const { relatedKeys } = useGetRelatedModelIdsBatchQuery(selectedKeys, {
selectFromResult: ({ data }) => {
if (!data) {
return { relatedKeys: EMPTY_ARRAY };
}
return { relatedKeys: data };
},
});
const selectedKeys = useSelectedModelKeys();
const relatedKeys = useRelatedModelKeys(selectedKeys);
// Base grouped options
const base = useGroupedModelCombobox({
@@ -91,42 +53,40 @@ export function useRelatedGroupedModelCombobox<T extends AnyModelConfig>({
groupByType,
});
const options = useMemo(() => {
if (relatedKeys.length === 0) {
return base.options;
}
// If no related models selected, just return base
if (relatedKeys.size === 0) {
return base;
}
const relatedOptions: ComboboxOption[] = [];
const updatedGroups: GroupBase<ComboboxOption>[] = [];
const relatedOptions: ComboboxOption[] = [];
const updatedGroups: GroupBase<ComboboxOption>[] = [];
for (const group of base.options) {
const remainingOptions: ComboboxOption[] = [];
for (const group of base.options) {
const remainingOptions: ComboboxOption[] = [];
for (const option of group.options) {
if (relatedKeys.includes(option.value)) {
relatedOptions.push({ ...option, label: `* ${option.label}` });
} else {
remainingOptions.push(option);
}
}
if (remainingOptions.length > 0) {
updatedGroups.push({
label: group.label,
options: remainingOptions,
});
for (const option of group.options) {
if (relatedKeys.has(option.value)) {
relatedOptions.push({ ...option, label: `* ${option.label}` });
} else {
remainingOptions.push(option);
}
}
if (relatedOptions.length > 0) {
return [{ label: t('modelManager.relatedModels'), options: relatedOptions }, ...updatedGroups];
} else {
return updatedGroups;
if (remainingOptions.length > 0) {
updatedGroups.push({
label: group.label,
options: remainingOptions,
});
}
}, [base.options, relatedKeys, t]);
}
const finalOptions: GroupBase<ComboboxOption>[] =
relatedOptions.length > 0
? [{ label: t('modelManager.relatedModels'), options: relatedOptions }, ...updatedGroups]
: updatedGroups;
return {
...base,
options,
options: finalOptions,
};
}

View File

@@ -0,0 +1,14 @@
import { useMemo } from 'react';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';
/**
* Fetches related model keys for a given set of selected model keys.
* Returns a Set<string> for fast lookup.
*/
export const useRelatedModelKeys = (selectedKeys: Set<string>) => {
const { data: related = [] } = useGetRelatedModelIdsBatchQuery([...selectedKeys], {
skip: selectedKeys.size === 0,
});
return useMemo(() => new Set(related), [related]);
};
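A minimal sketch of calling the new hook directly; the component, the key strings, and the import path are placeholders.
import { useMemo } from 'react';
import { useRelatedModelKeys } from './useRelatedModelKeys'; // relative to wherever the hook lives
const RelatedTag = ({ modelKey }: { modelKey: string }) => {
  // Keys of the models currently selected elsewhere in the UI (made-up values here).
  const selectedKeys = useMemo(() => new Set(['main-model-key', 'lora-model-key']), []);
  const relatedKeys = useRelatedModelKeys(selectedKeys);
  // The hook returns a Set<string>, so membership checks are O(1).
  return relatedKeys.has(modelKey) ? <span>related</span> : null;
};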

View File

@@ -0,0 +1,34 @@
import { useAppSelector } from 'app/store/storeHooks';
/**
* Gathers all currently selected model keys from parameters and loras.
* This includes the main model, VAE, refiner model, controlnet, and loras.
*/
export const useSelectedModelKeys = () => {
return useAppSelector((state) => {
const keys = new Set<string>();
const main = state.params.model;
const vae = state.params.vae;
const refiner = state.params.refinerModel;
const controlnet = state.params.controlLora;
const loras = state.loras.loras.map((l) => l.model);
if (main) {
keys.add(main.key);
}
if (vae) {
keys.add(vae.key);
}
if (refiner) {
keys.add(refiner.key);
}
if (controlnet) {
keys.add(controlnet.key);
}
for (const lora of loras) {
keys.add(lora.key);
}
return keys;
});
};
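Together, the two new hooks replace the inline selector and query that useRelatedGroupedModelCombobox previously carried; a rough sketch of the combined pattern (component name and import paths are illustrative):
import { useRelatedModelKeys } from './useRelatedModelKeys'; // paths assumed
import { useSelectedModelKeys } from './useSelectedModelKeys';
const RelatedModelKeysDebug = () => {
  // Every model key currently selected in params or loras...
  const selectedKeys = useSelectedModelKeys();
  // ...and the keys of models related to them, fetched via the batch endpoint.
  const relatedKeys = useRelatedModelKeys(selectedKeys);
  return <pre>{JSON.stringify({ selected: [...selectedKeys], related: [...relatedKeys] }, null, 2)}</pre>;
};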

View File

@@ -1,28 +0,0 @@
import type { Selector } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
import { useAppStore } from 'app/store/storeHooks';
import type { Atom, WritableAtom } from 'nanostores';
import { atom } from 'nanostores';
import { useEffect, useState } from 'react';
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
export const useSelectorAsAtom = <T extends Selector<RootState, any>>(selector: T): Atom<ReturnType<T>> => {
const store = useAppStore();
const $atom = useState<WritableAtom<ReturnType<T>>>(() => atom<ReturnType<T>>(selector(store.getState())))[0];
useEffect(() => {
const unsubscribe = store.subscribe(() => {
const prev = $atom.get();
const next = selector(store.getState());
if (prev !== next) {
$atom.set(next);
}
});
return () => {
unsubscribe();
};
}, [$atom, selector, store]);
return $atom;
};
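A sketch of what the removed useSelectorAsAtom enabled: mirroring a Redux selector into a nanostores atom so imperative, non-React code can subscribe to it. The selectActiveTab selector and import paths are assumptions.
import { useEffect } from 'react';
import { useSelectorAsAtom } from 'common/hooks/useSelectorAsAtom'; // path assumed
import { selectActiveTab } from 'features/ui/store/uiSelectors'; // hypothetical selector
const ActiveTabLogger = () => {
  const $activeTab = useSelectorAsAtom(selectActiveTab);
  useEffect(() => {
    // nanostores' subscribe calls the listener immediately and on every change; it returns an unsubscriber.
    return $activeTab.subscribe((tab) => {
      console.log('active tab is now', tab);
    });
  }, [$activeTab]);
  return null;
};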

View File

@@ -1,21 +0,0 @@
import { useCallback, useMemo, useRef } from 'react';
export const useTimeoutCallback = (callback: () => void, delay: number, onCancel?: () => void) => {
const timeoutRef = useRef<number | null>(null);
const cancel = useCallback(() => {
if (timeoutRef.current !== null) {
window.clearTimeout(timeoutRef.current);
timeoutRef.current = null;
onCancel?.();
}
}, [onCancel]);
const callWithTimeout = useCallback(() => {
cancel();
timeoutRef.current = window.setTimeout(() => {
callback();
timeoutRef.current = null;
}, delay);
}, [callback, cancel, delay]);
const api = useMemo(() => [callWithTimeout, cancel] as const, [callWithTimeout, cancel]);
return api;
};
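A sketch of the removed useTimeoutCallback in isolation, used as a manual debounce; the component and import path are illustrative.
import { useCallback } from 'react';
import { useTimeoutCallback } from 'common/hooks/useTimeoutCallback'; // path assumed
const DraftEditor = () => {
  const save = useCallback(() => {
    console.log('saving draft...');
  }, []);
  // Each call to scheduleSave resets the 1s timer; cancelSave clears any pending save.
  const [scheduleSave, cancelSave] = useTimeoutCallback(save, 1000);
  return <textarea onChange={scheduleSave} onBlur={cancelSave} />;
};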

View File

@@ -1,5 +1,5 @@
import { NUMPY_RAND_MAX, NUMPY_RAND_MIN } from 'app/constants';
import { random } from 'es-toolkit/compat';
import { random } from 'lodash-es';
type GenerateSeedsArg = {
count: number;

View File

@@ -1,10 +0,0 @@
import type { z } from 'zod/v4';
/**
* Helper to create a type guard from a zod schema. The type guard will infer the schema's TS type.
* @param schema The zod schema to create a type guard from.
* @returns A type guard function for the schema.
*/
export const buildZodTypeGuard = <T extends z.ZodTypeAny>(schema: T) => {
return (val: unknown): val is z.infer<T> => schema.safeParse(val).success;
};
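A usage sketch for the removed buildZodTypeGuard helper; the schema name and import path are illustrative.
import { z } from 'zod/v4';
import { buildZodTypeGuard } from 'common/util/zodUtils'; // path assumed
const zImageRef = z.object({ image_name: z.string() });
const isImageRef = buildZodTypeGuard(zImageRef);
const candidate: unknown = JSON.parse('{"image_name":"abc123.png"}');
if (isImageRef(candidate)) {
  // candidate is narrowed to { image_name: string } here
  console.log(candidate.image_name);
}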

View File

@@ -16,7 +16,7 @@ import { useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation } from 's
const selectImagesToChange = createMemoizedSelector(
selectChangeBoardModalSlice,
(changeBoardModal) => changeBoardModal.image_names
(changeBoardModal) => changeBoardModal.imagesToChange
);
const selectIsModalOpen = createSelector(
@@ -57,10 +57,10 @@ const ChangeBoardModal = () => {
}
if (selectedBoard === 'none') {
removeImagesFromBoard({ image_names: imagesToChange });
removeImagesFromBoard({ imageDTOs: imagesToChange });
} else {
addImagesToBoard({
image_names: imagesToChange,
imageDTOs: imagesToChange,
board_id: selectedBoard,
});
}

View File

@@ -2,5 +2,5 @@ import type { ChangeBoardModalState } from './types';
export const initialState: ChangeBoardModalState = {
isModalOpen: false,
image_names: [],
imagesToChange: [],
};

View File

@@ -1,6 +1,7 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
import type { ImageDTO } from 'services/api/types';
import { initialState } from './initialState';
@@ -11,11 +12,11 @@ export const changeBoardModalSlice = createSlice({
isModalOpenChanged: (state, action: PayloadAction<boolean>) => {
state.isModalOpen = action.payload;
},
imagesToChangeSelected: (state, action: PayloadAction<string[]>) => {
state.image_names = action.payload;
imagesToChangeSelected: (state, action: PayloadAction<ImageDTO[]>) => {
state.imagesToChange = action.payload;
},
changeBoardReset: (state) => {
state.image_names = [];
state.imagesToChange = [];
state.isModalOpen = false;
},
},

Some files were not shown because too many files have changed in this diff.