diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py
index ae8e6e05a6..ff55749f6b 100644
--- a/invokeai/app/api/dependencies.py
+++ b/invokeai/app/api/dependencies.py
@@ -54,6 +54,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
CogView4ConditioningInfo,
ConditioningFieldData,
FLUXConditioningInfo,
+ QwenImageConditioningInfo,
SD3ConditioningInfo,
SDXLConditioningInfo,
ZImageConditioningInfo,
@@ -144,6 +145,7 @@ class ApiDependencies:
SD3ConditioningInfo,
CogView4ConditioningInfo,
ZImageConditioningInfo,
+ QwenImageConditioningInfo,
AnimaConditioningInfo,
],
ephemeral=True,
diff --git a/invokeai/app/api/routers/auth.py b/invokeai/app/api/routers/auth.py
index 36aeabda82..e0b0c885cd 100644
--- a/invokeai/app/api/routers/auth.py
+++ b/invokeai/app/api/routers/auth.py
@@ -80,6 +80,7 @@ class SetupStatusResponse(BaseModel):
setup_required: bool = Field(description="Whether initial setup is required")
multiuser_enabled: bool = Field(description="Whether multiuser mode is enabled")
strict_password_checking: bool = Field(description="Whether strict password requirements are enforced")
+ admin_email: str | None = Field(default=None, description="Email of the first active admin user, if any")
@auth_router.get("/status", response_model=SetupStatusResponse)
@@ -94,15 +95,25 @@ async def get_setup_status() -> SetupStatusResponse:
# If multiuser is disabled, setup is never required
if not config.multiuser:
return SetupStatusResponse(
- setup_required=False, multiuser_enabled=False, strict_password_checking=config.strict_password_checking
+ setup_required=False,
+ multiuser_enabled=False,
+ strict_password_checking=config.strict_password_checking,
+ admin_email=None,
)
# In multiuser mode, check if an admin exists
user_service = ApiDependencies.invoker.services.users
setup_required = not user_service.has_admin()
+ # Only expose admin_email during initial setup to avoid leaking
+ # administrator identity on public deployments.
+ admin_email = user_service.get_admin_email() if setup_required else None
+
return SetupStatusResponse(
- setup_required=setup_required, multiuser_enabled=True, strict_password_checking=config.strict_password_checking
+ setup_required=setup_required,
+ multiuser_enabled=True,
+ strict_password_checking=config.strict_password_checking,
+ admin_email=admin_email,
)
diff --git a/invokeai/app/api/routers/board_images.py b/invokeai/app/api/routers/board_images.py
index cb5e0ab51a..f94e4f2437 100644
--- a/invokeai/app/api/routers/board_images.py
+++ b/invokeai/app/api/routers/board_images.py
@@ -1,12 +1,53 @@
from fastapi import Body, HTTPException
from fastapi.routing import APIRouter
+from invokeai.app.api.auth_dependencies import CurrentUserOrDefault
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.images.images_common import AddImagesToBoardResult, RemoveImagesFromBoardResult
board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
+def _assert_board_write_access(board_id: str, current_user: CurrentUserOrDefault) -> None:
+ """Raise 403 if the current user may not mutate the given board.
+
+ Write access is granted when ANY of these hold:
+ - The user is an admin.
+ - The user owns the board.
+ - The board visibility is Public (public boards accept contributions from any user).
+ """
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ except Exception:
+ raise HTTPException(status_code=404, detail="Board not found")
+ if current_user.is_admin:
+ return
+ if board.user_id == current_user.user_id:
+ return
+ if board.board_visibility == BoardVisibility.Public:
+ return
+ raise HTTPException(status_code=403, detail="Not authorized to modify this board")
+
+
+def _assert_image_direct_owner(image_name: str, current_user: CurrentUserOrDefault) -> None:
+ """Raise 403 if the current user is not the direct owner of the image.
+
+ This is intentionally stricter than _assert_image_owner in images.py:
+ board ownership is NOT sufficient here. Allowing a user to add someone
+ else's image to their own board would grant them mutation rights via the
+ board-ownership fallback in _assert_image_owner, escalating read access
+ into write access.
+ """
+ if current_user.is_admin:
+ return
+ owner = ApiDependencies.invoker.services.image_records.get_user_id(image_name)
+ if owner is not None and owner == current_user.user_id:
+ return
+ raise HTTPException(status_code=403, detail="Not authorized to move this image")
+
+
@board_images_router.post(
"/",
operation_id="add_image_to_board",
@@ -17,14 +58,17 @@ board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
response_model=AddImagesToBoardResult,
)
async def add_image_to_board(
+ current_user: CurrentUserOrDefault,
board_id: str = Body(description="The id of the board to add to"),
image_name: str = Body(description="The name of the image to add"),
) -> AddImagesToBoardResult:
"""Creates a board_image"""
+ _assert_board_write_access(board_id, current_user)
+ _assert_image_direct_owner(image_name, current_user)
try:
added_images: set[str] = set()
affected_boards: set[str] = set()
- old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
+ old_board_id = ApiDependencies.invoker.services.board_image_records.get_board_for_image(image_name) or "none"
ApiDependencies.invoker.services.board_images.add_image_to_board(board_id=board_id, image_name=image_name)
added_images.add(image_name)
affected_boards.add(board_id)
@@ -48,13 +92,16 @@ async def add_image_to_board(
response_model=RemoveImagesFromBoardResult,
)
async def remove_image_from_board(
+ current_user: CurrentUserOrDefault,
image_name: str = Body(description="The name of the image to remove", embed=True),
) -> RemoveImagesFromBoardResult:
"""Removes an image from its board, if it had one"""
try:
+ old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
+ if old_board_id != "none":
+ _assert_board_write_access(old_board_id, current_user)
removed_images: set[str] = set()
affected_boards: set[str] = set()
- old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
removed_images.add(image_name)
affected_boards.add("none")
@@ -64,6 +111,8 @@ async def remove_image_from_board(
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to remove image from board")
@@ -78,16 +127,21 @@ async def remove_image_from_board(
response_model=AddImagesToBoardResult,
)
async def add_images_to_board(
+ current_user: CurrentUserOrDefault,
board_id: str = Body(description="The id of the board to add to"),
image_names: list[str] = Body(description="The names of the images to add", embed=True),
) -> AddImagesToBoardResult:
"""Adds a list of images to a board"""
+ _assert_board_write_access(board_id, current_user)
try:
added_images: set[str] = set()
affected_boards: set[str] = set()
for image_name in image_names:
try:
- old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
+ _assert_image_direct_owner(image_name, current_user)
+ old_board_id = (
+ ApiDependencies.invoker.services.board_image_records.get_board_for_image(image_name) or "none"
+ )
ApiDependencies.invoker.services.board_images.add_image_to_board(
board_id=board_id,
image_name=image_name,
@@ -96,12 +150,16 @@ async def add_images_to_board(
affected_boards.add(board_id)
affected_boards.add(old_board_id)
+ except HTTPException:
+ raise
except Exception:
pass
return AddImagesToBoardResult(
added_images=list(added_images),
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to add images to board")
@@ -116,6 +174,7 @@ async def add_images_to_board(
response_model=RemoveImagesFromBoardResult,
)
async def remove_images_from_board(
+ current_user: CurrentUserOrDefault,
image_names: list[str] = Body(description="The names of the images to remove", embed=True),
) -> RemoveImagesFromBoardResult:
"""Removes a list of images from their board, if they had one"""
@@ -125,15 +184,21 @@ async def remove_images_from_board(
for image_name in image_names:
try:
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
+ if old_board_id != "none":
+ _assert_board_write_access(old_board_id, current_user)
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
removed_images.add(image_name)
affected_boards.add("none")
affected_boards.add(old_board_id)
+ except HTTPException:
+ raise
except Exception:
pass
return RemoveImagesFromBoardResult(
removed_images=list(removed_images),
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to remove images from board")
diff --git a/invokeai/app/api/routers/boards.py b/invokeai/app/api/routers/boards.py
index e93bb8b2a9..6897e90aff 100644
--- a/invokeai/app/api/routers/boards.py
+++ b/invokeai/app/api/routers/boards.py
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field
from invokeai.app.api.auth_dependencies import CurrentUserOrDefault
from invokeai.app.api.dependencies import ApiDependencies
-from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy
+from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecordOrderBy, BoardVisibility
from invokeai.app.services.boards.boards_common import BoardDTO
from invokeai.app.services.image_records.image_records_common import ImageCategory
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
@@ -56,7 +56,14 @@ async def get_board(
except Exception:
raise HTTPException(status_code=404, detail="Board not found")
- if not current_user.is_admin and result.user_id != current_user.user_id:
+ # Admins can access any board.
+ # Owners can access their own boards.
+ # Shared and public boards are visible to all authenticated users.
+ if (
+ not current_user.is_admin
+ and result.user_id != current_user.user_id
+ and result.board_visibility == BoardVisibility.Private
+ ):
raise HTTPException(status_code=403, detail="Not authorized to access this board")
return result
@@ -188,7 +195,11 @@ async def list_all_board_image_names(
except Exception:
raise HTTPException(status_code=404, detail="Board not found")
- if not current_user.is_admin and board.user_id != current_user.user_id:
+ if (
+ not current_user.is_admin
+ and board.user_id != current_user.user_id
+ and board.board_visibility == BoardVisibility.Private
+ ):
raise HTTPException(status_code=403, detail="Not authorized to access this board")
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
@@ -196,4 +207,15 @@ async def list_all_board_image_names(
categories,
is_intermediate,
)
+
+ # For uncategorized images (board_id="none"), filter to only the caller's
+ # images so that one user cannot enumerate another's uncategorized images.
+ # Admin users can see all uncategorized images.
+ if board_id == "none" and not current_user.is_admin:
+ image_names = [
+ name
+ for name in image_names
+ if ApiDependencies.invoker.services.image_records.get_user_id(name) == current_user.user_id
+ ]
+
return image_names
diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py
index 6b11762c9e..a3ae6fce82 100644
--- a/invokeai/app/api/routers/images.py
+++ b/invokeai/app/api/routers/images.py
@@ -38,6 +38,96 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
IMAGE_MAX_AGE = 31536000
+def _assert_image_owner(image_name: str, current_user: CurrentUserOrDefault) -> None:
+ """Raise 403 if the current user does not own the image and is not an admin.
+
+ Ownership is satisfied when ANY of these hold:
+ - The user is an admin.
+ - The user is the image's direct owner (image_records.user_id).
+ - The user owns the board the image sits on.
+ - The image sits on a Public board (public boards grant mutation rights).
+ """
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ if current_user.is_admin:
+ return
+ owner = ApiDependencies.invoker.services.image_records.get_user_id(image_name)
+ if owner is not None and owner == current_user.user_id:
+ return
+
+ # Check whether the user owns the board the image belongs to,
+ # or the board is Public (public boards grant mutation rights).
+ board_id = ApiDependencies.invoker.services.board_image_records.get_board_for_image(image_name)
+ if board_id is not None:
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ if board.user_id == current_user.user_id:
+ return
+ if board.board_visibility == BoardVisibility.Public:
+ return
+ except Exception:
+ pass
+
+ raise HTTPException(status_code=403, detail="Not authorized to modify this image")
+
+
+def _assert_image_read_access(image_name: str, current_user: CurrentUserOrDefault) -> None:
+ """Raise 403 if the current user may not view the image.
+
+ Access is granted when ANY of these hold:
+ - The user is an admin.
+ - The user owns the image.
+ - The image sits on a shared or public board.
+ """
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ if current_user.is_admin:
+ return
+
+ owner = ApiDependencies.invoker.services.image_records.get_user_id(image_name)
+ if owner is not None and owner == current_user.user_id:
+ return
+
+ # Check whether the image's board makes it visible to other users.
+ board_id = ApiDependencies.invoker.services.board_image_records.get_board_for_image(image_name)
+ if board_id is not None:
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ if board.board_visibility in (BoardVisibility.Shared, BoardVisibility.Public):
+ return
+ except Exception:
+ pass
+
+ raise HTTPException(status_code=403, detail="Not authorized to access this image")
+
+
+def _assert_board_read_access(board_id: str, current_user: CurrentUserOrDefault) -> None:
+ """Raise 403 if the current user may not read images from this board.
+
+ Access is granted when ANY of these hold:
+ - The user is an admin.
+ - The user owns the board.
+ - The board visibility is Shared or Public.
+ """
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ if current_user.is_admin:
+ return
+
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ except Exception:
+ raise HTTPException(status_code=404, detail="Board not found")
+
+ if board.user_id == current_user.user_id:
+ return
+
+ if board.board_visibility in (BoardVisibility.Shared, BoardVisibility.Public):
+ return
+
+ raise HTTPException(status_code=403, detail="Not authorized to access this board")
+
+
class ResizeToDimensions(BaseModel):
width: int = Field(..., gt=0)
height: int = Field(..., gt=0)
@@ -83,6 +173,22 @@ async def upload_image(
),
) -> ImageDTO:
"""Uploads an image for the current user"""
+ # If uploading into a board, verify the user has write access.
+ # Public boards allow uploads from any authenticated user.
+ if board_id is not None:
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ except Exception:
+ raise HTTPException(status_code=404, detail="Board not found")
+ if (
+ not current_user.is_admin
+ and board.user_id != current_user.user_id
+ and board.board_visibility != BoardVisibility.Public
+ ):
+ raise HTTPException(status_code=403, detail="Not authorized to upload to this board")
+
if not file.content_type or not file.content_type.startswith("image"):
raise HTTPException(status_code=415, detail="Not an image")
@@ -165,9 +271,11 @@ async def create_image_upload_entry(
@images_router.delete("/i/{image_name}", operation_id="delete_image", response_model=DeleteImagesResult)
async def delete_image(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of the image to delete"),
) -> DeleteImagesResult:
"""Deletes an image"""
+ _assert_image_owner(image_name, current_user)
deleted_images: set[str] = set()
affected_boards: set[str] = set()
@@ -189,26 +297,31 @@ async def delete_image(
@images_router.delete("/intermediates", operation_id="clear_intermediates")
-async def clear_intermediates() -> int:
- """Clears all intermediates"""
+async def clear_intermediates(
+ current_user: CurrentUserOrDefault,
+) -> int:
+ """Clears all intermediates. Requires admin."""
+ if not current_user.is_admin:
+ raise HTTPException(status_code=403, detail="Only admins can clear all intermediates")
try:
count_deleted = ApiDependencies.invoker.services.images.delete_intermediates()
return count_deleted
except Exception:
raise HTTPException(status_code=500, detail="Failed to clear intermediates")
- pass
@images_router.get("/intermediates", operation_id="get_intermediates_count")
-async def get_intermediates_count() -> int:
- """Gets the count of intermediate images"""
+async def get_intermediates_count(
+ current_user: CurrentUserOrDefault,
+) -> int:
+ """Gets the count of intermediate images. Non-admin users only see their own intermediates."""
try:
- return ApiDependencies.invoker.services.images.get_intermediates_count()
+ user_id = None if current_user.is_admin else current_user.user_id
+ return ApiDependencies.invoker.services.images.get_intermediates_count(user_id=user_id)
except Exception:
raise HTTPException(status_code=500, detail="Failed to get intermediates")
- pass
@images_router.patch(
@@ -217,10 +330,12 @@ async def get_intermediates_count() -> int:
response_model=ImageDTO,
)
async def update_image(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of the image to update"),
image_changes: ImageRecordChanges = Body(description="The changes to apply to the image"),
) -> ImageDTO:
"""Updates an image"""
+ _assert_image_owner(image_name, current_user)
try:
return ApiDependencies.invoker.services.images.update(image_name, image_changes)
@@ -234,9 +349,11 @@ async def update_image(
response_model=ImageDTO,
)
async def get_image_dto(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of image to get"),
) -> ImageDTO:
"""Gets an image's DTO"""
+ _assert_image_read_access(image_name, current_user)
try:
return ApiDependencies.invoker.services.images.get_dto(image_name)
@@ -250,9 +367,11 @@ async def get_image_dto(
response_model=Optional[MetadataField],
)
async def get_image_metadata(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of image to get"),
) -> Optional[MetadataField]:
"""Gets an image's metadata"""
+ _assert_image_read_access(image_name, current_user)
try:
return ApiDependencies.invoker.services.images.get_metadata(image_name)
@@ -269,8 +388,11 @@ class WorkflowAndGraphResponse(BaseModel):
"/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=WorkflowAndGraphResponse
)
async def get_image_workflow(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of image whose workflow to get"),
) -> WorkflowAndGraphResponse:
+ _assert_image_read_access(image_name, current_user)
+
try:
workflow = ApiDependencies.invoker.services.images.get_workflow(image_name)
graph = ApiDependencies.invoker.services.images.get_graph(image_name)
@@ -306,8 +428,12 @@ async def get_image_workflow(
async def get_image_full(
image_name: str = Path(description="The name of full-resolution image file to get"),
) -> Response:
- """Gets a full-resolution image file"""
+ """Gets a full-resolution image file.
+ This endpoint is intentionally unauthenticated because browsers load images
+    via &lt;img&gt; tags which cannot send Bearer tokens. Image names are UUIDs,
+ providing security through unguessability.
+ """
try:
path = ApiDependencies.invoker.services.images.get_path(image_name)
with open(path, "rb") as f:
@@ -335,8 +461,12 @@ async def get_image_full(
async def get_image_thumbnail(
image_name: str = Path(description="The name of thumbnail image file to get"),
) -> Response:
- """Gets a thumbnail image file"""
+ """Gets a thumbnail image file.
+ This endpoint is intentionally unauthenticated because browsers load images
+    via &lt;img&gt; tags which cannot send Bearer tokens. Image names are UUIDs,
+ providing security through unguessability.
+ """
try:
path = ApiDependencies.invoker.services.images.get_path(image_name, thumbnail=True)
with open(path, "rb") as f:
@@ -354,9 +484,11 @@ async def get_image_thumbnail(
response_model=ImageUrlsDTO,
)
async def get_image_urls(
+ current_user: CurrentUserOrDefault,
image_name: str = Path(description="The name of the image whose URL to get"),
) -> ImageUrlsDTO:
"""Gets an image and thumbnail URL"""
+ _assert_image_read_access(image_name, current_user)
try:
image_url = ApiDependencies.invoker.services.images.get_url(image_name)
@@ -392,6 +524,11 @@ async def list_image_dtos(
) -> OffsetPaginatedResults[ImageDTO]:
"""Gets a list of image DTOs for the current user"""
+ # Validate that the caller can read from this board before listing its images.
+ # "none" is a sentinel for uncategorized images and is handled by the SQL layer.
+ if board_id is not None and board_id != "none":
+ _assert_board_read_access(board_id, current_user)
+
image_dtos = ApiDependencies.invoker.services.images.get_many(
offset,
limit,
@@ -410,6 +547,7 @@ async def list_image_dtos(
@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesResult)
async def delete_images_from_list(
+ current_user: CurrentUserOrDefault,
image_names: list[str] = Body(description="The list of names of images to delete", embed=True),
) -> DeleteImagesResult:
try:
@@ -417,24 +555,31 @@ async def delete_images_from_list(
affected_boards: set[str] = set()
for image_name in image_names:
try:
+ _assert_image_owner(image_name, current_user)
image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
board_id = image_dto.board_id or "none"
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.add(image_name)
affected_boards.add(board_id)
+ except HTTPException:
+ raise
except Exception:
pass
return DeleteImagesResult(
deleted_images=list(deleted_images),
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to delete images")
@images_router.delete("/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesResult)
-async def delete_uncategorized_images() -> DeleteImagesResult:
- """Deletes all images that are uncategorized"""
+async def delete_uncategorized_images(
+ current_user: CurrentUserOrDefault,
+) -> DeleteImagesResult:
+ """Deletes all uncategorized images owned by the current user (or all if admin)"""
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id="none", categories=None, is_intermediate=None
@@ -445,9 +590,13 @@ async def delete_uncategorized_images() -> DeleteImagesResult:
affected_boards: set[str] = set()
for image_name in image_names:
try:
+ _assert_image_owner(image_name, current_user)
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.add(image_name)
affected_boards.add("none")
+ except HTTPException:
+ # Skip images not owned by the current user
+ pass
except Exception:
pass
return DeleteImagesResult(
@@ -464,6 +613,7 @@ class ImagesUpdatedFromListResult(BaseModel):
@images_router.post("/star", operation_id="star_images_in_list", response_model=StarredImagesResult)
async def star_images_in_list(
+ current_user: CurrentUserOrDefault,
image_names: list[str] = Body(description="The list of names of images to star", embed=True),
) -> StarredImagesResult:
try:
@@ -471,23 +621,29 @@ async def star_images_in_list(
affected_boards: set[str] = set()
for image_name in image_names:
try:
+ _assert_image_owner(image_name, current_user)
updated_image_dto = ApiDependencies.invoker.services.images.update(
image_name, changes=ImageRecordChanges(starred=True)
)
starred_images.add(image_name)
affected_boards.add(updated_image_dto.board_id or "none")
+ except HTTPException:
+ raise
except Exception:
pass
return StarredImagesResult(
starred_images=list(starred_images),
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to star images")
@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=UnstarredImagesResult)
async def unstar_images_in_list(
+ current_user: CurrentUserOrDefault,
image_names: list[str] = Body(description="The list of names of images to unstar", embed=True),
) -> UnstarredImagesResult:
try:
@@ -495,17 +651,22 @@ async def unstar_images_in_list(
affected_boards: set[str] = set()
for image_name in image_names:
try:
+ _assert_image_owner(image_name, current_user)
updated_image_dto = ApiDependencies.invoker.services.images.update(
image_name, changes=ImageRecordChanges(starred=False)
)
unstarred_images.add(image_name)
affected_boards.add(updated_image_dto.board_id or "none")
+ except HTTPException:
+ raise
except Exception:
pass
return UnstarredImagesResult(
unstarred_images=list(unstarred_images),
affected_boards=list(affected_boards),
)
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=500, detail="Failed to unstar images")
@@ -523,6 +684,7 @@ class ImagesDownloaded(BaseModel):
"/download", operation_id="download_images_from_list", response_model=ImagesDownloaded, status_code=202
)
async def download_images_from_list(
+ current_user: CurrentUserOrDefault,
background_tasks: BackgroundTasks,
image_names: Optional[list[str]] = Body(
default=None, description="The list of names of images to download", embed=True
@@ -533,6 +695,16 @@ async def download_images_from_list(
) -> ImagesDownloaded:
if (image_names is None or len(image_names) == 0) and board_id is None:
raise HTTPException(status_code=400, detail="No images or board id specified.")
+
+ # Validate that the caller can read every image they are requesting.
+ # For a board_id request, check board visibility; for explicit image names,
+ # check each image individually.
+ if board_id:
+ _assert_board_read_access(board_id, current_user)
+ if image_names:
+ for name in image_names:
+ _assert_image_read_access(name, current_user)
+
bulk_download_item_id: str = ApiDependencies.invoker.services.bulk_download.generate_item_id(board_id)
background_tasks.add_task(
@@ -540,6 +712,7 @@ async def download_images_from_list(
image_names,
board_id,
bulk_download_item_id,
+ current_user.user_id,
)
return ImagesDownloaded(bulk_download_item_name=bulk_download_item_id + ".zip")
@@ -558,11 +731,21 @@ async def download_images_from_list(
},
)
async def get_bulk_download_item(
+ current_user: CurrentUserOrDefault,
background_tasks: BackgroundTasks,
bulk_download_item_name: str = Path(description="The bulk_download_item_name of the bulk download item to get"),
) -> FileResponse:
- """Gets a bulk download zip file"""
+ """Gets a bulk download zip file.
+
+ Requires authentication. The caller must be the user who initiated the
+ download (tracked by the bulk download service) or an admin.
+ """
try:
+ # Verify the caller owns this download (or is an admin)
+ owner = ApiDependencies.invoker.services.bulk_download.get_owner(bulk_download_item_name)
+ if owner is not None and owner != current_user.user_id and not current_user.is_admin:
+ raise HTTPException(status_code=403, detail="Not authorized to access this download")
+
path = ApiDependencies.invoker.services.bulk_download.get_path(bulk_download_item_name)
response = FileResponse(
@@ -574,6 +757,8 @@ async def get_bulk_download_item(
response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
background_tasks.add_task(ApiDependencies.invoker.services.bulk_download.delete, bulk_download_item_name)
return response
+ except HTTPException:
+ raise
except Exception:
raise HTTPException(status_code=404)
@@ -594,6 +779,10 @@ async def get_image_names(
) -> ImageNamesResult:
"""Gets ordered list of image names with metadata for optimistic updates"""
+ # Validate that the caller can read from this board before listing its images.
+ if board_id is not None and board_id != "none":
+ _assert_board_read_access(board_id, current_user)
+
try:
result = ApiDependencies.invoker.services.images.get_image_names(
starred_first=starred_first,
@@ -617,6 +806,7 @@ async def get_image_names(
responses={200: {"model": list[ImageDTO]}},
)
async def get_images_by_names(
+ current_user: CurrentUserOrDefault,
image_names: list[str] = Body(embed=True, description="Object containing list of image names to fetch DTOs for"),
) -> list[ImageDTO]:
"""Gets image DTOs for the specified image names. Maintains order of input names."""
@@ -628,8 +818,12 @@ async def get_images_by_names(
image_dtos: list[ImageDTO] = []
for name in image_names:
try:
+ _assert_image_read_access(name, current_user)
dto = image_service.get_dto(name)
image_dtos.append(dto)
+ except HTTPException:
+ # Skip images the user is not authorized to view
+ continue
except Exception:
# Skip missing images - they may have been deleted between name fetch and DTO fetch
continue
diff --git a/invokeai/app/api/routers/model_manager.py b/invokeai/app/api/routers/model_manager.py
index a1f55a3b04..f351be11ad 100644
--- a/invokeai/app/api/routers/model_manager.py
+++ b/invokeai/app/api/routers/model_manager.py
@@ -889,7 +889,7 @@ async def install_hugging_face_model(
"/install",
operation_id="list_model_installs",
)
-async def list_model_installs() -> List[ModelInstallJob]:
+async def list_model_installs(current_admin: AdminUserOrDefault) -> List[ModelInstallJob]:
"""Return the list of model install jobs.
Install jobs have a numeric `id`, a `status`, and other fields that provide information on
@@ -921,7 +921,9 @@ async def list_model_installs() -> List[ModelInstallJob]:
404: {"description": "No such job"},
},
)
-async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob:
+async def get_model_install_job(
+ current_admin: AdminUserOrDefault, id: int = Path(description="Model install id")
+) -> ModelInstallJob:
"""
Return model install job corresponding to the given source. See the documentation for 'List Model Install Jobs'
for information on the format of the return value.
@@ -964,7 +966,9 @@ async def cancel_model_install_job(
},
status_code=201,
)
-async def pause_model_install_job(id: int = Path(description="Model install job ID")) -> ModelInstallJob:
+async def pause_model_install_job(
+ current_admin: AdminUserOrDefault, id: int = Path(description="Model install job ID")
+) -> ModelInstallJob:
"""Pause the model install job corresponding to the given job ID."""
installer = ApiDependencies.invoker.services.model_manager.install
try:
@@ -984,7 +988,9 @@ async def pause_model_install_job(id: int = Path(description="Model install job
},
status_code=201,
)
-async def resume_model_install_job(id: int = Path(description="Model install job ID")) -> ModelInstallJob:
+async def resume_model_install_job(
+ current_admin: AdminUserOrDefault, id: int = Path(description="Model install job ID")
+) -> ModelInstallJob:
"""Resume a paused model install job corresponding to the given job ID."""
installer = ApiDependencies.invoker.services.model_manager.install
try:
@@ -1004,7 +1010,9 @@ async def resume_model_install_job(id: int = Path(description="Model install job
},
status_code=201,
)
-async def restart_failed_model_install_job(id: int = Path(description="Model install job ID")) -> ModelInstallJob:
+async def restart_failed_model_install_job(
+ current_admin: AdminUserOrDefault, id: int = Path(description="Model install job ID")
+) -> ModelInstallJob:
"""Restart failed or non-resumable file downloads for the given job."""
installer = ApiDependencies.invoker.services.model_manager.install
try:
@@ -1025,6 +1033,7 @@ async def restart_failed_model_install_job(id: int = Path(description="Model ins
status_code=201,
)
async def restart_model_install_file(
+ current_admin: AdminUserOrDefault,
id: int = Path(description="Model install job ID"),
file_source: AnyHttpUrl = Body(description="File download URL to restart"),
) -> ModelInstallJob:
@@ -1336,7 +1345,7 @@ class DeleteOrphanedModelsResponse(BaseModel):
operation_id="get_orphaned_models",
response_model=list[OrphanedModelInfo],
)
-async def get_orphaned_models() -> list[OrphanedModelInfo]:
+async def get_orphaned_models(_: AdminUserOrDefault) -> list[OrphanedModelInfo]:
"""Find orphaned model directories.
Orphaned models are directories in the models folder that contain model files
@@ -1363,7 +1372,9 @@ async def get_orphaned_models() -> list[OrphanedModelInfo]:
operation_id="delete_orphaned_models",
response_model=DeleteOrphanedModelsResponse,
)
-async def delete_orphaned_models(request: DeleteOrphanedModelsRequest) -> DeleteOrphanedModelsResponse:
+async def delete_orphaned_models(
+ request: DeleteOrphanedModelsRequest, _: AdminUserOrDefault
+) -> DeleteOrphanedModelsResponse:
"""Delete specified orphaned model directories.
Args:
diff --git a/invokeai/app/api/routers/recall_parameters.py b/invokeai/app/api/routers/recall_parameters.py
index 0af3fd29b0..ec08adba2e 100644
--- a/invokeai/app/api/routers/recall_parameters.py
+++ b/invokeai/app/api/routers/recall_parameters.py
@@ -7,6 +7,7 @@ from fastapi import Body, HTTPException, Path
from fastapi.routing import APIRouter
from pydantic import BaseModel, ConfigDict, Field
+from invokeai.app.api.auth_dependencies import CurrentUserOrDefault
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.backend.image_util.controlnet_processor import process_controlnet_image
from invokeai.backend.model_manager.taxonomy import ModelType
@@ -291,12 +292,58 @@ def resolve_ip_adapter_models(ip_adapters: list[IPAdapterRecallParameter]) -> li
return resolved_adapters
+def _assert_recall_image_access(parameters: "RecallParameter", current_user: CurrentUserOrDefault) -> None:
+ """Validate that the caller can read every image referenced in the recall parameters.
+
+ Control layers and IP adapters may reference image_name fields. Without this
+ check an attacker who knows another user's image UUID could use the recall
+ endpoint to extract image dimensions and — for ControlNet preprocessors — mint
+ a derived processed image they can then fetch.
+ """
+ from invokeai.app.services.board_records.board_records_common import BoardVisibility
+
+ image_names: list[str] = []
+ if parameters.control_layers:
+ for layer in parameters.control_layers:
+ if layer.image_name is not None:
+ image_names.append(layer.image_name)
+ if parameters.ip_adapters:
+ for adapter in parameters.ip_adapters:
+ if adapter.image_name is not None:
+ image_names.append(adapter.image_name)
+
+ if not image_names:
+ return
+
+ # Admin can access all images
+ if current_user.is_admin:
+ return
+
+ for image_name in image_names:
+ owner = ApiDependencies.invoker.services.image_records.get_user_id(image_name)
+ if owner is not None and owner == current_user.user_id:
+ continue
+
+ # Check board visibility
+ board_id = ApiDependencies.invoker.services.board_image_records.get_board_for_image(image_name)
+ if board_id is not None:
+ try:
+ board = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
+ if board.board_visibility in (BoardVisibility.Shared, BoardVisibility.Public):
+ continue
+ except Exception:
+ pass
+
+ raise HTTPException(status_code=403, detail=f"Not authorized to access image {image_name}")
+
+
@recall_parameters_router.post(
"/{queue_id}",
operation_id="update_recall_parameters",
response_model=dict[str, Any],
)
async def update_recall_parameters(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(..., description="The queue id to perform this operation on"),
parameters: RecallParameter = Body(..., description="Recall parameters to update"),
) -> dict[str, Any]:
@@ -328,6 +375,10 @@ async def update_recall_parameters(
"""
logger = ApiDependencies.invoker.services.logger
+ # Validate image access before processing — prevents information leakage
+ # (dimensions) and derived-image minting via ControlNet preprocessors.
+ _assert_recall_image_access(parameters, current_user)
+
try:
# Get only the parameters that were actually provided (non-None values)
provided_params = {k: v for k, v in parameters.model_dump().items() if v is not None}
@@ -335,14 +386,14 @@ async def update_recall_parameters(
if not provided_params:
return {"status": "no_parameters_provided", "updated_count": 0}
- # Store each parameter in client state using a consistent key format
+ # Store each parameter in client state scoped to the current user
updated_count = 0
for param_key, param_value in provided_params.items():
# Convert parameter values to JSON strings for storage
value_str = json.dumps(param_value)
try:
ApiDependencies.invoker.services.client_state_persistence.set_by_key(
- queue_id, f"recall_{param_key}", value_str
+ current_user.user_id, f"recall_{param_key}", value_str
)
updated_count += 1
except Exception as e:
@@ -396,7 +447,9 @@ async def update_recall_parameters(
logger.info(
f"Emitting recall_parameters_updated event for queue {queue_id} with {len(provided_params)} parameters"
)
- ApiDependencies.invoker.services.events.emit_recall_parameters_updated(queue_id, provided_params)
+ ApiDependencies.invoker.services.events.emit_recall_parameters_updated(
+ queue_id, current_user.user_id, provided_params
+ )
logger.info("Successfully emitted recall_parameters_updated event")
except Exception as e:
logger.error(f"Error emitting recall parameters event: {e}", exc_info=True)
@@ -425,6 +478,7 @@ async def update_recall_parameters(
response_model=dict[str, Any],
)
async def get_recall_parameters(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(..., description="The queue id to retrieve parameters for"),
) -> dict[str, Any]:
"""
diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py
index 403e7727cb..41a5a411c7 100644
--- a/invokeai/app/api/routers/session_queue.py
+++ b/invokeai/app/api/routers/session_queue.py
@@ -44,7 +44,8 @@ def sanitize_queue_item_for_user(
"""Sanitize queue item for non-admin users viewing other users' items.
For non-admin users viewing queue items belonging to other users,
- the field_values, session graph, and workflow should be hidden/cleared to protect privacy.
+ only timestamps, status, and error information are exposed. All other
+ fields (user identity, generation parameters, graphs, workflows) are stripped.
Args:
queue_item: The queue item to sanitize
@@ -58,15 +59,25 @@ def sanitize_queue_item_for_user(
if is_admin or queue_item.user_id == current_user_id:
return queue_item
- # For non-admins viewing other users' items, clear sensitive fields
- # Create a shallow copy to avoid mutating the original
+ # For non-admins viewing other users' items, strip everything except
+ # item_id, queue_id, status, and timestamps
sanitized_item = queue_item.model_copy(deep=False)
+ sanitized_item.user_id = "redacted"
+ sanitized_item.user_display_name = None
+ sanitized_item.user_email = None
+ sanitized_item.batch_id = "redacted"
+ sanitized_item.session_id = "redacted"
+ sanitized_item.origin = None
+ sanitized_item.destination = None
+ sanitized_item.priority = 0
sanitized_item.field_values = None
+ sanitized_item.retried_from_item_id = None
sanitized_item.workflow = None
- # Clear the session graph by replacing it with an empty graph execution state
- # This prevents information leakage through the generation graph
+ sanitized_item.error_type = None
+ sanitized_item.error_message = None
+ sanitized_item.error_traceback = None
sanitized_item.session = GraphExecutionState(
- id=queue_item.session.id,
+ id="redacted",
graph=Graph(),
)
return sanitized_item
@@ -126,12 +137,16 @@ async def list_all_queue_items(
},
)
async def get_queue_item_ids(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to perform this operation on"),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
) -> ItemIdsResult:
- """Gets all queue item ids that match the given parameters"""
+ """Gets all queue item ids that match the given parameters. Non-admin users only see their own items."""
try:
- return ApiDependencies.invoker.services.session_queue.get_queue_item_ids(queue_id=queue_id, order_dir=order_dir)
+ user_id = None if current_user.is_admin else current_user.user_id
+ return ApiDependencies.invoker.services.session_queue.get_queue_item_ids(
+ queue_id=queue_id, order_dir=order_dir, user_id=user_id
+ )
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue item ids: {e}")
@@ -376,11 +391,15 @@ async def prune(
},
)
async def get_current_queue_item(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> Optional[SessionQueueItem]:
"""Gets the currently execution queue item"""
try:
- return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
+ item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
+ if item is not None:
+ item = sanitize_queue_item_for_user(item, current_user.user_id, current_user.is_admin)
+ return item
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting current queue item: {e}")
@@ -393,11 +412,15 @@ async def get_current_queue_item(
},
)
async def get_next_queue_item(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> Optional[SessionQueueItem]:
"""Gets the next queue item, without executing it"""
try:
- return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
+ item = ApiDependencies.invoker.services.session_queue.get_next(queue_id)
+ if item is not None:
+ item = sanitize_queue_item_for_user(item, current_user.user_id, current_user.is_admin)
+ return item
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting next queue item: {e}")
@@ -413,9 +436,10 @@ async def get_queue_status(
current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> SessionQueueAndProcessorStatus:
- """Gets the status of the session queue"""
+ """Gets the status of the session queue. Non-admin users see only their own counts and cannot see current item details unless they own it."""
try:
- queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id, user_id=current_user.user_id)
+ user_id = None if current_user.is_admin else current_user.user_id
+ queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id, user_id=user_id)
processor = ApiDependencies.invoker.services.session_processor.get_status()
return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
except Exception as e:
@@ -430,12 +454,16 @@ async def get_queue_status(
},
)
async def get_batch_status(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to perform this operation on"),
batch_id: str = Path(description="The batch to get the status of"),
) -> BatchStatus:
- """Gets the status of the session queue"""
+ """Gets the status of a batch. Non-admin users only see their own batches."""
try:
- return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
+ user_id = None if current_user.is_admin else current_user.user_id
+ return ApiDependencies.invoker.services.session_queue.get_batch_status(
+ queue_id=queue_id, batch_id=batch_id, user_id=user_id
+ )
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting batch status: {e}")
@@ -529,13 +557,15 @@ async def cancel_queue_item(
responses={200: {"model": SessionQueueCountsByDestination}},
)
async def counts_by_destination(
+ current_user: CurrentUserOrDefault,
queue_id: str = Path(description="The queue id to query"),
destination: str = Query(description="The destination to query"),
) -> SessionQueueCountsByDestination:
- """Gets the counts of queue items by destination"""
+ """Gets the counts of queue items by destination. Non-admin users only see their own items."""
try:
+ user_id = None if current_user.is_admin else current_user.user_id
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
- queue_id=queue_id, destination=destination
+ queue_id=queue_id, destination=destination, user_id=user_id
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while fetching counts by destination: {e}")
diff --git a/invokeai/app/api/routers/workflows.py b/invokeai/app/api/routers/workflows.py
index 72d50a416b..1c88a77a3f 100644
--- a/invokeai/app/api/routers/workflows.py
+++ b/invokeai/app/api/routers/workflows.py
@@ -6,6 +6,7 @@ from fastapi import APIRouter, Body, File, HTTPException, Path, Query, UploadFil
from fastapi.responses import FileResponse
from PIL import Image
+from invokeai.app.api.auth_dependencies import CurrentUserOrDefault
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
@@ -33,16 +34,25 @@ workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
},
)
async def get_workflow(
+ current_user: CurrentUserOrDefault,
workflow_id: str = Path(description="The workflow to get"),
) -> WorkflowRecordWithThumbnailDTO:
"""Gets a workflow"""
try:
- thumbnail_url = ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow_id)
workflow = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
- return WorkflowRecordWithThumbnailDTO(thumbnail_url=thumbnail_url, **workflow.model_dump())
except WorkflowNotFoundError:
raise HTTPException(status_code=404, detail="Workflow not found")
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser:
+ is_default = workflow.workflow.meta.category is WorkflowCategory.Default
+ is_owner = workflow.user_id == current_user.user_id
+ if not (is_default or is_owner or workflow.is_public or current_user.is_admin):
+ raise HTTPException(status_code=403, detail="Not authorized to access this workflow")
+
+ thumbnail_url = ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow_id)
+ return WorkflowRecordWithThumbnailDTO(thumbnail_url=thumbnail_url, **workflow.model_dump())
+
@workflows_router.patch(
"/i/{workflow_id}",
@@ -52,10 +62,21 @@ async def get_workflow(
},
)
async def update_workflow(
+ current_user: CurrentUserOrDefault,
workflow: Workflow = Body(description="The updated workflow", embed=True),
) -> WorkflowRecordDTO:
"""Updates a workflow"""
- return ApiDependencies.invoker.services.workflow_records.update(workflow=workflow)
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser:
+ try:
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow.id)
+ except WorkflowNotFoundError:
+ raise HTTPException(status_code=404, detail="Workflow not found")
+ if not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to update this workflow")
+ # Pass user_id for defense-in-depth SQL scoping; admins pass None to allow any.
+ user_id = None if current_user.is_admin else current_user.user_id
+ return ApiDependencies.invoker.services.workflow_records.update(workflow=workflow, user_id=user_id)
@workflows_router.delete(
@@ -63,15 +84,25 @@ async def update_workflow(
operation_id="delete_workflow",
)
async def delete_workflow(
+ current_user: CurrentUserOrDefault,
workflow_id: str = Path(description="The workflow to delete"),
) -> None:
"""Deletes a workflow"""
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser:
+ try:
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+ except WorkflowNotFoundError:
+ raise HTTPException(status_code=404, detail="Workflow not found")
+ if not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to delete this workflow")
try:
ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
except WorkflowThumbnailFileNotFoundException:
# It's OK if the workflow has no thumbnail file. We can still delete the workflow.
pass
- ApiDependencies.invoker.services.workflow_records.delete(workflow_id)
+ user_id = None if current_user.is_admin else current_user.user_id
+ ApiDependencies.invoker.services.workflow_records.delete(workflow_id, user_id=user_id)
@workflows_router.post(
@@ -82,10 +113,11 @@ async def delete_workflow(
},
)
async def create_workflow(
+ current_user: CurrentUserOrDefault,
workflow: WorkflowWithoutID = Body(description="The workflow to create", embed=True),
) -> WorkflowRecordDTO:
"""Creates a workflow"""
- return ApiDependencies.invoker.services.workflow_records.create(workflow=workflow)
+ return ApiDependencies.invoker.services.workflow_records.create(workflow=workflow, user_id=current_user.user_id)
@workflows_router.get(
@@ -96,6 +128,7 @@ async def create_workflow(
},
)
async def list_workflows(
+ current_user: CurrentUserOrDefault,
page: int = Query(default=0, description="The page to get"),
per_page: Optional[int] = Query(default=None, description="The number of workflows per page"),
order_by: WorkflowRecordOrderBy = Query(
@@ -106,8 +139,19 @@ async def list_workflows(
tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+ is_public: Optional[bool] = Query(default=None, description="Filter by public/shared status"),
) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
"""Gets a page of workflows"""
+ config = ApiDependencies.invoker.services.configuration
+
+ # In multiuser mode, scope user-category workflows to the current user unless fetching shared workflows
+ user_id_filter: Optional[str] = None
+ if config.multiuser:
+ # Only filter 'user' category results by user_id when not explicitly listing public workflows
+ has_user_category = not categories or WorkflowCategory.User in categories
+ if has_user_category and is_public is not True:
+ user_id_filter = current_user.user_id
+
workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
workflows = ApiDependencies.invoker.services.workflow_records.get_many(
order_by=order_by,
@@ -118,6 +162,8 @@ async def list_workflows(
categories=categories,
tags=tags,
has_been_opened=has_been_opened,
+ user_id=user_id_filter,
+ is_public=is_public,
)
for workflow in workflows.items:
workflows_with_thumbnails.append(
@@ -143,15 +189,20 @@ async def list_workflows(
},
)
async def set_workflow_thumbnail(
+ current_user: CurrentUserOrDefault,
workflow_id: str = Path(description="The workflow to update"),
image: UploadFile = File(description="The image file to upload"),
):
"""Sets a workflow's thumbnail image"""
try:
- ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
except WorkflowNotFoundError:
raise HTTPException(status_code=404, detail="Workflow not found")
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser and not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to update this workflow")
+
if not image.content_type or not image.content_type.startswith("image"):
raise HTTPException(status_code=415, detail="Not an image")
@@ -177,14 +228,19 @@ async def set_workflow_thumbnail(
},
)
async def delete_workflow_thumbnail(
+ current_user: CurrentUserOrDefault,
workflow_id: str = Path(description="The workflow to update"),
):
"""Removes a workflow's thumbnail image"""
try:
- ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
except WorkflowNotFoundError:
raise HTTPException(status_code=404, detail="Workflow not found")
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser and not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to update this workflow")
+
try:
ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
except ValueError as e:
@@ -206,8 +262,12 @@ async def delete_workflow_thumbnail(
async def get_workflow_thumbnail(
workflow_id: str = Path(description="The id of the workflow thumbnail to get"),
) -> FileResponse:
- """Gets a workflow's thumbnail image"""
+ """Gets a workflow's thumbnail image.
+ This endpoint is intentionally unauthenticated because browsers load images
+ via
tags which cannot send Bearer tokens. Workflow IDs are UUIDs,
+ providing security through unguessability.
+ """
try:
path = ApiDependencies.invoker.services.workflow_thumbnails.get_path(workflow_id)
@@ -223,37 +283,91 @@ async def get_workflow_thumbnail(
raise HTTPException(status_code=404)
+@workflows_router.patch(
+ "/i/{workflow_id}/is_public",
+ operation_id="update_workflow_is_public",
+ responses={
+ 200: {"model": WorkflowRecordDTO},
+ },
+)
+async def update_workflow_is_public(
+ current_user: CurrentUserOrDefault,
+ workflow_id: str = Path(description="The workflow to update"),
+ is_public: bool = Body(description="Whether the workflow should be shared publicly", embed=True),
+) -> WorkflowRecordDTO:
+ """Updates whether a workflow is shared publicly"""
+ try:
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+ except WorkflowNotFoundError:
+ raise HTTPException(status_code=404, detail="Workflow not found")
+
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser and not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to update this workflow")
+
+ user_id = None if current_user.is_admin else current_user.user_id
+ return ApiDependencies.invoker.services.workflow_records.update_is_public(
+ workflow_id=workflow_id, is_public=is_public, user_id=user_id
+ )
+
+
@workflows_router.get("/tags", operation_id="get_all_tags")
async def get_all_tags(
+ current_user: CurrentUserOrDefault,
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
+ is_public: Optional[bool] = Query(default=None, description="Filter by public/shared status"),
) -> list[str]:
"""Gets all unique tags from workflows"""
+ config = ApiDependencies.invoker.services.configuration
+ user_id_filter: Optional[str] = None
+ if config.multiuser:
+ has_user_category = not categories or WorkflowCategory.User in categories
+ if has_user_category and is_public is not True:
+ user_id_filter = current_user.user_id
- return ApiDependencies.invoker.services.workflow_records.get_all_tags(categories=categories)
+ return ApiDependencies.invoker.services.workflow_records.get_all_tags(
+ categories=categories, user_id=user_id_filter, is_public=is_public
+ )
@workflows_router.get("/counts_by_tag", operation_id="get_counts_by_tag")
async def get_counts_by_tag(
+ current_user: CurrentUserOrDefault,
tags: list[str] = Query(description="The tags to get counts for"),
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+ is_public: Optional[bool] = Query(default=None, description="Filter by public/shared status"),
) -> dict[str, int]:
"""Counts workflows by tag"""
+ config = ApiDependencies.invoker.services.configuration
+ user_id_filter: Optional[str] = None
+ if config.multiuser:
+ has_user_category = not categories or WorkflowCategory.User in categories
+ if has_user_category and is_public is not True:
+ user_id_filter = current_user.user_id
return ApiDependencies.invoker.services.workflow_records.counts_by_tag(
- tags=tags, categories=categories, has_been_opened=has_been_opened
+ tags=tags, categories=categories, has_been_opened=has_been_opened, user_id=user_id_filter, is_public=is_public
)
@workflows_router.get("/counts_by_category", operation_id="counts_by_category")
async def counts_by_category(
+ current_user: CurrentUserOrDefault,
categories: list[WorkflowCategory] = Query(description="The categories to include"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+ is_public: Optional[bool] = Query(default=None, description="Filter by public/shared status"),
) -> dict[str, int]:
"""Counts workflows by category"""
+ config = ApiDependencies.invoker.services.configuration
+ user_id_filter: Optional[str] = None
+ if config.multiuser:
+ has_user_category = WorkflowCategory.User in categories
+ if has_user_category and is_public is not True:
+ user_id_filter = current_user.user_id
return ApiDependencies.invoker.services.workflow_records.counts_by_category(
- categories=categories, has_been_opened=has_been_opened
+ categories=categories, has_been_opened=has_been_opened, user_id=user_id_filter, is_public=is_public
)
@@ -262,7 +376,18 @@ async def counts_by_category(
operation_id="update_opened_at",
)
async def update_opened_at(
+ current_user: CurrentUserOrDefault,
workflow_id: str = Path(description="The workflow to update"),
) -> None:
"""Updates the opened_at field of a workflow"""
- ApiDependencies.invoker.services.workflow_records.update_opened_at(workflow_id)
+ try:
+ existing = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+ except WorkflowNotFoundError:
+ raise HTTPException(status_code=404, detail="Workflow not found")
+
+ config = ApiDependencies.invoker.services.configuration
+ if config.multiuser and not current_user.is_admin and existing.user_id != current_user.user_id:
+ raise HTTPException(status_code=403, detail="Not authorized to update this workflow")
+
+ user_id = None if current_user.is_admin else current_user.user_id
+ ApiDependencies.invoker.services.workflow_records.update_opened_at(workflow_id, user_id=user_id)
diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py
index fcead54eb1..5783b804c0 100644
--- a/invokeai/app/api/sockets.py
+++ b/invokeai/app/api/sockets.py
@@ -121,6 +121,11 @@ class SocketIO:
Returns True to accept the connection, False to reject it.
Stores user_id in the internal socket users dict for later use.
+
+ In multiuser mode, connections without a valid token are rejected outright
+ so that anonymous clients cannot subscribe to queue rooms and observe
+ queue activity belonging to other users. In single-user mode, unauthenticated
+ connections are accepted as the system admin user.
"""
# Extract token from auth data or headers
token = None
@@ -137,6 +142,23 @@ class SocketIO:
if token:
token_data = verify_token(token)
if token_data:
+ # In multiuser mode, also verify the backing user record still
+ # exists and is active — mirrors the REST auth check in
+ # auth_dependencies.py. A deleted or deactivated user whose
+ # JWT has not yet expired must not be allowed to open a socket.
+ if self._is_multiuser_enabled():
+ try:
+ from invokeai.app.api.dependencies import ApiDependencies
+
+ user = ApiDependencies.invoker.services.users.get(token_data.user_id)
+ if user is None or not user.is_active:
+ logger.warning(f"Rejecting socket {sid}: user {token_data.user_id} not found or inactive")
+ return False
+ except Exception:
+ # If user service is unavailable, fail closed
+ logger.warning(f"Rejecting socket {sid}: unable to verify user record")
+ return False
+
# Store user_id and is_admin in socket users dict
self._socket_users[sid] = {
"user_id": token_data.user_id,
@@ -147,14 +169,37 @@ class SocketIO:
)
return True
- # If no valid token, store system user for backward compatibility
+ # No valid token provided. In multiuser mode this is not allowed — reject
+ # the connection so anonymous clients cannot subscribe to queue rooms.
+ # In single-user mode, fall through and accept the socket as system admin.
+ if self._is_multiuser_enabled():
+ logger.warning(
+ f"Rejecting socket {sid} connection: multiuser mode is enabled and no valid auth token was provided"
+ )
+ return False
+
self._socket_users[sid] = {
"user_id": "system",
- "is_admin": False,
+ "is_admin": True,
}
- logger.debug(f"Socket {sid} connected as system user (no valid token)")
+ logger.debug(f"Socket {sid} connected as system admin (single-user mode)")
return True
+ @staticmethod
+ def _is_multiuser_enabled() -> bool:
+ """Check whether multiuser mode is enabled. Fails closed if configuration
+ is not yet initialized, which should not happen in practice but prevents
+ accidentally opening the socket during startup races."""
+ try:
+ # Imported here to avoid a circular import at module load time.
+ from invokeai.app.api.dependencies import ApiDependencies
+
+ return bool(ApiDependencies.invoker.services.configuration.multiuser)
+ except Exception:
+ # If dependencies are not initialized, fail closed (treat as multiuser)
+ # so we never accidentally admit an anonymous socket.
+ return True
+
async def _handle_disconnect(self, sid: str) -> None:
"""Handle socket disconnection and cleanup user info."""
if sid in self._socket_users:
@@ -165,15 +210,20 @@ class SocketIO:
"""Handle queue subscription and add socket to both queue and user-specific rooms."""
queue_id = QueueSubscriptionEvent(**data).queue_id
- # Check if we have user info for this socket
+ # Check if we have user info for this socket. In multiuser mode _handle_connect
+ # will have already rejected any socket without a valid token, so missing user
+ # info here is a bug — refuse the subscription rather than silently falling back
+ # to an anonymous system user who could then receive queue item events.
if sid not in self._socket_users:
- logger.warning(
- f"Socket {sid} subscribing to queue {queue_id} but has no user info - need to authenticate via connect event"
- )
- # Store as system user temporarily - real auth should happen in connect
+ if self._is_multiuser_enabled():
+ logger.warning(
+ f"Refusing queue subscription for socket {sid}: no user info (socket not authenticated via connect event)"
+ )
+ return
+ # Single-user mode: safe to fall back to the system admin user.
self._socket_users[sid] = {
"user_id": "system",
- "is_admin": False,
+ "is_admin": True,
}
user_id = self._socket_users[sid]["user_id"]
@@ -198,6 +248,13 @@ class SocketIO:
await self._sio.leave_room(sid, QueueSubscriptionEvent(**data).queue_id)
async def _handle_sub_bulk_download(self, sid: str, data: Any) -> None:
+ # In multiuser mode, only allow authenticated sockets to subscribe.
+ # Bulk download events are routed to user-specific rooms, so the
+ # bulk_download_id room subscription is only kept for single-user
+ # backward compatibility.
+ if self._is_multiuser_enabled() and sid not in self._socket_users:
+ logger.warning(f"Refusing bulk download subscription for unknown socket {sid} in multiuser mode")
+ return
+ # Join the room keyed by bulk_download_id so this socket receives that
+ # download's events (single-user / legacy path).
await self._sio.enter_room(sid, BulkDownloadSubscriptionEvent(**data).bulk_download_id)
async def _handle_unsub_bulk_download(self, sid: str, data: Any) -> None:
@@ -206,9 +263,17 @@ class SocketIO:
async def _handle_queue_event(self, event: FastAPIEvent[QueueEventBase]):
"""Handle queue events with user isolation.
- Invocation events (progress, started, complete) are private - only emit to owner and admins.
- Queue item status events are public - emit to all users (field values hidden via API).
- Other queue events emit to all subscribers.
+ All queue item events (invocation events AND QueueItemStatusChangedEvent) are
+ private to the owning user and admins. They carry unsanitized user_id, batch_id,
+ session_id, origin, destination and error metadata, and must never be broadcast
+ to the whole queue room — otherwise any other authenticated subscriber could
+ observe cross-user queue activity.
+
+ RecallParametersUpdatedEvent is also private to the owner + admins.
+
+ BatchEnqueuedEvent carries the enqueuing user's batch_id/origin/counts and
+ is also routed privately. QueueClearedEvent is the only queue event that
+ is still broadcast to the whole queue room.
IMPORTANT: Check InvocationEventBase BEFORE QueueItemEventBase since InvocationEventBase
inherits from QueueItemEventBase. The order of isinstance checks matters!
@@ -237,24 +302,40 @@ class SocketIO:
logger.debug(f"Emitted private invocation event {event_name} to user room {user_room} and admin room")
- # Queue item status events are visible to all users (field values masked via API)
- # This catches QueueItemStatusChangedEvent but NOT InvocationEvents (already handled above)
+ # Other queue item events (QueueItemStatusChangedEvent) carry unsanitized
+ # user_id, batch_id, session_id, origin, destination and error metadata.
+ # They are private to the owning user + admins — never broadcast to the
+ # full queue room.
elif isinstance(event_data, QueueItemEventBase) and hasattr(event_data, "user_id"):
- # Emit to all subscribers in the queue
- await self._sio.emit(
- event=event_name, data=event_data.model_dump(mode="json"), room=event_data.queue_id
- )
+ user_room = f"user:{event_data.user_id}"
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room=user_room)
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room="admin")
- logger.info(
- f"Emitted public queue item event {event_name} to all subscribers in queue {event_data.queue_id}"
- )
+ logger.debug(f"Emitted private queue item event {event_name} to user room {user_room} and admin room")
+
+ # RecallParametersUpdatedEvent is private - only emit to owner + admins
+ elif isinstance(event_data, RecallParametersUpdatedEvent):
+ user_room = f"user:{event_data.user_id}"
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room=user_room)
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room="admin")
+ logger.debug(f"Emitted private recall_parameters_updated event to user room {user_room} and admin room")
+
+ # BatchEnqueuedEvent carries the enqueuing user's batch_id, origin, and
+ # enqueued counts. Route it privately to the owner + admins so other
+ # users do not observe cross-user batch activity.
+ elif isinstance(event_data, BatchEnqueuedEvent):
+ user_room = f"user:{event_data.user_id}"
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room=user_room)
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room="admin")
+ logger.debug(f"Emitted private batch_enqueued event to user room {user_room} and admin room")
else:
- # For other queue events (like QueueClearedEvent, BatchEnqueuedEvent), emit to all subscribers
+ # For remaining queue events (e.g. QueueClearedEvent) that do not
+ # carry user identity, emit to all subscribers in the queue room.
await self._sio.emit(
event=event_name, data=event_data.model_dump(mode="json"), room=event_data.queue_id
)
- logger.info(
+ logger.debug(
f"Emitted general queue event {event_name} to all subscribers in queue {event_data.queue_id}"
)
except Exception as e:
@@ -265,4 +346,17 @@ class SocketIO:
await self._sio.emit(event=event[0], data=event[1].model_dump(mode="json"))
async def _handle_bulk_image_download_event(self, event: FastAPIEvent[BulkDownloadEventBase]) -> None:
- await self._sio.emit(event=event[0], data=event[1].model_dump(mode="json"), room=event[1].bulk_download_id)
+ event_name, event_data = event
+ # Route to user-specific + admin rooms so that other authenticated
+ # users cannot learn the bulk_download_item_name (the capability token
+ # needed to fetch the zip from the unauthenticated GET endpoint).
+ # In single-user mode (user_id="system"), fall back to the shared
+ # bulk_download_id room for backward compatibility.
+ # NOTE(review): the hasattr guard also routes events that lack a user_id
+ # attribute to the shared room — confirm every BulkDownloadEventBase
+ # subclass that carries private data actually defines user_id.
+ if hasattr(event_data, "user_id") and event_data.user_id != "system":
+ user_room = f"user:{event_data.user_id}"
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room=user_room)
+ await self._sio.emit(event=event_name, data=event_data.model_dump(mode="json"), room="admin")
+ else:
+ await self._sio.emit(
+ event=event_name, data=event_data.model_dump(mode="json"), room=event_data.bulk_download_id
+ )
diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py
index 71b99d6687..fbe0e9a615 100644
--- a/invokeai/app/invocations/fields.py
+++ b/invokeai/app/invocations/fields.py
@@ -171,6 +171,8 @@ class FieldDescriptions:
sd3_model = "SD3 model (MMDiTX) to load"
cogview4_model = "CogView4 model (Transformer) to load"
z_image_model = "Z-Image model (Transformer) to load"
+ qwen_image_model = "Qwen Image Edit model (Transformer) to load"
+ qwen_vl_encoder = "Qwen2.5-VL tokenizer, processor and text/vision encoder"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
@@ -340,6 +342,12 @@ class ZImageConditioningField(BaseModel):
)
+class QwenImageConditioningField(BaseModel):
+ """A Qwen Image Edit conditioning tensor primitive value"""
+
+ # Key used to look the saved conditioning tensor up in the conditioning store.
+ conditioning_name: str = Field(description="The name of conditioning tensor")
+
+
class AnimaConditioningField(BaseModel):
"""An Anima conditioning tensor primitive value.
diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py
index 29e8b3d69b..da24d8802b 100644
--- a/invokeai/app/invocations/metadata.py
+++ b/invokeai/app/invocations/metadata.py
@@ -166,6 +166,10 @@ GENERATION_MODES = Literal[
"z_image_img2img",
"z_image_inpaint",
"z_image_outpaint",
+ "qwen_image_txt2img",
+ "qwen_image_img2img",
+ "qwen_image_inpaint",
+ "qwen_image_outpaint",
"anima_txt2img",
"anima_img2img",
"anima_inpaint",
diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py
index 29fbe5100c..6b5afb5529 100644
--- a/invokeai/app/invocations/model.py
+++ b/invokeai/app/invocations/model.py
@@ -72,6 +72,13 @@ class GlmEncoderField(BaseModel):
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
+class QwenVLEncoderField(BaseModel):
+ """Field for Qwen2.5-VL encoder used by Qwen Image Edit models."""
+
+ # Identifies the tokenizer submodel to load via the model manager.
+ tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
+ # Identifies the text/vision encoder submodel to load via the model manager.
+ text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
+
+
class Qwen3EncoderField(BaseModel):
"""Field for Qwen3 text encoder used by Z-Image models."""
diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py
index 2f404d16ba..7ec6c3dc14 100644
--- a/invokeai/app/invocations/primitives.py
+++ b/invokeai/app/invocations/primitives.py
@@ -25,6 +25,7 @@ from invokeai.app.invocations.fields import (
InputField,
LatentsField,
OutputField,
+ QwenImageConditioningField,
SD3ConditioningField,
TensorField,
UIComponent,
@@ -474,6 +475,17 @@ class ZImageConditioningOutput(BaseInvocationOutput):
return cls(conditioning=ZImageConditioningField(conditioning_name=conditioning_name))
+@invocation_output("qwen_image_conditioning_output")
+class QwenImageConditioningOutput(BaseInvocationOutput):
+ """Base class for nodes that output a Qwen Image Edit conditioning tensor."""
+
+ conditioning: QwenImageConditioningField = OutputField(description=FieldDescriptions.cond)
+
+ @classmethod
+ def build(cls, conditioning_name: str) -> "QwenImageConditioningOutput":
+ """Construct an output wrapping the name of a stored conditioning tensor."""
+ return cls(conditioning=QwenImageConditioningField(conditioning_name=conditioning_name))
+
+
@invocation_output("anima_conditioning_output")
class AnimaConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output an Anima text conditioning tensor."""
diff --git a/invokeai/app/invocations/qwen_image_denoise.py b/invokeai/app/invocations/qwen_image_denoise.py
new file mode 100644
index 0000000000..04e21a26c3
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_denoise.py
@@ -0,0 +1,490 @@
+from contextlib import ExitStack
+from typing import Callable, Iterator, Optional, Tuple
+
+import torch
+import torchvision.transforms as tv_transforms
+from diffusers.models.transformers.transformer_qwenimage import QwenImageTransformer2DModel
+from torchvision.transforms.functional import resize as tv_resize
+from tqdm import tqdm
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
+from invokeai.app.invocations.fields import (
+ DenoiseMaskField,
+ FieldDescriptions,
+ Input,
+ InputField,
+ LatentsField,
+ QwenImageConditioningField,
+ WithBoard,
+ WithMetadata,
+)
+from invokeai.app.invocations.model import TransformerField
+from invokeai.app.invocations.primitives import LatentsOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat
+from invokeai.backend.patches.layer_patcher import LayerPatcher
+from invokeai.backend.patches.lora_conversions.qwen_image_lora_constants import (
+ QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX,
+)
+from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
+from invokeai.backend.rectified_flow.rectified_flow_inpaint_extension import RectifiedFlowInpaintExtension
+from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
+from invokeai.backend.stable_diffusion.diffusion.conditioning_data import QwenImageConditioningInfo
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation(
+ "qwen_image_denoise",
+ title="Denoise - Qwen Image",
+ tags=["image", "qwen_image"],
+ category="image",
+ version="1.0.0",
+ classification=Classification.Prototype,
+)
+class QwenImageDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
+ """Run the denoising process with a Qwen Image model."""
+
+ # If latents is provided, this means we are doing image-to-image.
+ latents: Optional[LatentsField] = InputField(
+ default=None, description=FieldDescriptions.latents, input=Input.Connection
+ )
+ # Reference image latents (encoded through VAE) to concatenate with noisy latents.
+ reference_latents: Optional[LatentsField] = InputField(
+ default=None,
+ description="Reference image latents to guide generation. Encoded through the VAE.",
+ input=Input.Connection,
+ )
+ # denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
+ denoise_mask: Optional[DenoiseMaskField] = InputField(
+ default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection
+ )
+ denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
+ denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
+ transformer: TransformerField = InputField(
+ description=FieldDescriptions.qwen_image_model, input=Input.Connection, title="Transformer"
+ )
+ positive_conditioning: QwenImageConditioningField = InputField(
+ description=FieldDescriptions.positive_cond, input=Input.Connection
+ )
+ negative_conditioning: Optional[QwenImageConditioningField] = InputField(
+ default=None, description=FieldDescriptions.negative_cond, input=Input.Connection
+ )
+ cfg_scale: float | list[float] = InputField(default=4.0, description=FieldDescriptions.cfg_scale, title="CFG Scale")
+ width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
+ height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
+ steps: int = InputField(default=40, gt=0, description=FieldDescriptions.steps)
+ seed: int = InputField(default=0, description="Randomness seed for reproducibility.")
+ shift: Optional[float] = InputField(
+ default=None,
+ description="Override the sigma schedule shift. "
+ "When set, uses a fixed shift (e.g. 3.0 for Lightning LoRAs) instead of the default dynamic shifting. "
+ "Leave unset for the base model's default schedule.",
+ )
+
+ @torch.no_grad()
+ def invoke(self, context: InvocationContext) -> LatentsOutput:
+ # Run the full diffusion loop, then detach and move the result to CPU
+ # before saving it to the tensor store.
+ latents = self._run_diffusion(context)
+ latents = latents.detach().to("cpu")
+
+ name = context.tensors.save(tensor=latents)
+ return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
+
+ def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
+ """Load and prepare the inpaint mask, or return None when no mask is set.
+
+ The stored mask is inverted (1.0 - mask), bilinearly resized to the latent
+ resolution, and moved to the latents' device/dtype.
+ """
+ if self.denoise_mask is None:
+ return None
+ mask = context.tensors.load(self.denoise_mask.mask_name)
+ mask = 1.0 - mask
+
+ _, _, latent_height, latent_width = latents.shape
+ mask = tv_resize(
+ img=mask,
+ size=[latent_height, latent_width],
+ interpolation=tv_transforms.InterpolationMode.BILINEAR,
+ antialias=False,
+ )
+
+ mask = mask.to(device=latents.device, dtype=latents.dtype)
+ return mask
+
+ def _load_text_conditioning(
+ self,
+ context: InvocationContext,
+ conditioning_name: str,
+ dtype: torch.dtype,
+ device: torch.device,
+ ) -> tuple[torch.Tensor, torch.Tensor | None]:
+ """Load a single QwenImageConditioningInfo and return its prompt embeddings
+ and attention mask, moved to the requested device/dtype."""
+ cond_data = context.conditioning.load(conditioning_name)
+ assert len(cond_data.conditionings) == 1
+ conditioning = cond_data.conditionings[0]
+ assert isinstance(conditioning, QwenImageConditioningInfo)
+ conditioning = conditioning.to(dtype=dtype, device=device)
+ return conditioning.prompt_embeds, conditioning.prompt_embeds_mask
+
+ def _get_noise(
+ self,
+ batch_size: int,
+ num_channels_latents: int,
+ height: int,
+ width: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ seed: int,
+ ) -> torch.Tensor:
+ """Generate seeded Gaussian noise at latent resolution.
+
+ The noise is always generated on CPU in float32 so results are reproducible
+ across devices, then moved to the target device/dtype.
+ """
+ rand_device = "cpu"
+ rand_dtype = torch.float32
+
+ return torch.randn(
+ batch_size,
+ num_channels_latents,
+ int(height) // LATENT_SCALE_FACTOR,
+ int(width) // LATENT_SCALE_FACTOR,
+ device=rand_device,
+ dtype=rand_dtype,
+ generator=torch.Generator(device=rand_device).manual_seed(seed),
+ ).to(device=device, dtype=dtype)
+
+ def _prepare_cfg_scale(self, num_timesteps: int) -> list[float]:
+ """Expand a scalar cfg_scale to a per-timestep list; a list input must
+ already have exactly num_timesteps entries."""
+ if isinstance(self.cfg_scale, float):
+ cfg_scale = [self.cfg_scale] * num_timesteps
+ elif isinstance(self.cfg_scale, list):
+ assert len(self.cfg_scale) == num_timesteps
+ cfg_scale = self.cfg_scale
+ else:
+ raise ValueError(f"Invalid CFG scale type: {type(self.cfg_scale)}")
+ return cfg_scale
+
+ @staticmethod
+ def _pack_latents(
+ latents: torch.Tensor, batch_size: int, num_channels: int, height: int, width: int
+ ) -> torch.Tensor:
+ """Pack 4D latents (B, C, H, W) into 2x2-patched 3D (B, H/2*W/2, C*4)."""
+ latents = latents.view(batch_size, num_channels, height // 2, 2, width // 2, 2)
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels * 4)
+ return latents
+
+ @staticmethod
+ def _unpack_latents(latents: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """Unpack 3D patched latents (B, seq, C*4) back to 4D (B, C, H, W)."""
+ batch_size, _num_patches, channels = latents.shape
+ # height/width are in latent space; they must be divisible by 2 for packing
+ h = 2 * (height // 2)
+ w = 2 * (width // 2)
+ latents = latents.view(batch_size, h // 2, w // 2, channels // 4, 2, 2)
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
+ latents = latents.reshape(batch_size, channels // 4, h, w)
+ return latents
+
+ def _run_diffusion(self, context: InvocationContext):
+ """Execute the rectified-flow denoising loop with the Qwen Image transformer.
+
+ Returns a 5D tensor (B, C, 1, H, W) for the video-style Qwen VAE — but see
+ the NOTE on the total_steps <= 0 early return below, which returns 4D.
+ """
+ inference_dtype = torch.bfloat16
+ device = TorchDevice.choose_torch_device()
+
+ transformer_info = context.models.load(self.transformer.transformer)
+ assert isinstance(transformer_info.model, QwenImageTransformer2DModel)
+
+ # Load conditioning
+ pos_prompt_embeds, pos_prompt_mask = self._load_text_conditioning(
+ context=context,
+ conditioning_name=self.positive_conditioning.conditioning_name,
+ dtype=inference_dtype,
+ device=device,
+ )
+
+ neg_prompt_embeds = None
+ neg_prompt_mask = None
+ # Match the diffusers pipeline: only enable CFG when cfg_scale > 1 AND negative conditioning is provided.
+ # With cfg_scale <= 1, the negative prediction is unused, so skip it entirely.
+ # For per-step arrays, enable CFG if any step has scale > 1.
+ if isinstance(self.cfg_scale, list):
+ any_cfg_above_one = any(v > 1.0 for v in self.cfg_scale)
+ else:
+ any_cfg_above_one = self.cfg_scale > 1.0
+ do_classifier_free_guidance = self.negative_conditioning is not None and any_cfg_above_one
+ if do_classifier_free_guidance:
+ neg_prompt_embeds, neg_prompt_mask = self._load_text_conditioning(
+ context=context,
+ conditioning_name=self.negative_conditioning.conditioning_name,
+ dtype=inference_dtype,
+ device=device,
+ )
+
+ # Prepare the timestep / sigma schedule
+ patch_size = transformer_info.model.config.patch_size
+ assert isinstance(patch_size, int)
+ # Output channels is 16 (the actual latent channels)
+ out_channels = transformer_info.model.config.out_channels
+ assert isinstance(out_channels, int)
+
+ latent_height = self.height // LATENT_SCALE_FACTOR
+ latent_width = self.width // LATENT_SCALE_FACTOR
+ image_seq_len = (latent_height * latent_width) // (patch_size**2)
+
+ # Use the actual FlowMatchEulerDiscreteScheduler to compute sigmas/timesteps,
+ # exactly matching the diffusers pipeline.
+ import math
+
+ import numpy as np
+ from diffusers.schedulers.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
+
+ # Try to load the scheduler config from the model's directory (Diffusers models
+ # have a scheduler/ subdir). For GGUF models this path doesn't exist, so fall
+ # back to instantiating the scheduler with the known Qwen Image defaults.
+ model_path = context.models.get_absolute_path(context.models.get_config(self.transformer.transformer))
+ scheduler_path = model_path / "scheduler"
+ if scheduler_path.is_dir() and (scheduler_path / "scheduler_config.json").exists():
+ scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(str(scheduler_path), local_files_only=True)
+ else:
+ scheduler = FlowMatchEulerDiscreteScheduler(
+ use_dynamic_shifting=True,
+ base_shift=0.5,
+ max_shift=0.9,
+ base_image_seq_len=256,
+ max_image_seq_len=8192,
+ shift_terminal=0.02,
+ num_train_timesteps=1000,
+ time_shift_type="exponential",
+ )
+
+ if self.shift is not None:
+ # Lightning LoRA: fixed shift
+ mu = math.log(self.shift)
+ else:
+ # Default dynamic shifting
+ # Linear interpolation matching diffusers' calculate_shift
+ base_shift = scheduler.config.get("base_shift", 0.5)
+ max_shift = scheduler.config.get("max_shift", 0.9)
+ base_seq = scheduler.config.get("base_image_seq_len", 256)
+ max_seq = scheduler.config.get("max_image_seq_len", 4096)
+ # NOTE(review): this fallback default (4096) differs from the 8192 used
+ # when constructing the fallback scheduler above. It only matters when a
+ # loaded scheduler config lacks the key — confirm which value is intended.
+ m = (max_shift - base_shift) / (max_seq - base_seq)
+ b = base_shift - m * base_seq
+ mu = image_seq_len * m + b
+
+ init_sigmas = np.linspace(1.0, 1.0 / self.steps, self.steps).tolist()
+ scheduler.set_timesteps(sigmas=init_sigmas, mu=mu, device=device)
+
+ # Clip the schedule based on denoising_start/denoising_end to support img2img strength.
+ # The scheduler's sigmas go from high (noisy) to 0 (clean). We clip to the fractional range.
+ sigmas_sched = scheduler.sigmas # (N+1,) including terminal 0
+ if self.denoising_start > 0 or self.denoising_end < 1:
+ total_sigmas = len(sigmas_sched) - 1 # exclude terminal
+ start_idx = int(round(self.denoising_start * total_sigmas))
+ end_idx = int(round(self.denoising_end * total_sigmas))
+ sigmas_sched = sigmas_sched[start_idx : end_idx + 1] # +1 to include the next sigma for dt
+ # Rebuild timesteps from clipped sigmas (exclude terminal 0)
+ timesteps_sched = sigmas_sched[:-1] * scheduler.config.num_train_timesteps
+ else:
+ timesteps_sched = scheduler.timesteps
+
+ total_steps = len(timesteps_sched)
+
+ cfg_scale = self._prepare_cfg_scale(total_steps)
+
+ # Load initial latents if provided (for img2img)
+ init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
+ if init_latents is not None:
+ init_latents = init_latents.to(device=device, dtype=inference_dtype)
+ if init_latents.dim() == 5:
+ init_latents = init_latents.squeeze(2)
+
+ # Load reference image latents if provided
+ ref_latents = None
+ if self.reference_latents is not None:
+ ref_latents = context.tensors.load(self.reference_latents.latents_name)
+ ref_latents = ref_latents.to(device=device, dtype=inference_dtype)
+ # The VAE encoder produces 5D latents (B, C, 1, H, W); squeeze the frame dim
+ # so we have 4D (B, C, H, W) for packing.
+ if ref_latents.dim() == 5:
+ ref_latents = ref_latents.squeeze(2)
+
+ # Generate noise (16 channels - the output latent channels)
+ noise = self._get_noise(
+ batch_size=1,
+ num_channels_latents=out_channels,
+ height=self.height,
+ width=self.width,
+ dtype=inference_dtype,
+ device=device,
+ seed=self.seed,
+ )
+
+ # Prepare input latent image
+ if init_latents is not None:
+ s_0 = sigmas_sched[0].item()
+ latents = s_0 * noise + (1.0 - s_0) * init_latents
+ else:
+ if self.denoising_start > 1e-5:
+ raise ValueError("denoising_start should be 0 when initial latents are not provided.")
+ latents = noise
+
+ if total_steps <= 0:
+ # NOTE(review): this early return yields 4D (B, C, H, W) latents, while the
+ # normal path below returns 5D (B, C, 1, H, W) after unsqueeze(2). Confirm
+ # downstream consumers tolerate both shapes, or unsqueeze here too.
+ return latents
+
+ # Pack latents into 2x2 patches: (B, C, H, W) -> (B, H/2*W/2, C*4)
+ latents = self._pack_latents(latents, 1, out_channels, latent_height, latent_width)
+
+ # Determine whether the model uses reference latent conditioning (zero_cond_t).
+ # Edit models (zero_cond_t=True) expect [noisy_patches ; ref_patches] in the sequence.
+ # Txt2img models (zero_cond_t=False) only take noisy patches.
+ has_zero_cond_t = getattr(transformer_info.model, "zero_cond_t", False) or getattr(
+ transformer_info.model.config, "zero_cond_t", False
+ )
+ use_ref_latents = has_zero_cond_t
+
+ ref_latents_packed = None
+ if use_ref_latents:
+ if ref_latents is not None:
+ # ref_ch is unpacked but unused; only the spatial dims are checked.
+ _, ref_ch, rh, rw = ref_latents.shape
+ if rh != latent_height or rw != latent_width:
+ ref_latents = torch.nn.functional.interpolate(
+ ref_latents, size=(latent_height, latent_width), mode="bilinear"
+ )
+ else:
+ # No reference image provided — use zeros so the model still gets the
+ # expected sequence layout.
+ ref_latents = torch.zeros(
+ 1, out_channels, latent_height, latent_width, device=device, dtype=inference_dtype
+ )
+ ref_latents_packed = self._pack_latents(ref_latents, 1, out_channels, latent_height, latent_width)
+
+ # img_shapes tells the transformer the spatial layout of patches.
+ if use_ref_latents:
+ img_shapes = [
+ [
+ (1, latent_height // 2, latent_width // 2),
+ (1, latent_height // 2, latent_width // 2),
+ ]
+ ]
+ else:
+ img_shapes = [
+ [
+ (1, latent_height // 2, latent_width // 2),
+ ]
+ ]
+
+ # Prepare inpaint extension (operates in 4D space, so unpack/repack around it)
+ inpaint_mask = self._prep_inpaint_mask(context, noise) # noise has the right 4D shape
+ inpaint_extension: RectifiedFlowInpaintExtension | None = None
+ if inpaint_mask is not None:
+ assert init_latents is not None
+ inpaint_extension = RectifiedFlowInpaintExtension(
+ init_latents=init_latents,
+ inpaint_mask=inpaint_mask,
+ noise=noise,
+ )
+
+ step_callback = self._build_step_callback(context)
+
+ # Emit an initial progress event before the first step.
+ step_callback(
+ PipelineIntermediateState(
+ step=0,
+ order=1,
+ total_steps=total_steps,
+ timestep=int(timesteps_sched[0].item()) if len(timesteps_sched) > 0 else 0,
+ latents=self._unpack_latents(latents, latent_height, latent_width),
+ ),
+ )
+
+ noisy_seq_len = latents.shape[1]
+
+ # Determine if the model is quantized — GGUF models need sidecar patching for LoRAs
+ transformer_config = context.models.get_config(self.transformer.transformer)
+ model_is_quantized = transformer_config.format in (ModelFormat.GGUFQuantized,)
+
+ with ExitStack() as exit_stack:
+ (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device())
+ assert isinstance(transformer, QwenImageTransformer2DModel)
+
+ # Apply LoRA patches to the transformer
+ exit_stack.enter_context(
+ LayerPatcher.apply_smart_model_patches(
+ model=transformer,
+ patches=self._lora_iterator(context),
+ prefix=QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX,
+ dtype=inference_dtype,
+ cached_weights=cached_weights,
+ force_sidecar_patching=model_is_quantized,
+ )
+ )
+
+ for step_idx, t in enumerate(tqdm(timesteps_sched)):
+ # The pipeline passes timestep / 1000 to the transformer
+ timestep = t.expand(latents.shape[0]).to(inference_dtype)
+
+ # For edit models: concatenate noisy and reference patches along the sequence dim
+ # For txt2img models: just use noisy patches
+ if ref_latents_packed is not None:
+ model_input = torch.cat([latents, ref_latents_packed], dim=1)
+ else:
+ model_input = latents
+
+ noise_pred_cond = transformer(
+ hidden_states=model_input,
+ encoder_hidden_states=pos_prompt_embeds,
+ encoder_hidden_states_mask=pos_prompt_mask,
+ timestep=timestep / 1000,
+ img_shapes=img_shapes,
+ return_dict=False,
+ )[0]
+ # Only keep the noisy-latent portion of the output
+ noise_pred_cond = noise_pred_cond[:, :noisy_seq_len]
+
+ if do_classifier_free_guidance and neg_prompt_embeds is not None:
+ noise_pred_uncond = transformer(
+ hidden_states=model_input,
+ encoder_hidden_states=neg_prompt_embeds,
+ encoder_hidden_states_mask=neg_prompt_mask,
+ timestep=timestep / 1000,
+ img_shapes=img_shapes,
+ return_dict=False,
+ )[0]
+ noise_pred_uncond = noise_pred_uncond[:, :noisy_seq_len]
+
+ noise_pred = noise_pred_uncond + cfg_scale[step_idx] * (noise_pred_cond - noise_pred_uncond)
+ else:
+ noise_pred = noise_pred_cond
+
+ # Euler step using the (possibly clipped) sigma schedule
+ sigma_curr = sigmas_sched[step_idx]
+ sigma_next = sigmas_sched[step_idx + 1]
+ dt = sigma_next - sigma_curr
+ # Integrate in float32 for numerical stability, then cast back.
+ latents = latents.to(torch.float32) + dt * noise_pred.to(torch.float32)
+ latents = latents.to(inference_dtype)
+
+ if inpaint_extension is not None:
+ sigma_next = sigmas_sched[step_idx + 1].item()
+ latents_4d = self._unpack_latents(latents, latent_height, latent_width)
+ latents_4d = inpaint_extension.merge_intermediate_latents_with_init_latents(latents_4d, sigma_next)
+ latents = self._pack_latents(latents_4d, 1, out_channels, latent_height, latent_width)
+
+ step_callback(
+ PipelineIntermediateState(
+ step=step_idx + 1,
+ order=1,
+ total_steps=total_steps,
+ timestep=int(t.item()),
+ latents=self._unpack_latents(latents, latent_height, latent_width),
+ ),
+ )
+
+ # Unpack back to 4D then add frame dim for the video-style VAE: (B, C, 1, H, W)
+ latents = self._unpack_latents(latents, latent_height, latent_width)
+ latents = latents.unsqueeze(2)
+ return latents
+
+ def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
+ """Return a callback that forwards denoise progress to the UI."""
+ def step_callback(state: PipelineIntermediateState) -> None:
+ context.util.sd_step_callback(state, BaseModelType.QwenImage)
+
+ return step_callback
+
+ def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
+ """Iterate over LoRA models to apply to the transformer."""
+ for lora in self.transformer.loras:
+ lora_info = context.models.load(lora.lora)
+ if not isinstance(lora_info.model, ModelPatchRaw):
+ raise TypeError(
+ f"Expected ModelPatchRaw for LoRA '{lora.lora.key}', got {type(lora_info.model).__name__}."
+ )
+ yield (lora_info.model, lora.weight)
+ del lora_info
diff --git a/invokeai/app/invocations/qwen_image_image_to_latents.py b/invokeai/app/invocations/qwen_image_image_to_latents.py
new file mode 100644
index 0000000000..c5fe1b5d5c
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_image_to_latents.py
@@ -0,0 +1,96 @@
+import einops
+import torch
+from diffusers.models.autoencoders.autoencoder_kl_qwenimage import AutoencoderKLQwenImage
+from PIL import Image as PILImage
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.fields import (
+ FieldDescriptions,
+ ImageField,
+ Input,
+ InputField,
+ WithBoard,
+ WithMetadata,
+)
+from invokeai.app.invocations.model import VAEField
+from invokeai.app.invocations.primitives import LatentsOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.load.load_base import LoadedModel
+from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation(
+ "qwen_image_i2l",
+ title="Image to Latents - Qwen Image",
+ tags=["image", "latents", "vae", "i2l", "qwen_image"],
+ category="image",
+ version="1.0.0",
+ classification=Classification.Prototype,
+)
+class QwenImageImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
+ """Generates latents from an image using the Qwen Image VAE."""
+
+ image: ImageField = InputField(description="The image to encode.")
+ vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
+ width: int | None = InputField(
+ default=None,
+ description="Resize the image to this width before encoding. If not set, encodes at the image's original size.",
+ )
+ height: int | None = InputField(
+ default=None,
+ description="Resize the image to this height before encoding. If not set, encodes at the image's original size.",
+ )
+
+ @staticmethod
+ def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
+ """Encode an image tensor with the Qwen Image VAE.
+
+ A frame dim is added for the video-style VAE when the input is 4D,
+ the deterministic posterior mode is used, and the result is normalized
+ with the VAE's per-channel latents_mean / latents_std. Returns 5D latents.
+ """
+ with vae_info.model_on_device() as (_, vae):
+ assert isinstance(vae, AutoencoderKLQwenImage)
+
+ vae.disable_tiling()
+
+ image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae.dtype)
+ with torch.inference_mode():
+ # The Qwen Image VAE expects 5D input: (B, C, num_frames, H, W)
+ if image_tensor.dim() == 4:
+ image_tensor = image_tensor.unsqueeze(2)
+
+ posterior = vae.encode(image_tensor).latent_dist
+ # Use mode (argmax) for deterministic encoding, matching diffusers
+ latents: torch.Tensor = posterior.mode().to(dtype=vae.dtype)
+
+ # Normalize with per-channel latents_mean / latents_std
+ latents_mean = (
+ torch.tensor(vae.config.latents_mean)
+ .view(1, vae.config.z_dim, 1, 1, 1)
+ .to(latents.device, latents.dtype)
+ )
+ latents_std = (
+ torch.tensor(vae.config.latents_std)
+ .view(1, vae.config.z_dim, 1, 1, 1)
+ .to(latents.device, latents.dtype)
+ )
+ latents = (latents - latents_mean) / latents_std
+
+ return latents
+
+ @torch.no_grad()
+ def invoke(self, context: InvocationContext) -> LatentsOutput:
+ image = context.images.get_pil(self.image.image_name)
+
+ # If target dimensions are specified, resize the image BEFORE encoding
+ # (matching the diffusers pipeline which resizes in pixel space, not latent space).
+ # NOTE(review): when only ONE of width/height is set, both are silently
+ # ignored — consider validating that they are provided together.
+ if self.width is not None and self.height is not None:
+ image = image.convert("RGB").resize((self.width, self.height), resample=PILImage.LANCZOS)
+
+ image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
+ if image_tensor.dim() == 3:
+ image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
+
+ vae_info = context.models.load(self.vae.vae)
+
+ latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
+
+ # Move to CPU before persisting to the tensor store.
+ latents = latents.to("cpu")
+ name = context.tensors.save(tensor=latents)
+ return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
diff --git a/invokeai/app/invocations/qwen_image_latents_to_image.py b/invokeai/app/invocations/qwen_image_latents_to_image.py
new file mode 100644
index 0000000000..b3ea39c4bb
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_latents_to_image.py
@@ -0,0 +1,85 @@
+from contextlib import nullcontext
+
+import torch
+from diffusers.models.autoencoders.autoencoder_kl_qwenimage import AutoencoderKLQwenImage
+from einops import rearrange
+from PIL import Image
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.fields import (
+ FieldDescriptions,
+ Input,
+ InputField,
+ LatentsField,
+ WithBoard,
+ WithMetadata,
+)
+from invokeai.app.invocations.model import VAEField
+from invokeai.app.invocations.primitives import ImageOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
+from invokeai.backend.util.devices import TorchDevice
+
+
+@invocation(
+ "qwen_image_l2i",
+ title="Latents to Image - Qwen Image",
+ tags=["latents", "image", "vae", "l2i", "qwen_image"],
+ category="latents",
+ version="1.0.0",
+ classification=Classification.Prototype,
+)
+class QwenImageLatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
+ """Generates an image from latents using the Qwen Image VAE."""
+
+ latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+ vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
+
+ @torch.no_grad()
+ def invoke(self, context: InvocationContext) -> ImageOutput:
+ latents = context.tensors.load(self.latents.latents_name)
+
+ vae_info = context.models.load(self.vae.vae)
+ assert isinstance(vae_info.model, AutoencoderKLQwenImage)
+ with (
+ SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
+ vae_info.model_on_device() as (_, vae),
+ ):
+ context.util.signal_progress("Running VAE")
+ assert isinstance(vae, AutoencoderKLQwenImage)
+ latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=vae.dtype)
+
+ vae.disable_tiling()
+
+ tiling_context = nullcontext()
+
+ TorchDevice.empty_cache()
+
+ with torch.inference_mode(), tiling_context:
+ # The Qwen Image VAE uses per-channel latents_mean / latents_std
+ # instead of a single scaling_factor.
+ # Latents are 5D: (B, C, num_frames, H, W) — the unpack from the
+ # denoise step already produces this shape.
+ latents_mean = (
+ torch.tensor(vae.config.latents_mean)
+ .view(1, vae.config.z_dim, 1, 1, 1)
+ .to(latents.device, latents.dtype)
+ )
+ latents_std = 1.0 / torch.tensor(vae.config.latents_std).view(1, vae.config.z_dim, 1, 1, 1).to(
+ latents.device, latents.dtype
+ )
+ latents = latents / latents_std + latents_mean
+
+ img = vae.decode(latents, return_dict=False)[0]
+ # Drop the temporal frame dimension: (B, C, 1, H, W) -> (B, C, H, W)
+ img = img[:, :, 0]
+
+ img = img.clamp(-1, 1)
+ img = rearrange(img[0], "c h w -> h w c")
+ img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())
+
+ TorchDevice.empty_cache()
+
+ image_dto = context.images.save(image=img_pil)
+
+ return ImageOutput.build(image_dto)
diff --git a/invokeai/app/invocations/qwen_image_lora_loader.py b/invokeai/app/invocations/qwen_image_lora_loader.py
new file mode 100644
index 0000000000..f670b2d895
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_lora_loader.py
@@ -0,0 +1,115 @@
+from typing import Optional
+
+from invokeai.app.invocations.baseinvocation import (
+ BaseInvocation,
+ BaseInvocationOutput,
+ Classification,
+ invocation,
+ invocation_output,
+)
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
+from invokeai.app.invocations.model import LoRAField, ModelIdentifierField, TransformerField
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
+
+
+@invocation_output("qwen_image_lora_loader_output")
+class QwenImageLoRALoaderOutput(BaseInvocationOutput):
+ """Qwen Image LoRA Loader Output"""
+
+ transformer: Optional[TransformerField] = OutputField(
+ default=None, description=FieldDescriptions.transformer, title="Transformer"
+ )
+
+
+@invocation(
+ "qwen_image_lora_loader",
+ title="Apply LoRA - Qwen Image",
+ tags=["lora", "model", "qwen_image"],
+ category="model",
+ version="1.0.0",
+ classification=Classification.Prototype,
+)
+class QwenImageLoRALoaderInvocation(BaseInvocation):
+ """Apply a LoRA model to a Qwen Image transformer."""
+
+ lora: ModelIdentifierField = InputField(
+ description=FieldDescriptions.lora_model,
+ title="LoRA",
+ ui_model_base=BaseModelType.QwenImage,
+ ui_model_type=ModelType.LoRA,
+ )
+ weight: float = InputField(default=1.0, description=FieldDescriptions.lora_weight)
+ transformer: TransformerField | None = InputField(
+ default=None,
+ description=FieldDescriptions.transformer,
+ input=Input.Connection,
+ title="Transformer",
+ )
+
+ def invoke(self, context: InvocationContext) -> QwenImageLoRALoaderOutput:
+ lora_key = self.lora.key
+
+ if not context.models.exists(lora_key):
+ raise ValueError(f"Unknown lora: {lora_key}!")
+
+ if self.transformer and any(lora.lora.key == lora_key for lora in self.transformer.loras):
+ raise ValueError(f'LoRA "{lora_key}" already applied to transformer.')
+
+ output = QwenImageLoRALoaderOutput()
+
+ if self.transformer is not None:
+ output.transformer = self.transformer.model_copy(deep=True)
+ output.transformer.loras.append(
+ LoRAField(
+ lora=self.lora,
+ weight=self.weight,
+ )
+ )
+
+ return output
+
+
+@invocation(
+ "qwen_image_lora_collection_loader",
+ title="Apply LoRA Collection - Qwen Image",
+ tags=["lora", "model", "qwen_image"],
+ category="model",
+ version="1.0.0",
+ classification=Classification.Prototype,
+)
+class QwenImageLoRACollectionLoader(BaseInvocation):
+ """Applies a collection of LoRAs to a Qwen Image transformer."""
+
+ loras: Optional[LoRAField | list[LoRAField]] = InputField(
+ default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
+ )
+ transformer: Optional[TransformerField] = InputField(
+ default=None,
+ description=FieldDescriptions.transformer,
+ input=Input.Connection,
+ title="Transformer",
+ )
+
+ def invoke(self, context: InvocationContext) -> QwenImageLoRALoaderOutput:
+ output = QwenImageLoRALoaderOutput()
+ loras = self.loras if isinstance(self.loras, list) else [self.loras]
+ added_loras: list[str] = []
+
+ if self.transformer is not None:
+ output.transformer = self.transformer.model_copy(deep=True)
+
+ for lora in loras:
+ if lora is None:
+ continue
+ if lora.lora.key in added_loras:
+ continue
+ if not context.models.exists(lora.lora.key):
+ raise Exception(f"Unknown lora: {lora.lora.key}!")
+
+ added_loras.append(lora.lora.key)
+
+ if self.transformer is not None and output.transformer is not None:
+ output.transformer.loras.append(lora)
+
+ return output
diff --git a/invokeai/app/invocations/qwen_image_model_loader.py b/invokeai/app/invocations/qwen_image_model_loader.py
new file mode 100644
index 0000000000..fd96067f56
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_model_loader.py
@@ -0,0 +1,107 @@
+from typing import Optional
+
+from invokeai.app.invocations.baseinvocation import (
+ BaseInvocation,
+ BaseInvocationOutput,
+ Classification,
+ invocation,
+ invocation_output,
+)
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
+from invokeai.app.invocations.model import (
+ ModelIdentifierField,
+ QwenVLEncoderField,
+ TransformerField,
+ VAEField,
+)
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType, SubModelType
+
+
+@invocation_output("qwen_image_model_loader_output")
+class QwenImageModelLoaderOutput(BaseInvocationOutput):
+ """Qwen Image model loader output."""
+
+ transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
+ qwen_vl_encoder: QwenVLEncoderField = OutputField(
+ description=FieldDescriptions.qwen_vl_encoder, title="Qwen VL Encoder"
+ )
+ vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
+
+
+@invocation(
+ "qwen_image_model_loader",
+ title="Main Model - Qwen Image",
+ tags=["model", "qwen_image"],
+ category="model",
+ version="1.1.0",
+ classification=Classification.Prototype,
+)
+class QwenImageModelLoaderInvocation(BaseInvocation):
+ """Loads a Qwen Image model, outputting its submodels.
+
+ The transformer is always loaded from the main model (Diffusers or GGUF).
+
+ For GGUF quantized models, the VAE and Qwen VL encoder must come from a
+ separate Diffusers model specified in the "Component Source" field.
+
+ For Diffusers models, all components are extracted from the main model
+ automatically. The "Component Source" field is ignored.
+ """
+
+ model: ModelIdentifierField = InputField(
+ description=FieldDescriptions.qwen_image_model,
+ input=Input.Direct,
+ ui_model_base=BaseModelType.QwenImage,
+ ui_model_type=ModelType.Main,
+ title="Transformer",
+ )
+
+ component_source: Optional[ModelIdentifierField] = InputField(
+ default=None,
+ description="Diffusers Qwen Image model to extract the VAE and Qwen VL encoder from. "
+ "Required when using a GGUF quantized transformer. "
+ "Ignored when the main model is already in Diffusers format.",
+ input=Input.Direct,
+ ui_model_base=BaseModelType.QwenImage,
+ ui_model_type=ModelType.Main,
+ ui_model_format=ModelFormat.Diffusers,
+ title="Component Source (Diffusers)",
+ )
+
+ def invoke(self, context: InvocationContext) -> QwenImageModelLoaderOutput:
+ main_config = context.models.get_config(self.model)
+ main_is_diffusers = main_config.format == ModelFormat.Diffusers
+
+ # Transformer always comes from the main model
+ transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
+
+ if main_is_diffusers:
+ # Diffusers model: extract all components directly
+ vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE})
+ tokenizer = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
+ text_encoder = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
+ elif self.component_source is not None:
+ # GGUF/checkpoint transformer: get VAE + encoder from the component source
+ source_config = context.models.get_config(self.component_source)
+ if source_config.format != ModelFormat.Diffusers:
+ raise ValueError(
+ f"The Component Source model must be in Diffusers format. "
+ f"The selected model '{source_config.name}' is in {source_config.format.value} format."
+ )
+ vae = self.component_source.model_copy(update={"submodel_type": SubModelType.VAE})
+ tokenizer = self.component_source.model_copy(update={"submodel_type": SubModelType.Tokenizer})
+ text_encoder = self.component_source.model_copy(update={"submodel_type": SubModelType.TextEncoder})
+ else:
+ raise ValueError(
+ "No source for VAE and Qwen VL encoder. "
+ "GGUF quantized models only contain the transformer — "
+ "please set 'Component Source' to a Diffusers Qwen Image model "
+ "to provide the VAE and text encoder."
+ )
+
+ return QwenImageModelLoaderOutput(
+ transformer=TransformerField(transformer=transformer, loras=[]),
+ qwen_vl_encoder=QwenVLEncoderField(tokenizer=tokenizer, text_encoder=text_encoder),
+ vae=VAEField(vae=vae),
+ )
diff --git a/invokeai/app/invocations/qwen_image_text_encoder.py b/invokeai/app/invocations/qwen_image_text_encoder.py
new file mode 100644
index 0000000000..a067421452
--- /dev/null
+++ b/invokeai/app/invocations/qwen_image_text_encoder.py
@@ -0,0 +1,298 @@
+from typing import Literal
+
+import torch
+from PIL import Image as PILImage
+
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.fields import (
+ FieldDescriptions,
+ ImageField,
+ Input,
+ InputField,
+ UIComponent,
+)
+from invokeai.app.invocations.model import QwenVLEncoderField
+from invokeai.app.invocations.primitives import QwenImageConditioningOutput
+from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device
+from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
+ ConditioningFieldData,
+ QwenImageConditioningInfo,
+)
+
+# Prompt templates and drop indices for the two Qwen Image model modes.
+# These are taken directly from the diffusers pipelines.
+
+# Image editing mode (QwenImageEditPipeline)
+_EDIT_SYSTEM_PROMPT = (
+ "Describe the key features of the input image (color, shape, size, texture, objects, background), "
+ "then explain how the user's text instruction should alter or modify the image. "
+ "Generate a new image that meets the user's requirements while maintaining consistency "
+ "with the original input where appropriate."
+)
+_EDIT_DROP_IDX = 64
+
+# Text-to-image mode (QwenImagePipeline)
+_GENERATE_SYSTEM_PROMPT = (
+ "Describe the image by detailing the color, shape, size, texture, quantity, "
+ "text, spatial relationships of the objects and background:"
+)
+_GENERATE_DROP_IDX = 34
+
+_IMAGE_PLACEHOLDER = "<|vision_start|><|image_pad|><|vision_end|>"
+
+
+def _build_prompt(user_prompt: str, num_images: int) -> str:
+ """Build the full prompt with the appropriate template based on whether reference images are provided."""
+ if num_images > 0:
+ # Edit mode: include vision placeholders for reference images
+ image_tokens = _IMAGE_PLACEHOLDER * num_images
+ return (
+ f"<|im_start|>system\n{_EDIT_SYSTEM_PROMPT}<|im_end|>\n"
+ f"<|im_start|>user\n{image_tokens}{user_prompt}<|im_end|>\n"
+ "<|im_start|>assistant\n"
+ )
+ else:
+ # Generate mode: text-only prompt
+ return (
+ f"<|im_start|>system\n{_GENERATE_SYSTEM_PROMPT}<|im_end|>\n"
+ f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
+ "<|im_start|>assistant\n"
+ )
+
+
+@invocation(
+ "qwen_image_text_encoder",
+ title="Prompt - Qwen Image",
+ tags=["prompt", "conditioning", "qwen_image"],
+ category="conditioning",
+ version="1.2.0",
+ classification=Classification.Prototype,
+)
+class QwenImageTextEncoderInvocation(BaseInvocation):
+ """Encodes text and reference images for Qwen Image using Qwen2.5-VL."""
+
+ prompt: str = InputField(description="Text prompt describing the desired edit.", ui_component=UIComponent.Textarea)
+ reference_images: list[ImageField] = InputField(
+ default=[],
+ description="Reference images to guide the edit. The model can use multiple reference images.",
+ )
+ qwen_vl_encoder: QwenVLEncoderField = InputField(
+ title="Qwen VL Encoder",
+ description=FieldDescriptions.qwen_vl_encoder,
+ input=Input.Connection,
+ )
+ quantization: Literal["none", "int8", "nf4"] = InputField(
+ default="none",
+ description="Quantize the Qwen VL encoder to reduce VRAM usage. "
+ "'nf4' (4-bit) saves the most memory, 'int8' (8-bit) is a middle ground.",
+ )
+
+ @staticmethod
+ def _resize_for_vl_encoder(image: PILImage.Image, target_pixels: int = 512 * 512) -> PILImage.Image:
+ """Resize image to fit within target_pixels while preserving aspect ratio.
+
+ Matches the diffusers pipeline's calculate_dimensions logic: the image is resized
+ so its total pixel count is approximately target_pixels, with dimensions rounded to
+ multiples of 32. This prevents large images from producing too many vision tokens
+ which can overwhelm the text prompt.
+ """
+ w, h = image.size
+ aspect = w / h
+ # Compute dimensions that preserve aspect ratio at ~target_pixels total
+ new_w = int((target_pixels * aspect) ** 0.5)
+ new_h = int(target_pixels / new_w)
+ # Round to multiples of 32
+ new_w = max(32, (new_w // 32) * 32)
+ new_h = max(32, (new_h // 32) * 32)
+ if new_w != w or new_h != h:
+ image = image.resize((new_w, new_h), resample=PILImage.LANCZOS)
+ return image
+
+ @torch.no_grad()
+ def invoke(self, context: InvocationContext) -> QwenImageConditioningOutput:
+        # Load and resize reference images to ~512x512 total pixels (the _resize_for_vl_encoder default, matching diffusers)
+ pil_images: list[PILImage.Image] = []
+ for img_field in self.reference_images:
+ pil_img = context.images.get_pil(img_field.image_name)
+ pil_img = self._resize_for_vl_encoder(pil_img.convert("RGB"))
+ pil_images.append(pil_img)
+
+ prompt_embeds, prompt_mask = self._encode(context, pil_images)
+ prompt_embeds = prompt_embeds.detach().to("cpu")
+ prompt_mask = prompt_mask.detach().to("cpu") if prompt_mask is not None else None
+
+ conditioning_data = ConditioningFieldData(
+ conditionings=[QwenImageConditioningInfo(prompt_embeds=prompt_embeds, prompt_embeds_mask=prompt_mask)]
+ )
+ conditioning_name = context.conditioning.save(conditioning_data)
+ return QwenImageConditioningOutput.build(conditioning_name)
+
+ def _encode(
+ self, context: InvocationContext, images: list[PILImage.Image]
+ ) -> tuple[torch.Tensor, torch.Tensor | None]:
+ """Encode text prompt and reference images using Qwen2.5-VL.
+
+ Matches the diffusers QwenImagePipeline._get_qwen_prompt_embeds logic:
+ 1. Format prompt with the edit-specific system template
+ 2. Run through Qwen2.5-VL to get hidden states
+ 3. Extract valid (non-padding) tokens and drop the system prefix
+ 4. Return padded embeddings + attention mask
+ """
+ from transformers import AutoTokenizer, Qwen2_5_VLProcessor
+
+ try:
+ from transformers import Qwen2_5_VLImageProcessor as _ImageProcessorCls
+ except ImportError:
+ from transformers.models.qwen2_vl.image_processing_qwen2_vl import ( # type: ignore[no-redef]
+ Qwen2VLImageProcessor as _ImageProcessorCls,
+ )
+
+ try:
+ from transformers import Qwen2_5_VLVideoProcessor as _VideoProcessorCls
+ except ImportError:
+ from transformers.models.qwen2_vl.video_processing_qwen2_vl import ( # type: ignore[no-redef]
+ Qwen2VLVideoProcessor as _VideoProcessorCls,
+ )
+
+ # Format the prompt with one vision placeholder per reference image
+ text = _build_prompt(self.prompt, len(images))
+
+ # Build the processor
+ tokenizer_config = context.models.get_config(self.qwen_vl_encoder.tokenizer)
+ model_root = context.models.get_absolute_path(tokenizer_config)
+ tokenizer_dir = model_root / "tokenizer"
+
+ tokenizer = AutoTokenizer.from_pretrained(str(tokenizer_dir), local_files_only=True)
+
+ image_processor = None
+ for search_dir in [model_root / "processor", tokenizer_dir, model_root, model_root / "image_processor"]:
+ if (search_dir / "preprocessor_config.json").exists():
+ image_processor = _ImageProcessorCls.from_pretrained(str(search_dir), local_files_only=True)
+ break
+ if image_processor is None:
+ image_processor = _ImageProcessorCls()
+
+ processor = Qwen2_5_VLProcessor(
+ tokenizer=tokenizer,
+ image_processor=image_processor,
+ video_processor=_VideoProcessorCls(),
+ )
+
+ context.util.signal_progress("Running Qwen2.5-VL text/vision encoder")
+
+ if self.quantization != "none":
+ text_encoder, device, cleanup = self._load_quantized_encoder(context)
+ else:
+ text_encoder, device, cleanup = self._load_cached_encoder(context)
+
+ try:
+ model_inputs = processor(
+ text=[text],
+ images=images if images else None,
+ padding=True,
+ return_tensors="pt",
+ ).to(device=device)
+
+ outputs = text_encoder(
+ input_ids=model_inputs.input_ids,
+ attention_mask=model_inputs.attention_mask,
+ pixel_values=getattr(model_inputs, "pixel_values", None),
+ image_grid_thw=getattr(model_inputs, "image_grid_thw", None),
+ output_hidden_states=True,
+ )
+
+ # Use last hidden state (matching diffusers pipeline)
+ hidden_states = outputs.hidden_states[-1]
+
+ # Extract valid (non-padding) tokens using the attention mask,
+ # then drop the system prompt prefix tokens.
+ # The drop index differs between edit mode (64) and generate mode (34).
+ drop_idx = _EDIT_DROP_IDX if images else _GENERATE_DROP_IDX
+
+ attn_mask = model_inputs.attention_mask
+ bool_mask = attn_mask.bool()
+ valid_lengths = bool_mask.sum(dim=1)
+ selected = hidden_states[bool_mask]
+ split_hidden = torch.split(selected, valid_lengths.tolist(), dim=0)
+
+ # Drop system prefix tokens and build padded output
+ trimmed = [h[drop_idx:] for h in split_hidden]
+ attn_mask_list = [torch.ones(h.size(0), dtype=torch.long, device=device) for h in trimmed]
+ max_seq_len = max(h.size(0) for h in trimmed)
+
+ prompt_embeds = torch.stack(
+ [torch.cat([h, h.new_zeros(max_seq_len - h.size(0), h.size(1))]) for h in trimmed]
+ )
+ encoder_attention_mask = torch.stack(
+ [torch.cat([m, m.new_zeros(max_seq_len - m.size(0))]) for m in attn_mask_list]
+ )
+
+ prompt_embeds = prompt_embeds.to(dtype=torch.bfloat16)
+ finally:
+ if cleanup is not None:
+ cleanup()
+
+ # If all tokens are valid (no padding), mask is not needed
+ if encoder_attention_mask.all():
+ encoder_attention_mask = None
+
+ return prompt_embeds, encoder_attention_mask
+
+ def _load_cached_encoder(self, context: InvocationContext):
+ """Load the text encoder through the model cache (no quantization)."""
+ from transformers import Qwen2_5_VLForConditionalGeneration
+
+ text_encoder_info = context.models.load(self.qwen_vl_encoder.text_encoder)
+ ctx = text_encoder_info.model_on_device()
+ _, text_encoder = ctx.__enter__()
+ device = get_effective_device(text_encoder)
+ assert isinstance(text_encoder, Qwen2_5_VLForConditionalGeneration)
+ return text_encoder, device, lambda: ctx.__exit__(None, None, None)
+
+ def _load_quantized_encoder(self, context: InvocationContext):
+ """Load the text encoder with BitsAndBytes quantization, bypassing the model cache.
+
+ BnB-quantized models are pinned to GPU and can't be moved between devices,
+ so they can't go through the standard model cache. The model is loaded fresh
+ each time and freed after use via the cleanup callback.
+ """
+ import gc
+ import warnings
+
+ from transformers import BitsAndBytesConfig, Qwen2_5_VLForConditionalGeneration
+
+ encoder_config = context.models.get_config(self.qwen_vl_encoder.text_encoder)
+ model_root = context.models.get_absolute_path(encoder_config)
+ encoder_path = model_root / "text_encoder"
+
+ if self.quantization == "nf4":
+ bnb_config = BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.bfloat16,
+ bnb_4bit_quant_type="nf4",
+ )
+ else: # int8
+ bnb_config = BitsAndBytesConfig(load_in_8bit=True)
+
+ context.util.signal_progress("Loading Qwen2.5-VL encoder (quantized)")
+ with warnings.catch_warnings():
+ # BnB int8 internally casts bfloat16→float16; the warning is harmless
+ warnings.filterwarnings("ignore", message="MatMul8bitLt.*cast.*float16")
+ text_encoder = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+ str(encoder_path),
+ quantization_config=bnb_config,
+ device_map="auto",
+ torch_dtype=torch.bfloat16,
+ local_files_only=True,
+ )
+
+ device = next(text_encoder.parameters()).device
+
+ def cleanup():
+ nonlocal text_encoder
+ del text_encoder
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ return text_encoder, device, cleanup
diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py
index ab6355a393..b263f264cb 100644
--- a/invokeai/app/services/board_records/board_records_common.py
+++ b/invokeai/app/services/board_records/board_records_common.py
@@ -9,6 +9,17 @@ from invokeai.app.util.misc import get_iso_timestamp
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
+class BoardVisibility(str, Enum, metaclass=MetaEnum):
+ """The visibility options for a board."""
+
+ Private = "private"
+ """Only the board owner (and admins) can see and modify this board."""
+ Shared = "shared"
+ """All users can view this board, but only the owner (and admins) can modify it."""
+ Public = "public"
+ """All users can view this board; only the owner (and admins) can modify its structure."""
+
+
class BoardRecord(BaseModelExcludeNull):
"""Deserialized board record."""
@@ -28,6 +39,10 @@ class BoardRecord(BaseModelExcludeNull):
"""The name of the cover image of the board."""
archived: bool = Field(description="Whether or not the board is archived.")
"""Whether or not the board is archived."""
+ board_visibility: BoardVisibility = Field(
+ default=BoardVisibility.Private, description="The visibility of the board."
+ )
+ """The visibility of the board (private, shared, or public)."""
def deserialize_board_record(board_dict: dict) -> BoardRecord:
@@ -44,6 +59,11 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord:
updated_at = board_dict.get("updated_at", get_iso_timestamp())
deleted_at = board_dict.get("deleted_at", get_iso_timestamp())
archived = board_dict.get("archived", False)
+ board_visibility_raw = board_dict.get("board_visibility", BoardVisibility.Private.value)
+ try:
+ board_visibility = BoardVisibility(board_visibility_raw)
+ except ValueError:
+ board_visibility = BoardVisibility.Private
return BoardRecord(
board_id=board_id,
@@ -54,6 +74,7 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord:
updated_at=updated_at,
deleted_at=deleted_at,
archived=archived,
+ board_visibility=board_visibility,
)
@@ -61,6 +82,7 @@ class BoardChanges(BaseModel, extra="forbid"):
board_name: Optional[str] = Field(default=None, description="The board's new name.", max_length=300)
cover_image_name: Optional[str] = Field(default=None, description="The name of the board's new cover image.")
archived: Optional[bool] = Field(default=None, description="Whether or not the board is archived")
+ board_visibility: Optional[BoardVisibility] = Field(default=None, description="The visibility of the board.")
class BoardRecordOrderBy(str, Enum, metaclass=MetaEnum):
diff --git a/invokeai/app/services/board_records/board_records_sqlite.py b/invokeai/app/services/board_records/board_records_sqlite.py
index a54f65686f..1e3e11c8a3 100644
--- a/invokeai/app/services/board_records/board_records_sqlite.py
+++ b/invokeai/app/services/board_records/board_records_sqlite.py
@@ -116,6 +116,17 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
(changes.archived, board_id),
)
+ # Change the visibility of a board
+ if changes.board_visibility is not None:
+ cursor.execute(
+ """--sql
+ UPDATE boards
+ SET board_visibility = ?
+ WHERE board_id = ?;
+ """,
+ (changes.board_visibility.value, board_id),
+ )
+
except sqlite3.Error as e:
raise BoardRecordSaveException from e
return self.get(board_id)
@@ -155,7 +166,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
SELECT DISTINCT boards.*
FROM boards
LEFT JOIN shared_boards ON boards.board_id = shared_boards.board_id
- WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.is_public = 1)
+ WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.board_visibility IN ('shared', 'public'))
{archived_filter}
ORDER BY {order_by} {direction}
LIMIT ? OFFSET ?;
@@ -194,14 +205,14 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
SELECT COUNT(DISTINCT boards.board_id)
FROM boards
LEFT JOIN shared_boards ON boards.board_id = shared_boards.board_id
- WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.is_public = 1);
+ WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.board_visibility IN ('shared', 'public'));
"""
else:
count_query = """
SELECT COUNT(DISTINCT boards.board_id)
FROM boards
LEFT JOIN shared_boards ON boards.board_id = shared_boards.board_id
- WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.is_public = 1)
+ WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.board_visibility IN ('shared', 'public'))
AND boards.archived = 0;
"""
@@ -251,7 +262,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
SELECT DISTINCT boards.*
FROM boards
LEFT JOIN shared_boards ON boards.board_id = shared_boards.board_id
- WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.is_public = 1)
+ WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.board_visibility IN ('shared', 'public'))
{archived_filter}
ORDER BY LOWER(boards.board_name) {direction}
"""
@@ -260,7 +271,7 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
SELECT DISTINCT boards.*
FROM boards
LEFT JOIN shared_boards ON boards.board_id = shared_boards.board_id
- WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.is_public = 1)
+ WHERE (boards.user_id = ? OR shared_boards.user_id = ? OR boards.board_visibility IN ('shared', 'public'))
{archived_filter}
ORDER BY {order_by} {direction}
"""
diff --git a/invokeai/app/services/bulk_download/bulk_download_base.py b/invokeai/app/services/bulk_download/bulk_download_base.py
index 617b611f56..6cd4ed0cba 100644
--- a/invokeai/app/services/bulk_download/bulk_download_base.py
+++ b/invokeai/app/services/bulk_download/bulk_download_base.py
@@ -7,7 +7,11 @@ class BulkDownloadBase(ABC):
@abstractmethod
def handler(
- self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str]
+ self,
+ image_names: Optional[list[str]],
+ board_id: Optional[str],
+ bulk_download_item_id: Optional[str],
+ user_id: str = "system",
) -> None:
"""
Create a zip file containing the images specified by the given image names or board id.
@@ -15,6 +19,7 @@ class BulkDownloadBase(ABC):
:param image_names: A list of image names to include in the zip file.
:param board_id: The ID of the board. If provided, all images associated with the board will be included in the zip file.
:param bulk_download_item_id: The bulk_download_item_id that will be used to retrieve the bulk download item when it is prepared, if none is provided a uuid will be generated.
+ :param user_id: The ID of the user who initiated the download.
"""
@abstractmethod
@@ -42,3 +47,12 @@ class BulkDownloadBase(ABC):
:param bulk_download_item_name: The name of the bulk download item.
"""
+
+ @abstractmethod
+ def get_owner(self, bulk_download_item_name: str) -> Optional[str]:
+ """
+ Get the user_id of the user who initiated the download.
+
+ :param bulk_download_item_name: The name of the bulk download item.
+ :return: The user_id of the owner, or None if not tracked.
+ """
diff --git a/invokeai/app/services/bulk_download/bulk_download_default.py b/invokeai/app/services/bulk_download/bulk_download_default.py
index dc4f8b1d81..c037e9c5c1 100644
--- a/invokeai/app/services/bulk_download/bulk_download_default.py
+++ b/invokeai/app/services/bulk_download/bulk_download_default.py
@@ -25,15 +25,24 @@ class BulkDownloadService(BulkDownloadBase):
self._temp_directory = TemporaryDirectory()
self._bulk_downloads_folder = Path(self._temp_directory.name) / "bulk_downloads"
self._bulk_downloads_folder.mkdir(parents=True, exist_ok=True)
+ # Track which user owns each download so the fetch endpoint can enforce ownership
+ self._download_owners: dict[str, str] = {}
def handler(
- self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str]
+ self,
+ image_names: Optional[list[str]],
+ board_id: Optional[str],
+ bulk_download_item_id: Optional[str],
+ user_id: str = "system",
) -> None:
bulk_download_id: str = DEFAULT_BULK_DOWNLOAD_ID
bulk_download_item_id = bulk_download_item_id or uuid_string()
bulk_download_item_name = bulk_download_item_id + ".zip"
- self._signal_job_started(bulk_download_id, bulk_download_item_id, bulk_download_item_name)
+ # Record ownership so the fetch endpoint can verify the caller
+ self._download_owners[bulk_download_item_name] = user_id
+
+ self._signal_job_started(bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id)
try:
image_dtos: list[ImageDTO] = []
@@ -46,16 +55,16 @@ class BulkDownloadService(BulkDownloadBase):
raise BulkDownloadParametersException()
bulk_download_item_name: str = self._create_zip_file(image_dtos, bulk_download_item_id)
- self._signal_job_completed(bulk_download_id, bulk_download_item_id, bulk_download_item_name)
+ self._signal_job_completed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id)
except (
ImageRecordNotFoundException,
BoardRecordNotFoundException,
BulkDownloadException,
BulkDownloadParametersException,
) as e:
- self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e)
+ self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e, user_id)
except Exception as e:
- self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e)
+ self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e, user_id)
self._invoker.services.logger.error("Problem bulk downloading images.")
raise e
@@ -103,43 +112,60 @@ class BulkDownloadService(BulkDownloadBase):
return "".join([c for c in s if c.isalpha() or c.isdigit() or c == " " or c == "_" or c == "-"]).rstrip()
def _signal_job_started(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> None:
"""Signal that a bulk download job has started."""
if self._invoker:
assert bulk_download_id is not None
self._invoker.services.events.emit_bulk_download_started(
- bulk_download_id, bulk_download_item_id, bulk_download_item_name
+ bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id=user_id
)
def _signal_job_completed(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> None:
"""Signal that a bulk download job has completed."""
if self._invoker:
assert bulk_download_id is not None
assert bulk_download_item_name is not None
self._invoker.services.events.emit_bulk_download_complete(
- bulk_download_id, bulk_download_item_id, bulk_download_item_name
+ bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id=user_id
)
def _signal_job_failed(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, exception: Exception
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ exception: Exception,
+ user_id: str = "system",
) -> None:
"""Signal that a bulk download job has failed."""
if self._invoker:
assert bulk_download_id is not None
assert exception is not None
self._invoker.services.events.emit_bulk_download_error(
- bulk_download_id, bulk_download_item_id, bulk_download_item_name, str(exception)
+ bulk_download_id, bulk_download_item_id, bulk_download_item_name, str(exception), user_id=user_id
)
def stop(self, *args, **kwargs):
self._temp_directory.cleanup()
+ def get_owner(self, bulk_download_item_name: str) -> Optional[str]:
+ return self._download_owners.get(bulk_download_item_name)
+
def delete(self, bulk_download_item_name: str) -> None:
path = self.get_path(bulk_download_item_name)
Path(path).unlink()
+ self._download_owners.pop(bulk_download_item_name, None)
def get_path(self, bulk_download_item_name: str) -> str:
path = str(self._bulk_downloads_folder / bulk_download_item_name)
diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py
index aa1cbb5e0e..935b422a73 100644
--- a/invokeai/app/services/events/events_base.py
+++ b/invokeai/app/services/events/events_base.py
@@ -100,9 +100,9 @@ class EventServiceBase:
"""Emitted when a queue item's status changes"""
self.dispatch(QueueItemStatusChangedEvent.build(queue_item, batch_status, queue_status))
- def emit_batch_enqueued(self, enqueue_result: "EnqueueBatchResult") -> None:
+ def emit_batch_enqueued(self, enqueue_result: "EnqueueBatchResult", user_id: str = "system") -> None:
"""Emitted when a batch is enqueued"""
- self.dispatch(BatchEnqueuedEvent.build(enqueue_result))
+ self.dispatch(BatchEnqueuedEvent.build(enqueue_result, user_id))
def emit_queue_items_retried(self, retry_result: "RetryItemsResult") -> None:
"""Emitted when a list of queue items are retried"""
@@ -112,9 +112,9 @@ class EventServiceBase:
"""Emitted when a queue is cleared"""
self.dispatch(QueueClearedEvent.build(queue_id))
- def emit_recall_parameters_updated(self, queue_id: str, parameters: dict) -> None:
+ def emit_recall_parameters_updated(self, queue_id: str, user_id: str, parameters: dict) -> None:
"""Emitted when recall parameters are updated"""
- self.dispatch(RecallParametersUpdatedEvent.build(queue_id, parameters))
+ self.dispatch(RecallParametersUpdatedEvent.build(queue_id, user_id, parameters))
# endregion
@@ -194,23 +194,42 @@ class EventServiceBase:
# region Bulk image download
def emit_bulk_download_started(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> None:
"""Emitted when a bulk image download is started"""
- self.dispatch(BulkDownloadStartedEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name))
+ self.dispatch(
+ BulkDownloadStartedEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id)
+ )
def emit_bulk_download_complete(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> None:
"""Emitted when a bulk image download is complete"""
- self.dispatch(BulkDownloadCompleteEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name))
+ self.dispatch(
+ BulkDownloadCompleteEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, user_id)
+ )
def emit_bulk_download_error(
- self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, error: str
+ self,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ error: str,
+ user_id: str = "system",
) -> None:
"""Emitted when a bulk image download has an error"""
self.dispatch(
- BulkDownloadErrorEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, error)
+ BulkDownloadErrorEvent.build(
+ bulk_download_id, bulk_download_item_id, bulk_download_item_name, error, user_id
+ )
)
# endregion
diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py
index bfb44eb48e..998fe4f530 100644
--- a/invokeai/app/services/events/events_common.py
+++ b/invokeai/app/services/events/events_common.py
@@ -281,9 +281,10 @@ class BatchEnqueuedEvent(QueueEventBase):
)
priority: int = Field(description="The priority of the batch")
origin: str | None = Field(default=None, description="The origin of the batch")
+ user_id: str = Field(default="system", description="The ID of the user who enqueued the batch")
@classmethod
- def build(cls, enqueue_result: EnqueueBatchResult) -> "BatchEnqueuedEvent":
+ def build(cls, enqueue_result: EnqueueBatchResult, user_id: str = "system") -> "BatchEnqueuedEvent":
return cls(
queue_id=enqueue_result.queue_id,
batch_id=enqueue_result.batch.batch_id,
@@ -291,6 +292,7 @@ class BatchEnqueuedEvent(QueueEventBase):
enqueued=enqueue_result.enqueued,
requested=enqueue_result.requested,
priority=enqueue_result.priority,
+ user_id=user_id,
)
@@ -609,6 +611,7 @@ class BulkDownloadEventBase(EventBase):
bulk_download_id: str = Field(description="The ID of the bulk image download")
bulk_download_item_id: str = Field(description="The ID of the bulk image download item")
bulk_download_item_name: str = Field(description="The name of the bulk image download item")
+ user_id: str = Field(default="system", description="The ID of the user who initiated the download")
@payload_schema.register
@@ -619,12 +622,17 @@ class BulkDownloadStartedEvent(BulkDownloadEventBase):
@classmethod
def build(
- cls, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ cls,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> "BulkDownloadStartedEvent":
return cls(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
+ user_id=user_id,
)
@@ -636,12 +644,17 @@ class BulkDownloadCompleteEvent(BulkDownloadEventBase):
@classmethod
def build(
- cls, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
+ cls,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ user_id: str = "system",
) -> "BulkDownloadCompleteEvent":
return cls(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
+ user_id=user_id,
)
@@ -655,13 +668,19 @@ class BulkDownloadErrorEvent(BulkDownloadEventBase):
@classmethod
def build(
- cls, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, error: str
+ cls,
+ bulk_download_id: str,
+ bulk_download_item_id: str,
+ bulk_download_item_name: str,
+ error: str,
+ user_id: str = "system",
) -> "BulkDownloadErrorEvent":
return cls(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
error=error,
+ user_id=user_id,
)
@@ -671,8 +690,9 @@ class RecallParametersUpdatedEvent(QueueEventBase):
__event_name__ = "recall_parameters_updated"
+ user_id: str = Field(description="The ID of the user whose recall parameters were updated")
parameters: dict[str, Any] = Field(description="The recall parameters that were updated")
@classmethod
- def build(cls, queue_id: str, parameters: dict[str, Any]) -> "RecallParametersUpdatedEvent":
- return cls(queue_id=queue_id, parameters=parameters)
+ def build(cls, queue_id: str, user_id: str, parameters: dict[str, Any]) -> "RecallParametersUpdatedEvent":
+ return cls(queue_id=queue_id, user_id=user_id, parameters=parameters)
diff --git a/invokeai/app/services/events/events_fastapievents.py b/invokeai/app/services/events/events_fastapievents.py
index f44eecc555..90e1402773 100644
--- a/invokeai/app/services/events/events_fastapievents.py
+++ b/invokeai/app/services/events/events_fastapievents.py
@@ -46,3 +46,9 @@ class FastAPIEventService(EventServiceBase):
except asyncio.CancelledError as e:
raise e # Raise a proper error
+ except Exception:
+ import logging
+
+ logging.getLogger("InvokeAI").error(
+ f"Error dispatching event {getattr(event, '__event_name__', event)}", exc_info=True
+ )
diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py
index 16405c5270..457cf2f468 100644
--- a/invokeai/app/services/image_records/image_records_base.py
+++ b/invokeai/app/services/image_records/image_records_base.py
@@ -74,8 +74,8 @@ class ImageRecordStorageBase(ABC):
pass
@abstractmethod
- def get_intermediates_count(self) -> int:
- """Gets a count of all intermediate images."""
+ def get_intermediates_count(self, user_id: Optional[str] = None) -> int:
+ """Gets a count of intermediate images. If user_id is provided, only counts that user's intermediates."""
pass
@abstractmethod
@@ -97,6 +97,11 @@ class ImageRecordStorageBase(ABC):
"""Saves an image record."""
pass
+ @abstractmethod
+ def get_user_id(self, image_name: str) -> Optional[str]:
+ """Gets the user_id of the image owner. Returns None if image not found."""
+ pass
+
@abstractmethod
def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
"""Gets the most recent image for a board."""
diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py
index c6c237fc1e..07126d53a9 100644
--- a/invokeai/app/services/image_records/image_records_sqlite.py
+++ b/invokeai/app/services/image_records/image_records_sqlite.py
@@ -46,6 +46,20 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
return deserialize_image_record(dict(result))
+ def get_user_id(self, image_name: str) -> Optional[str]:
+ with self._db.transaction() as cursor:
+ cursor.execute(
+ """--sql
+ SELECT user_id FROM images
+ WHERE image_name = ?;
+ """,
+ (image_name,),
+ )
+ result = cast(Optional[sqlite3.Row], cursor.fetchone())
+ if not result:
+ return None
+ return cast(Optional[str], dict(result).get("user_id"))
+
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
with self._db.transaction() as cursor:
try:
@@ -269,14 +283,14 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
except sqlite3.Error as e:
raise ImageRecordDeleteException from e
- def get_intermediates_count(self) -> int:
+ def get_intermediates_count(self, user_id: Optional[str] = None) -> int:
with self._db.transaction() as cursor:
- cursor.execute(
- """--sql
- SELECT COUNT(*) FROM images
- WHERE is_intermediate = TRUE;
- """
- )
+ query = "SELECT COUNT(*) FROM images WHERE is_intermediate = TRUE"
+ params: list[str] = []
+ if user_id is not None:
+ query += " AND user_id = ?"
+ params.append(user_id)
+ cursor.execute(query, params)
count = cast(int, cursor.fetchone()[0])
return count
diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py
index d11d75b3c1..aebbead2f3 100644
--- a/invokeai/app/services/images/images_base.py
+++ b/invokeai/app/services/images/images_base.py
@@ -143,8 +143,8 @@ class ImageServiceABC(ABC):
pass
@abstractmethod
- def get_intermediates_count(self) -> int:
- """Gets the number of intermediate images."""
+ def get_intermediates_count(self, user_id: Optional[str] = None) -> int:
+ """Gets the number of intermediate images. If user_id is provided, only counts that user's intermediates."""
pass
@abstractmethod
diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py
index e82bd7f4de..0f03f7c400 100644
--- a/invokeai/app/services/images/images_default.py
+++ b/invokeai/app/services/images/images_default.py
@@ -310,9 +310,9 @@ class ImageService(ImageServiceABC):
self.__invoker.services.logger.error("Problem deleting image records and files")
raise e
- def get_intermediates_count(self) -> int:
+ def get_intermediates_count(self, user_id: Optional[str] = None) -> int:
try:
- return self.__invoker.services.image_records.get_intermediates_count()
+ return self.__invoker.services.image_records.get_intermediates_count(user_id=user_id)
except Exception as e:
self.__invoker.services.logger.error("Problem getting intermediates count")
raise e
diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py
index 361c2e4811..49d3cfdf7f 100644
--- a/invokeai/app/services/model_install/model_install_default.py
+++ b/invokeai/app/services/model_install/model_install_default.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
import torch
import yaml
-from huggingface_hub import HfFolder
+from huggingface_hub import get_token as hf_get_token
from pydantic.networks import AnyHttpUrl
from pydantic_core import Url
from requests import Session
@@ -1115,7 +1115,7 @@ class ModelInstallService(ModelInstallServiceBase):
) -> ModelInstallJob:
# Add user's cached access token to HuggingFace requests
if source.access_token is None:
- source.access_token = HfFolder.get_token()
+ source.access_token = hf_get_token()
remote_files, metadata = self._remote_files_from_source(source)
return self._import_remote_model(
source=source,
diff --git a/invokeai/app/services/model_records/model_records_base.py b/invokeai/app/services/model_records/model_records_base.py
index 318ebb000e..6420949c29 100644
--- a/invokeai/app/services/model_records/model_records_base.py
+++ b/invokeai/app/services/model_records/model_records_base.py
@@ -30,6 +30,7 @@ from invokeai.backend.model_manager.taxonomy import (
ModelType,
ModelVariantType,
Qwen3VariantType,
+ QwenImageVariantType,
SchedulerPredictionType,
ZImageVariantType,
)
@@ -109,7 +110,13 @@ class ModelRecordChanges(BaseModelExcludeNull):
# Checkpoint-specific changes
# TODO(MM2): Should we expose these? Feels footgun-y...
variant: Optional[
- ModelVariantType | ClipVariantType | FluxVariantType | Flux2VariantType | ZImageVariantType | Qwen3VariantType
+ ModelVariantType
+ | ClipVariantType
+ | FluxVariantType
+ | Flux2VariantType
+ | ZImageVariantType
+ | QwenImageVariantType
+ | Qwen3VariantType
] = Field(description="The variant of the model.", default=None)
prediction_type: Optional[SchedulerPredictionType] = Field(
description="The prediction type of the model.", default=None
diff --git a/invokeai/app/services/session_queue/session_queue_base.py b/invokeai/app/services/session_queue/session_queue_base.py
index 3c037dc77a..14b93d97fc 100644
--- a/invokeai/app/services/session_queue/session_queue_base.py
+++ b/invokeai/app/services/session_queue/session_queue_base.py
@@ -78,13 +78,15 @@ class SessionQueueBase(ABC):
pass
@abstractmethod
- def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
- """Gets the counts of queue items by destination"""
+ def get_counts_by_destination(
+ self, queue_id: str, destination: str, user_id: Optional[str] = None
+ ) -> SessionQueueCountsByDestination:
+ """Gets the counts of queue items by destination. If user_id is provided, only counts that user's items."""
pass
@abstractmethod
- def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
- """Gets the status of a batch"""
+ def get_batch_status(self, queue_id: str, batch_id: str, user_id: Optional[str] = None) -> BatchStatus:
+ """Gets the status of a batch. If user_id is provided, only counts that user's items."""
pass
@abstractmethod
@@ -172,8 +174,9 @@ class SessionQueueBase(ABC):
self,
queue_id: str,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
+ user_id: Optional[str] = None,
) -> ItemIdsResult:
- """Gets all queue item ids that match the given parameters"""
+ """Gets all queue item ids that match the given parameters. If user_id is provided, only returns items for that user."""
pass
@abstractmethod
diff --git a/invokeai/app/services/session_queue/session_queue_common.py b/invokeai/app/services/session_queue/session_queue_common.py
index 5854442211..09820fe621 100644
--- a/invokeai/app/services/session_queue/session_queue_common.py
+++ b/invokeai/app/services/session_queue/session_queue_common.py
@@ -304,12 +304,6 @@ class SessionQueueStatus(BaseModel):
failed: int = Field(..., description="Number of queue items with status 'error'")
canceled: int = Field(..., description="Number of queue items with status 'canceled'")
total: int = Field(..., description="Total number of queue items")
- user_pending: Optional[int] = Field(
- default=None, description="Number of queue items with status 'pending' for the current user"
- )
- user_in_progress: Optional[int] = Field(
- default=None, description="Number of queue items with status 'in_progress' for the current user"
- )
class SessionQueueCountsByDestination(BaseModel):
diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py
index 4f46136fd7..070a7cef29 100644
--- a/invokeai/app/services/session_queue/session_queue_sqlite.py
+++ b/invokeai/app/services/session_queue/session_queue_sqlite.py
@@ -151,7 +151,7 @@ class SqliteSessionQueue(SessionQueueBase):
priority=priority,
item_ids=item_ids,
)
- self.__invoker.services.events.emit_batch_enqueued(enqueue_result)
+ self.__invoker.services.events.emit_batch_enqueued(enqueue_result, user_id=user_id)
return enqueue_result
def dequeue(self) -> Optional[SessionQueueItem]:
@@ -765,15 +765,21 @@ class SqliteSessionQueue(SessionQueueBase):
self,
queue_id: str,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
+ user_id: Optional[str] = None,
) -> ItemIdsResult:
with self._db.transaction() as cursor_:
- query = f"""--sql
+ query = """--sql
SELECT item_id
FROM session_queue
WHERE queue_id = ?
- ORDER BY created_at {order_dir.value}
"""
- query_params = [queue_id]
+ query_params: list[str] = [queue_id]
+
+ if user_id is not None:
+ query += " AND user_id = ?"
+ query_params.append(user_id)
+
+ query += f" ORDER BY created_at {order_dir.value}"
cursor_.execute(query, query_params)
result = cast(list[sqlite3.Row], cursor_.fetchall())
@@ -783,20 +789,7 @@ class SqliteSessionQueue(SessionQueueBase):
def get_queue_status(self, queue_id: str, user_id: Optional[str] = None) -> SessionQueueStatus:
with self._db.transaction() as cursor:
- # Get total counts
- cursor.execute(
- """--sql
- SELECT status, count(*)
- FROM session_queue
- WHERE queue_id = ?
- GROUP BY status
- """,
- (queue_id,),
- )
- counts_result = cast(list[sqlite3.Row], cursor.fetchall())
-
- # Get user-specific counts if user_id is provided (using a single query with CASE)
- user_counts_result = []
+ # When user_id is provided (non-admin), only count that user's items
if user_id is not None:
cursor.execute(
"""--sql
@@ -807,48 +800,51 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id, user_id),
)
- user_counts_result = cast(list[sqlite3.Row], cursor.fetchall())
+ else:
+ cursor.execute(
+ """--sql
+ SELECT status, count(*)
+ FROM session_queue
+ WHERE queue_id = ?
+ GROUP BY status
+ """,
+ (queue_id,),
+ )
+ counts_result = cast(list[sqlite3.Row], cursor.fetchall())
current_item = self.get_current(queue_id=queue_id)
total = sum(row[1] or 0 for row in counts_result)
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
- # Process user-specific counts if available
- user_pending = None
- user_in_progress = None
- if user_id is not None:
- user_counts: dict[str, int] = {row[0]: row[1] for row in user_counts_result}
- user_pending = user_counts.get("pending", 0)
- user_in_progress = user_counts.get("in_progress", 0)
+ # For non-admin users, hide current item details if they don't own it
+ show_current_item = current_item is not None and (user_id is None or current_item.user_id == user_id)
return SessionQueueStatus(
queue_id=queue_id,
- item_id=current_item.item_id if current_item else None,
- session_id=current_item.session_id if current_item else None,
- batch_id=current_item.batch_id if current_item else None,
+ item_id=current_item.item_id if show_current_item else None,
+ session_id=current_item.session_id if show_current_item else None,
+ batch_id=current_item.batch_id if show_current_item else None,
pending=counts.get("pending", 0),
in_progress=counts.get("in_progress", 0),
completed=counts.get("completed", 0),
failed=counts.get("failed", 0),
canceled=counts.get("canceled", 0),
total=total,
- user_pending=user_pending,
- user_in_progress=user_in_progress,
)
- def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
+ def get_batch_status(self, queue_id: str, batch_id: str, user_id: Optional[str] = None) -> BatchStatus:
with self._db.transaction() as cursor:
- cursor.execute(
- """--sql
+ query = """--sql
SELECT status, count(*), origin, destination
FROM session_queue
- WHERE
- queue_id = ?
- AND batch_id = ?
- GROUP BY status
- """,
- (queue_id, batch_id),
- )
+ WHERE queue_id = ? AND batch_id = ?
+ """
+ params: list[str] = [queue_id, batch_id]
+ if user_id is not None:
+ query += " AND user_id = ?"
+ params.append(user_id)
+ query += " GROUP BY status"
+ cursor.execute(query, params)
result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] or 0 for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
@@ -868,18 +864,21 @@ class SqliteSessionQueue(SessionQueueBase):
total=total,
)
- def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
+ def get_counts_by_destination(
+ self, queue_id: str, destination: str, user_id: Optional[str] = None
+ ) -> SessionQueueCountsByDestination:
with self._db.transaction() as cursor:
- cursor.execute(
- """--sql
+ query = """--sql
SELECT status, count(*)
FROM session_queue
- WHERE queue_id = ?
- AND destination = ?
- GROUP BY status
- """,
- (queue_id, destination),
- )
+ WHERE queue_id = ? AND destination = ?
+ """
+ params: list[str] = [queue_id, destination]
+ if user_id is not None:
+ query += " AND user_id = ?"
+ params.append(user_id)
+ query += " GROUP BY status"
+ cursor.execute(query, params)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] or 0 for row in counts_result)
diff --git a/invokeai/app/services/shared/sqlite/sqlite_util.py b/invokeai/app/services/shared/sqlite/sqlite_util.py
index 645509f1dd..fb8ca9fca3 100644
--- a/invokeai/app/services/shared/sqlite/sqlite_util.py
+++ b/invokeai/app/services/shared/sqlite/sqlite_util.py
@@ -30,6 +30,8 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_24 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_25 import build_migration_25
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_26 import build_migration_26
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_27 import build_migration_27
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_28 import build_migration_28
+from invokeai.app.services.shared.sqlite_migrator.migrations.migration_29 import build_migration_29
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -77,6 +79,8 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_25(app_config=config, logger=logger))
migrator.register_migration(build_migration_26(app_config=config, logger=logger))
migrator.register_migration(build_migration_27())
+ migrator.register_migration(build_migration_28())
+ migrator.register_migration(build_migration_29())
migrator.run_migrations()
return db
diff --git a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_28.py b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_28.py
new file mode 100644
index 0000000000..0cbd683ab5
--- /dev/null
+++ b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_28.py
@@ -0,0 +1,45 @@
+"""Migration 28: Add per-user workflow isolation columns to workflow_library.
+
+This migration adds the database columns required for multiuser workflow isolation
+to the workflow_library table:
+- user_id: the owner of the workflow (defaults to 'system' for existing workflows)
+- is_public: whether the workflow is shared with all users
+"""
+
+import sqlite3
+
+from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
+
+
class Migration28Callback:
    """Migration to add per-user workflow isolation columns to the workflow_library table.

    Columns added:
    - ``user_id``: the owner of the workflow; existing rows default to ``'system'``.
    - ``is_public``: whether the workflow is shared with all users; defaults to FALSE.
    """

    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._update_workflow_library_table(cursor)

    def _update_workflow_library_table(self, cursor: sqlite3.Cursor) -> None:
        """Add user_id and is_public columns (and their indexes) to workflow_library."""
        cursor.execute("PRAGMA table_info(workflow_library);")
        columns = [row[1] for row in cursor.fetchall()]

        if "user_id" not in columns:
            cursor.execute("ALTER TABLE workflow_library ADD COLUMN user_id TEXT DEFAULT 'system';")
        # Index creation is outside the column guard so a pre-existing column
        # (e.g. from a partially-applied schema change) still gets its index;
        # IF NOT EXISTS keeps this idempotent.
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_workflow_library_user_id ON workflow_library(user_id);")

        if "is_public" not in columns:
            cursor.execute("ALTER TABLE workflow_library ADD COLUMN is_public BOOLEAN NOT NULL DEFAULT FALSE;")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_workflow_library_is_public ON workflow_library(is_public);")
+
+
def build_migration_28() -> Migration:
    """Builds the migration object for migrating from version 27 to version 28.

    This migration adds per-user workflow isolation to the workflow_library table:
    - user_id column: identifies the owner of each workflow
    - is_public column: controls whether a workflow is shared with all users
    """
    migration_28 = Migration(
        from_version=27,
        to_version=28,
        callback=Migration28Callback(),
    )
    return migration_28
diff --git a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_29.py b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_29.py
new file mode 100644
index 0000000000..c9eb7c901b
--- /dev/null
+++ b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_29.py
@@ -0,0 +1,53 @@
+"""Migration 29: Add board_visibility column to boards table.
+
+This migration adds a board_visibility column to the boards table to support
+three visibility levels:
+ - 'private': only the board owner (and admins) can view/modify
+ - 'shared': all users can view, but only the owner (and admins) can modify
+ - 'public': all users can view; only the owner (and admins) can modify the
+ board structure (rename/archive/delete)
+
+Existing boards with is_public = 1 are migrated to 'public'.
+All other existing boards default to 'private'.
+"""
+
+import sqlite3
+
+from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
+
+
class Migration29Callback:
    """Migration to add the board_visibility column to the boards table.

    Visibility levels are 'private' (default), 'shared', and 'public'. Boards
    with ``is_public = 1`` are backfilled to 'public' only when the column is
    first added, so a re-run never clobbers a visibility the user changed later.
    """

    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._update_boards_table(cursor)

    def _update_boards_table(self, cursor: sqlite3.Cursor) -> None:
        """Add board_visibility column to the boards table, if the table exists."""
        # Some databases may not have a boards table; nothing to do in that case.
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='boards';")
        if cursor.fetchone() is None:
            return

        cursor.execute("PRAGMA table_info(boards);")
        columns = [row[1] for row in cursor.fetchall()]

        if "board_visibility" not in columns:
            cursor.execute("ALTER TABLE boards ADD COLUMN board_visibility TEXT NOT NULL DEFAULT 'private';")
            # Backfill happens only on first add: re-running after a user has
            # changed a board's visibility must not reset it to 'public'.
            if "is_public" in columns:
                cursor.execute("UPDATE boards SET board_visibility = 'public' WHERE is_public = 1;")
        # Index creation is outside the column guard so a pre-existing column
        # still gets its index; IF NOT EXISTS keeps this idempotent.
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_boards_board_visibility ON boards(board_visibility);")
+
+
def build_migration_29() -> Migration:
    """Builds the migration object for migrating from version 28 to version 29.

    This migration adds the board_visibility column to the boards table,
    supporting 'private', 'shared', and 'public' visibility levels.
    """
    migration_29 = Migration(
        from_version=28,
        to_version=29,
        callback=Migration29Callback(),
    )
    return migration_29
diff --git a/invokeai/app/services/users/users_base.py b/invokeai/app/services/users/users_base.py
index 728a0adfa3..dd789b561e 100644
--- a/invokeai/app/services/users/users_base.py
+++ b/invokeai/app/services/users/users_base.py
@@ -131,6 +131,15 @@ class UserServiceBase(ABC):
"""
pass
+ @abstractmethod
+ def get_admin_email(self) -> str | None:
+ """Get the email address of the first active admin user.
+
+ Returns:
+ Email address of the first active admin, or None if no admin exists
+ """
+ pass
+
@abstractmethod
def count_admins(self) -> int:
"""Count active admin users.
diff --git a/invokeai/app/services/users/users_default.py b/invokeai/app/services/users/users_default.py
index 709e4cb82c..6e47288212 100644
--- a/invokeai/app/services/users/users_default.py
+++ b/invokeai/app/services/users/users_default.py
@@ -256,6 +256,20 @@ class UserService(UserServiceBase):
for row in rows
]
+ def get_admin_email(self) -> str | None:
+ """Get the email address of the first active admin user."""
+ with self._db.transaction() as cursor:
+ cursor.execute(
+ """
+ SELECT email FROM users
+ WHERE is_admin = TRUE AND is_active = TRUE
+ ORDER BY created_at ASC
+ LIMIT 1
+ """,
+ )
+ row = cursor.fetchone()
+ return row[0] if row else None
+
def count_admins(self) -> int:
"""Count active admin users."""
with self._db.transaction() as cursor:
diff --git a/invokeai/app/services/workflow_records/workflow_records_base.py b/invokeai/app/services/workflow_records/workflow_records_base.py
index d5cf319594..856a6c6d49 100644
--- a/invokeai/app/services/workflow_records/workflow_records_base.py
+++ b/invokeai/app/services/workflow_records/workflow_records_base.py
@@ -4,6 +4,7 @@ from typing import Optional
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.workflow_records.workflow_records_common import (
+ WORKFLOW_LIBRARY_DEFAULT_USER_ID,
Workflow,
WorkflowCategory,
WorkflowRecordDTO,
@@ -22,18 +23,18 @@ class WorkflowRecordsStorageBase(ABC):
pass
@abstractmethod
- def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
+ def create(self, workflow: WorkflowWithoutID, user_id: str = WORKFLOW_LIBRARY_DEFAULT_USER_ID) -> WorkflowRecordDTO:
"""Creates a workflow."""
pass
@abstractmethod
- def update(self, workflow: Workflow) -> WorkflowRecordDTO:
- """Updates a workflow."""
+ def update(self, workflow: Workflow, user_id: Optional[str] = None) -> WorkflowRecordDTO:
+ """Updates a workflow. When user_id is provided, the UPDATE is scoped to that user."""
pass
@abstractmethod
- def delete(self, workflow_id: str) -> None:
- """Deletes a workflow."""
+ def delete(self, workflow_id: str, user_id: Optional[str] = None) -> None:
+ """Deletes a workflow. When user_id is provided, the DELETE is scoped to that user."""
pass
@abstractmethod
@@ -47,6 +48,8 @@ class WorkflowRecordsStorageBase(ABC):
query: Optional[str],
tags: Optional[list[str]],
has_been_opened: Optional[bool],
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
"""Gets many workflows."""
pass
@@ -56,6 +59,8 @@ class WorkflowRecordsStorageBase(ABC):
self,
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided categories."""
pass
@@ -66,19 +71,28 @@ class WorkflowRecordsStorageBase(ABC):
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided tags."""
pass
@abstractmethod
- def update_opened_at(self, workflow_id: str) -> None:
- """Open a workflow."""
+ def update_opened_at(self, workflow_id: str, user_id: Optional[str] = None) -> None:
+ """Open a workflow. When user_id is provided, the UPDATE is scoped to that user."""
pass
@abstractmethod
def get_all_tags(
self,
categories: Optional[list[WorkflowCategory]] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> list[str]:
"""Gets all unique tags from workflows."""
pass
+
+ @abstractmethod
+ def update_is_public(self, workflow_id: str, is_public: bool, user_id: Optional[str] = None) -> WorkflowRecordDTO:
+ """Updates the is_public field of a workflow. When user_id is provided, the UPDATE is scoped to that user."""
+ pass
diff --git a/invokeai/app/services/workflow_records/workflow_records_common.py b/invokeai/app/services/workflow_records/workflow_records_common.py
index e0cea37468..9c505530c9 100644
--- a/invokeai/app/services/workflow_records/workflow_records_common.py
+++ b/invokeai/app/services/workflow_records/workflow_records_common.py
@@ -9,6 +9,9 @@ from invokeai.app.util.metaenum import MetaEnum
__workflow_meta_version__ = semver.Version.parse("1.0.0")
+WORKFLOW_LIBRARY_DEFAULT_USER_ID = "system"
+"""Default user_id for workflows created in single-user mode or migrated from pre-multiuser databases."""
+
class ExposedField(BaseModel):
nodeId: str
@@ -26,6 +29,7 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
UpdatedAt = "updated_at"
OpenedAt = "opened_at"
Name = "name"
+ IsPublic = "is_public"
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
@@ -100,6 +104,8 @@ class WorkflowRecordDTOBase(BaseModel):
opened_at: Optional[Union[datetime.datetime, str]] = Field(
default=None, description="The opened timestamp of the workflow."
)
+ user_id: str = Field(description="The id of the user who owns this workflow.")
+ is_public: bool = Field(description="Whether this workflow is shared with all users.")
class WorkflowRecordDTO(WorkflowRecordDTOBase):
diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
index 0f72f7cd92..c83d87eff6 100644
--- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py
+++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
@@ -7,6 +7,7 @@ from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
from invokeai.app.services.workflow_records.workflow_records_common import (
+ WORKFLOW_LIBRARY_DEFAULT_USER_ID,
Workflow,
WorkflowCategory,
WorkflowNotFoundError,
@@ -36,7 +37,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
- SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
+ SELECT workflow_id, workflow, name, created_at, updated_at, opened_at, user_id, is_public
FROM workflow_library
WHERE workflow_id = ?;
""",
@@ -47,7 +48,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
return WorkflowRecordDTO.from_dict(dict(row))
- def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
+ def create(self, workflow: WorkflowWithoutID, user_id: str = WORKFLOW_LIBRARY_DEFAULT_USER_ID) -> WorkflowRecordDTO:
if workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be created via this method")
@@ -57,43 +58,98 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
"""--sql
INSERT OR IGNORE INTO workflow_library (
workflow_id,
- workflow
+ workflow,
+ user_id
)
- VALUES (?, ?);
+ VALUES (?, ?, ?);
""",
- (workflow_with_id.id, workflow_with_id.model_dump_json()),
+ (workflow_with_id.id, workflow_with_id.model_dump_json(), user_id),
)
return self.get(workflow_with_id.id)
- def update(self, workflow: Workflow) -> WorkflowRecordDTO:
+ def update(self, workflow: Workflow, user_id: Optional[str] = None) -> WorkflowRecordDTO:
if workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be updated")
with self._db.transaction() as cursor:
- cursor.execute(
- """--sql
- UPDATE workflow_library
- SET workflow = ?
- WHERE workflow_id = ? AND category = 'user';
- """,
- (workflow.model_dump_json(), workflow.id),
- )
+ if user_id is not None:
+ cursor.execute(
+ """--sql
+ UPDATE workflow_library
+ SET workflow = ?
+ WHERE workflow_id = ? AND category = 'user' AND user_id = ?;
+ """,
+ (workflow.model_dump_json(), workflow.id, user_id),
+ )
+ else:
+ cursor.execute(
+ """--sql
+ UPDATE workflow_library
+ SET workflow = ?
+ WHERE workflow_id = ? AND category = 'user';
+ """,
+ (workflow.model_dump_json(), workflow.id),
+ )
return self.get(workflow.id)
- def delete(self, workflow_id: str) -> None:
+ def delete(self, workflow_id: str, user_id: Optional[str] = None) -> None:
if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be deleted")
with self._db.transaction() as cursor:
- cursor.execute(
- """--sql
- DELETE from workflow_library
- WHERE workflow_id = ? AND category = 'user';
- """,
- (workflow_id,),
- )
+ if user_id is not None:
+ cursor.execute(
+ """--sql
+ DELETE from workflow_library
+ WHERE workflow_id = ? AND category = 'user' AND user_id = ?;
+ """,
+ (workflow_id, user_id),
+ )
+ else:
+ cursor.execute(
+ """--sql
+ DELETE from workflow_library
+ WHERE workflow_id = ? AND category = 'user';
+ """,
+ (workflow_id,),
+ )
return None
+ def update_is_public(self, workflow_id: str, is_public: bool, user_id: Optional[str] = None) -> WorkflowRecordDTO:
+ """Updates the is_public field of a workflow and manages the 'shared' tag automatically."""
+ record = self.get(workflow_id)
+ workflow = record.workflow
+
+ # Manage "shared" tag: add when public, remove when private
+ tags_list = [t.strip() for t in workflow.tags.split(",") if t.strip()] if workflow.tags else []
+ if is_public and "shared" not in tags_list:
+ tags_list.append("shared")
+ elif not is_public and "shared" in tags_list:
+ tags_list.remove("shared")
+ updated_tags = ", ".join(tags_list)
+ updated_workflow = workflow.model_copy(update={"tags": updated_tags})
+
+ with self._db.transaction() as cursor:
+ if user_id is not None:
+ cursor.execute(
+ """--sql
+ UPDATE workflow_library
+ SET workflow = ?, is_public = ?
+ WHERE workflow_id = ? AND category = 'user' AND user_id = ?;
+ """,
+ (updated_workflow.model_dump_json(), is_public, workflow_id, user_id),
+ )
+ else:
+ cursor.execute(
+ """--sql
+ UPDATE workflow_library
+ SET workflow = ?, is_public = ?
+ WHERE workflow_id = ? AND category = 'user';
+ """,
+ (updated_workflow.model_dump_json(), is_public, workflow_id),
+ )
+ return self.get(workflow_id)
+
def get_many(
self,
order_by: WorkflowRecordOrderBy,
@@ -104,6 +160,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
query: Optional[str] = None,
tags: Optional[list[str]] = None,
has_been_opened: Optional[bool] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
with self._db.transaction() as cursor:
# sanitize!
@@ -122,7 +180,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
created_at,
updated_at,
opened_at,
- tags
+ tags,
+ user_id,
+ is_public
FROM workflow_library
"""
count_query = "SELECT COUNT(*) FROM workflow_library"
@@ -177,6 +237,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
conditions.append(query_condition)
params.extend([wildcard_query, wildcard_query, wildcard_query])
+ if user_id is not None:
+ # Scope to the given user but always include default workflows
+ conditions.append("(user_id = ? OR category = 'default')")
+ params.append(user_id)
+
+ if is_public is True:
+ conditions.append("is_public = TRUE")
+ elif is_public is False:
+ conditions.append("is_public = FALSE")
+
if conditions:
# If there are conditions, add a WHERE clause and then join the conditions
main_query += " WHERE "
@@ -226,6 +296,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> dict[str, int]:
if not tags:
return {}
@@ -248,6 +320,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
+ if user_id is not None:
+ # Scope to the given user but always include default workflows
+ base_conditions.append("(user_id = ? OR category = 'default')")
+ base_params.append(user_id)
+
+ if is_public is True:
+ base_conditions.append("is_public = TRUE")
+ elif is_public is False:
+ base_conditions.append("is_public = FALSE")
+
# For each tag to count, run a separate query
for tag in tags:
# Start with the base conditions
@@ -277,6 +359,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
self,
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> dict[str, int]:
with self._db.transaction() as cursor:
result: dict[str, int] = {}
@@ -296,6 +380,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
+ if user_id is not None:
+ # Scope to the given user but always include default workflows
+ base_conditions.append("(user_id = ? OR category = 'default')")
+ base_params.append(user_id)
+
+ if is_public is True:
+ base_conditions.append("is_public = TRUE")
+ elif is_public is False:
+ base_conditions.append("is_public = FALSE")
+
# For each category to count, run a separate query
for category in categories:
# Start with the base conditions
@@ -321,20 +415,32 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
return result
- def update_opened_at(self, workflow_id: str) -> None:
+ def update_opened_at(self, workflow_id: str, user_id: Optional[str] = None) -> None:
with self._db.transaction() as cursor:
- cursor.execute(
- f"""--sql
- UPDATE workflow_library
- SET opened_at = STRFTIME('{SQL_TIME_FORMAT}', 'NOW')
- WHERE workflow_id = ?;
- """,
- (workflow_id,),
- )
+ if user_id is not None:
+ cursor.execute(
+ f"""--sql
+ UPDATE workflow_library
+ SET opened_at = STRFTIME('{SQL_TIME_FORMAT}', 'NOW')
+ WHERE workflow_id = ? AND user_id = ?;
+ """,
+ (workflow_id, user_id),
+ )
+ else:
+ cursor.execute(
+ f"""--sql
+ UPDATE workflow_library
+ SET opened_at = STRFTIME('{SQL_TIME_FORMAT}', 'NOW')
+ WHERE workflow_id = ?;
+ """,
+ (workflow_id,),
+ )
def get_all_tags(
self,
categories: Optional[list[WorkflowCategory]] = None,
+ user_id: Optional[str] = None,
+ is_public: Optional[bool] = None,
) -> list[str]:
with self._db.transaction() as cursor:
conditions: list[str] = []
@@ -349,6 +455,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
conditions.append(f"category IN ({placeholders})")
params.extend([category.value for category in categories])
+ if user_id is not None:
+ # Scope to the given user but always include default workflows
+ conditions.append("(user_id = ? OR category = 'default')")
+ params.append(user_id)
+
+ if is_public is True:
+ conditions.append("is_public = TRUE")
+ elif is_public is False:
+ conditions.append("is_public = FALSE")
+
stmt = """--sql
SELECT DISTINCT tags
FROM workflow_library
diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py
index 0e2faeca39..08dc9a2265 100644
--- a/invokeai/app/util/step_callback.py
+++ b/invokeai/app/util/step_callback.py
@@ -93,6 +93,29 @@ COGVIEW4_LATENT_RGB_FACTORS = [
[-0.00955853, -0.00980067, -0.00977842],
]
+# Qwen Image uses the same VAE as Wan 2.1 (16-channel).
+# Factors from ComfyUI: https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/latent_formats.py
+QWEN_IMAGE_LATENT_RGB_FACTORS = [
+ [-0.1299, -0.1692, 0.2932],
+ [0.0671, 0.0406, 0.0442],
+ [0.3568, 0.2548, 0.1747],
+ [0.0372, 0.2344, 0.1420],
+ [0.0313, 0.0189, -0.0328],
+ [0.0296, -0.0956, -0.0665],
+ [-0.3477, -0.4059, -0.2925],
+ [0.0166, 0.1902, 0.1975],
+ [-0.0412, 0.0267, -0.1364],
+ [-0.1293, 0.0740, 0.1636],
+ [0.0680, 0.3019, 0.1128],
+ [0.0032, 0.0581, 0.0639],
+ [-0.1251, 0.0927, 0.1699],
+ [0.0060, -0.0633, 0.0005],
+ [0.3477, 0.2275, 0.2950],
+ [0.1984, 0.0913, 0.1861],
+]
+
+QWEN_IMAGE_LATENT_RGB_BIAS = [-0.1835, -0.0868, -0.3360]
+
# FLUX.2 uses 32 latent channels.
# Factors from ComfyUI: https://github.com/Comfy-Org/ComfyUI/blob/main/comfy/latent_formats.py
FLUX2_LATENT_RGB_FACTORS = [
@@ -232,6 +255,9 @@ def diffusion_step_callback(
latent_rgb_factors = SD3_5_LATENT_RGB_FACTORS
elif base_model == BaseModelType.CogView4:
latent_rgb_factors = COGVIEW4_LATENT_RGB_FACTORS
+ elif base_model == BaseModelType.QwenImage:
+ latent_rgb_factors = QWEN_IMAGE_LATENT_RGB_FACTORS
+ latent_rgb_bias = QWEN_IMAGE_LATENT_RGB_BIAS
elif base_model == BaseModelType.Flux:
latent_rgb_factors = FLUX_LATENT_RGB_FACTORS
elif base_model == BaseModelType.Flux2:
diff --git a/invokeai/backend/model_manager/configs/factory.py b/invokeai/backend/model_manager/configs/factory.py
index 3865ea562a..4d26b4c334 100644
--- a/invokeai/backend/model_manager/configs/factory.py
+++ b/invokeai/backend/model_manager/configs/factory.py
@@ -50,6 +50,7 @@ from invokeai.backend.model_manager.configs.lora import (
LoRA_LyCORIS_Anima_Config,
LoRA_LyCORIS_Flux2_Config,
LoRA_LyCORIS_FLUX_Config,
+ LoRA_LyCORIS_QwenImage_Config,
LoRA_LyCORIS_SD1_Config,
LoRA_LyCORIS_SD2_Config,
LoRA_LyCORIS_SDXL_Config,
@@ -71,6 +72,7 @@ from invokeai.backend.model_manager.configs.main import (
Main_Diffusers_CogView4_Config,
Main_Diffusers_Flux2_Config,
Main_Diffusers_FLUX_Config,
+ Main_Diffusers_QwenImage_Config,
Main_Diffusers_SD1_Config,
Main_Diffusers_SD2_Config,
Main_Diffusers_SD3_Config,
@@ -79,6 +81,7 @@ from invokeai.backend.model_manager.configs.main import (
Main_Diffusers_ZImage_Config,
Main_GGUF_Flux2_Config,
Main_GGUF_FLUX_Config,
+ Main_GGUF_QwenImage_Config,
Main_GGUF_ZImage_Config,
MainModelDefaultSettings,
)
@@ -163,6 +166,7 @@ AnyModelConfig = Annotated[
Annotated[Main_Diffusers_FLUX_Config, Main_Diffusers_FLUX_Config.get_tag()],
Annotated[Main_Diffusers_Flux2_Config, Main_Diffusers_Flux2_Config.get_tag()],
Annotated[Main_Diffusers_CogView4_Config, Main_Diffusers_CogView4_Config.get_tag()],
+ Annotated[Main_Diffusers_QwenImage_Config, Main_Diffusers_QwenImage_Config.get_tag()],
Annotated[Main_Diffusers_ZImage_Config, Main_Diffusers_ZImage_Config.get_tag()],
# Main (Pipeline) - checkpoint format
# IMPORTANT: FLUX.2 must be checked BEFORE FLUX.1 because FLUX.2 has specific validation
@@ -181,6 +185,7 @@ AnyModelConfig = Annotated[
Annotated[Main_BnBNF4_FLUX_Config, Main_BnBNF4_FLUX_Config.get_tag()],
Annotated[Main_GGUF_Flux2_Config, Main_GGUF_Flux2_Config.get_tag()],
Annotated[Main_GGUF_FLUX_Config, Main_GGUF_FLUX_Config.get_tag()],
+ Annotated[Main_GGUF_QwenImage_Config, Main_GGUF_QwenImage_Config.get_tag()],
Annotated[Main_GGUF_ZImage_Config, Main_GGUF_ZImage_Config.get_tag()],
# VAE - checkpoint format
Annotated[VAE_Checkpoint_SD1_Config, VAE_Checkpoint_SD1_Config.get_tag()],
@@ -213,6 +218,7 @@ AnyModelConfig = Annotated[
Annotated[LoRA_LyCORIS_Flux2_Config, LoRA_LyCORIS_Flux2_Config.get_tag()],
Annotated[LoRA_LyCORIS_FLUX_Config, LoRA_LyCORIS_FLUX_Config.get_tag()],
Annotated[LoRA_LyCORIS_ZImage_Config, LoRA_LyCORIS_ZImage_Config.get_tag()],
+ Annotated[LoRA_LyCORIS_QwenImage_Config, LoRA_LyCORIS_QwenImage_Config.get_tag()],
Annotated[LoRA_LyCORIS_Anima_Config, LoRA_LyCORIS_Anima_Config.get_tag()],
# LoRA - OMI format
Annotated[LoRA_OMI_SDXL_Config, LoRA_OMI_SDXL_Config.get_tag()],
diff --git a/invokeai/backend/model_manager/configs/lora.py b/invokeai/backend/model_manager/configs/lora.py
index 65f2d1c08c..88f917d0d3 100644
--- a/invokeai/backend/model_manager/configs/lora.py
+++ b/invokeai/backend/model_manager/configs/lora.py
@@ -772,6 +772,85 @@ class LoRA_LyCORIS_ZImage_Config(LoRA_LyCORIS_Config_Base, Config_Base):
raise NotAMatchError("model does not look like a Z-Image LoRA")
+class LoRA_LyCORIS_QwenImage_Config(LoRA_LyCORIS_Config_Base, Config_Base):
+    """Model config for Qwen Image LoRA models (txt2img and edit) in LyCORIS format."""
+
+ base: Literal[BaseModelType.QwenImage] = Field(default=BaseModelType.QwenImage)
+
+ @classmethod
+ def _validate_looks_like_lora(cls, mod: ModelOnDisk) -> None:
+        """Qwen Image LoRAs have keys like transformer_blocks.X.attn.to_k.lora_down.weight."""
+ state_dict = mod.load_state_dict()
+
+ has_qwen_ie_keys = state_dict_has_any_keys_starting_with(
+ state_dict,
+ {
+ "transformer_blocks.",
+ "transformer.transformer_blocks.",
+ "lora_unet_transformer_blocks_", # Kohya format
+ },
+ )
+ has_lora_suffix = state_dict_has_any_keys_ending_with(
+ state_dict,
+ {
+ "lora_A.weight",
+ "lora_B.weight",
+ "lora_down.weight",
+ "lora_up.weight",
+ "dora_scale",
+ "lokr_w1",
+ "lokr_w2", # LoKR format
+ },
+ )
+ # Must NOT have diffusion_model.layers (Z-Image) or Flux-style keys.
+ # Flux LoRAs can have transformer.single_transformer_blocks or transformer.transformer_blocks
+ # (with the "transformer." prefix and "single_" variant) which would falsely match our check.
+ # Flux Kohya LoRAs use lora_unet_double_blocks or lora_unet_single_blocks.
+ has_z_image_keys = state_dict_has_any_keys_starting_with(state_dict, {"diffusion_model.layers."})
+ has_flux_keys = state_dict_has_any_keys_starting_with(
+ state_dict,
+ {
+ "double_blocks.",
+ "single_blocks.",
+ "single_transformer_blocks.",
+ "transformer.single_transformer_blocks.",
+ "lora_unet_double_blocks_",
+ "lora_unet_single_blocks_",
+ "lora_unet_single_transformer_blocks_",
+ },
+ )
+
+ if has_qwen_ie_keys and has_lora_suffix and not has_z_image_keys and not has_flux_keys:
+ return
+
+ raise NotAMatchError("model does not match Qwen Image LoRA heuristics")
+
+ @classmethod
+ def _get_base_or_raise(cls, mod: ModelOnDisk) -> BaseModelType:
+ state_dict = mod.load_state_dict()
+ has_qwen_ie_keys = state_dict_has_any_keys_starting_with(
+ state_dict,
+ {"transformer_blocks.", "transformer.transformer_blocks.", "lora_unet_transformer_blocks_"},
+ )
+ has_z_image_keys = state_dict_has_any_keys_starting_with(state_dict, {"diffusion_model.layers."})
+ has_flux_keys = state_dict_has_any_keys_starting_with(
+ state_dict,
+ {
+ "double_blocks.",
+ "single_blocks.",
+ "single_transformer_blocks.",
+ "transformer.single_transformer_blocks.",
+ "lora_unet_double_blocks_",
+ "lora_unet_single_blocks_",
+ "lora_unet_single_transformer_blocks_",
+ },
+ )
+
+ if has_qwen_ie_keys and not has_z_image_keys and not has_flux_keys:
+ return BaseModelType.QwenImage
+ raise NotAMatchError("model does not look like a Qwen Image Edit LoRA")
+
+
class LoRA_LyCORIS_Anima_Config(LoRA_LyCORIS_Config_Base, Config_Base):
"""Model config for Anima LoRA models in LyCORIS format."""
diff --git a/invokeai/backend/model_manager/configs/main.py b/invokeai/backend/model_manager/configs/main.py
index ee9abe54fa..1be349f394 100644
--- a/invokeai/backend/model_manager/configs/main.py
+++ b/invokeai/backend/model_manager/configs/main.py
@@ -28,6 +28,7 @@ from invokeai.backend.model_manager.taxonomy import (
ModelFormat,
ModelType,
ModelVariantType,
+ QwenImageVariantType,
SchedulerPredictionType,
SubModelType,
ZImageVariantType,
@@ -86,6 +87,8 @@ class MainModelDefaultSettings(BaseModel):
else:
# Distilled models (Klein 4B, Klein 9B) use fewer steps
return cls(steps=4, cfg_scale=1.0, width=1024, height=1024)
+ case BaseModelType.QwenImage:
+ return cls(steps=40, cfg_scale=4.0, width=1024, height=1024)
case _:
# TODO(psyche): Do we want defaults for other base types?
return None
@@ -196,9 +199,11 @@ class Main_SD_Checkpoint_Config_Base(Checkpoint_Config_Base, Main_Config_Base):
cls._validate_base(mod)
- prediction_type = override_fields.get("prediction_type") or cls._get_scheduler_prediction_type_or_raise(mod)
+ prediction_type = override_fields.pop("prediction_type", None) or cls._get_scheduler_prediction_type_or_raise(
+ mod
+ )
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, prediction_type=prediction_type, variant=variant)
@@ -471,7 +476,7 @@ class Main_Checkpoint_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Conf
cls._validate_does_not_look_like_gguf_quantized(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, variant=variant)
@@ -546,7 +551,7 @@ class Main_Checkpoint_Flux2_Config(Checkpoint_Config_Base, Main_Config_Base, Con
cls._validate_does_not_look_like_gguf_quantized(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, variant=variant)
@@ -609,7 +614,7 @@ class Main_BnBNF4_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Config_B
cls._validate_model_looks_like_bnb_quantized(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, variant=variant)
@@ -660,7 +665,7 @@ class Main_GGUF_FLUX_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Bas
cls._validate_is_not_flux2(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, variant=variant)
@@ -718,7 +723,7 @@ class Main_GGUF_Flux2_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Ba
cls._validate_is_flux2(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
return cls(**override_fields, variant=variant)
@@ -779,9 +784,9 @@ class Main_Diffusers_FLUX_Config(Diffusers_Config_Base, Main_Config_Base, Config
},
)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -833,9 +838,9 @@ class Main_Diffusers_Flux2_Config(Diffusers_Config_Base, Main_Config_Base, Confi
},
)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -904,11 +909,13 @@ class Main_SD_Diffusers_Config_Base(Diffusers_Config_Base, Main_Config_Base):
cls._validate_base(mod)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
- prediction_type = override_fields.get("prediction_type") or cls._get_scheduler_prediction_type_or_raise(mod)
+ prediction_type = override_fields.pop("prediction_type", None) or cls._get_scheduler_prediction_type_or_raise(
+ mod
+ )
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -1014,9 +1021,9 @@ class Main_Diffusers_SD3_Config(Diffusers_Config_Base, Main_Config_Base, Config_
},
)
- submodels = override_fields.get("submodels") or cls._get_submodels_or_raise(mod)
+ submodels = override_fields.pop("submodels", None) or cls._get_submodels_or_raise(mod)
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -1089,7 +1096,7 @@ class Main_Diffusers_CogView4_Config(Diffusers_Config_Base, Main_Config_Base, Co
},
)
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -1155,9 +1162,9 @@ class Main_Diffusers_ZImage_Config(Diffusers_Config_Base, Main_Config_Base, Conf
},
)
- variant = override_fields.get("variant") or cls._get_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_variant_or_raise(mod)
- repo_variant = override_fields.get("repo_variant") or cls._get_repo_variant_or_raise(mod)
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
return cls(
**override_fields,
@@ -1201,7 +1208,7 @@ class Main_Checkpoint_ZImage_Config(Checkpoint_Config_Base, Main_Config_Base, Co
cls._validate_does_not_look_like_gguf_quantized(mod)
- variant = override_fields.get("variant", ZImageVariantType.Turbo)
+ variant = override_fields.pop("variant", None) or ZImageVariantType.Turbo
return cls(**override_fields, variant=variant)
@@ -1235,7 +1242,7 @@ class Main_GGUF_ZImage_Config(Checkpoint_Config_Base, Main_Config_Base, Config_B
cls._validate_looks_like_gguf_quantized(mod)
- variant = override_fields.get("variant", ZImageVariantType.Turbo)
+ variant = override_fields.pop("variant", None) or ZImageVariantType.Turbo
return cls(**override_fields, variant=variant)
@@ -1252,6 +1259,106 @@ class Main_GGUF_ZImage_Config(Checkpoint_Config_Base, Main_Config_Base, Config_B
raise NotAMatchError("state dict does not look like GGUF quantized")
+class Main_Diffusers_QwenImage_Config(Diffusers_Config_Base, Main_Config_Base, Config_Base):
+ """Model config for Qwen Image diffusers models (both txt2img and edit)."""
+
+ base: Literal[BaseModelType.QwenImage] = Field(BaseModelType.QwenImage)
+ variant: QwenImageVariantType | None = Field(default=None)
+
+ @classmethod
+ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+ raise_if_not_dir(mod)
+
+ raise_for_override_fields(cls, override_fields)
+
+ # This check implies the base type - no further validation needed.
+ raise_for_class_name(
+ common_config_paths(mod.path),
+ {
+ "QwenImagePlusPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImagePipeline",
+ },
+ )
+
+ repo_variant = override_fields.pop("repo_variant", None) or cls._get_repo_variant_or_raise(mod)
+ variant = override_fields.pop("variant", None) or cls._get_qwen_image_variant(mod)
+
+ return cls(
+ **override_fields,
+ repo_variant=repo_variant,
+ variant=variant,
+ )
+
+ @classmethod
+ def _get_qwen_image_variant(cls, mod: ModelOnDisk) -> QwenImageVariantType:
+ """Detect whether this is an edit or txt2img model from the pipeline class name."""
+ import json
+
+ model_index = mod.path / "model_index.json"
+ if model_index.exists():
+ with open(model_index) as f:
+ config = json.load(f)
+ class_name = config.get("_class_name", "")
+ if "Edit" in class_name:
+ return QwenImageVariantType.Edit
+ return QwenImageVariantType.Generate
+
+
+def _has_qwen_image_keys(state_dict: dict[str | int, Any]) -> bool:
+    """Check if state dict contains Qwen Image transformer keys (both txt2img and edit variants).
+
+    Qwen Image uses 'txt_in' and 'txt_norm' instead of 'context_embedder' (FLUX).
+    This distinguishes it from FLUX and other architectures.
+    """
+ has_txt_in = any(isinstance(k, str) and k.startswith("txt_in.") for k in state_dict.keys())
+ has_txt_norm = any(isinstance(k, str) and k.startswith("txt_norm.") for k in state_dict.keys())
+ has_img_in = any(isinstance(k, str) and k.startswith("img_in.") for k in state_dict.keys())
+ # Must NOT have context_embedder (which would indicate FLUX)
+ has_context_embedder = any(isinstance(k, str) and "context_embedder" in k for k in state_dict.keys())
+ return has_txt_in and has_txt_norm and has_img_in and not has_context_embedder
+
+
+class Main_GGUF_QwenImage_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base):
+ """Model config for GGUF-quantized Qwen Image transformer models."""
+
+ base: Literal[BaseModelType.QwenImage] = Field(default=BaseModelType.QwenImage)
+ format: Literal[ModelFormat.GGUFQuantized] = Field(default=ModelFormat.GGUFQuantized)
+ variant: QwenImageVariantType | None = Field(default=None)
+
+ @classmethod
+ def from_model_on_disk(cls, mod: ModelOnDisk, override_fields: dict[str, Any]) -> Self:
+ raise_if_not_file(mod)
+
+ raise_for_override_fields(cls, override_fields)
+
+ sd = mod.load_state_dict()
+
+ if not _has_qwen_image_keys(sd):
+ raise NotAMatchError("state dict does not look like a Qwen Image Edit model")
+
+ if not _has_ggml_tensors(sd):
+ raise NotAMatchError("state dict does not look like GGUF quantized")
+
+ # Infer variant from the state dict if not explicitly provided.
+ # The Edit variant includes an extra tensor `__index_timestep_zero__` (used by the
+ # `zero_cond_t` dual-modulation path in diffusers' QwenImageTransformer2DModel).
+ # If the marker tensor is missing, fall back to the filename heuristic since older
+ # or alternate GGUF converters may not emit it.
+ explicit_variant = override_fields.pop("variant", None)
+ if explicit_variant is None:
+ if "__index_timestep_zero__" in sd:
+ explicit_variant = QwenImageVariantType.Edit
+ else:
+ filename = mod.path.stem.lower()
+ if "edit" in filename:
+ explicit_variant = QwenImageVariantType.Edit
+ else:
+ explicit_variant = QwenImageVariantType.Generate
+
+ return cls(**override_fields, variant=explicit_variant)
+
+
class Main_Checkpoint_Anima_Config(Checkpoint_Config_Base, Main_Config_Base, Config_Base):
"""Model config for Anima single-file checkpoint models (safetensors).
diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py
index 38d5aebeaa..6cf06d4807 100644
--- a/invokeai/backend/model_manager/load/model_loaders/lora.py
+++ b/invokeai/backend/model_manager/load/model_loaders/lora.py
@@ -57,6 +57,9 @@ from invokeai.backend.patches.lora_conversions.flux_xlabs_lora_conversion_utils
is_state_dict_likely_in_flux_xlabs_format,
lora_model_from_flux_xlabs_state_dict,
)
+from invokeai.backend.patches.lora_conversions.qwen_image_lora_conversion_utils import (
+ lora_model_from_qwen_image_state_dict,
+)
from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
from invokeai.backend.patches.lora_conversions.z_image_lora_conversion_utils import lora_model_from_z_image_state_dict
@@ -162,6 +165,8 @@ class LoRALoader(ModelLoader):
# Z-Image LoRAs use diffusers PEFT format with transformer and/or Qwen3 encoder layers.
# We set alpha=None to use rank as alpha (common default).
model = lora_model_from_z_image_state_dict(state_dict=state_dict, alpha=None)
+ elif self._model_base == BaseModelType.QwenImage:
+ model = lora_model_from_qwen_image_state_dict(state_dict=state_dict, alpha=None)
elif self._model_base == BaseModelType.Anima:
# Anima LoRAs use Kohya-style or diffusers PEFT format targeting Cosmos DiT blocks.
model = lora_model_from_anima_state_dict(state_dict=state_dict, alpha=None)
diff --git a/invokeai/backend/model_manager/load/model_loaders/qwen_image.py b/invokeai/backend/model_manager/load/model_loaders/qwen_image.py
new file mode 100644
index 0000000000..a025e72794
--- /dev/null
+++ b/invokeai/backend/model_manager/load/model_loaders/qwen_image.py
@@ -0,0 +1,177 @@
+from pathlib import Path
+from typing import Optional
+
+import accelerate
+import torch
+
+from invokeai.backend.model_manager.configs.base import Checkpoint_Config_Base, Diffusers_Config_Base
+from invokeai.backend.model_manager.configs.factory import AnyModelConfig
+from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+from invokeai.backend.model_manager.load.load_default import ModelLoader
+from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
+from invokeai.backend.model_manager.taxonomy import (
+ AnyModel,
+ BaseModelType,
+ ModelFormat,
+ ModelType,
+ QwenImageVariantType,
+ SubModelType,
+)
+from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
+from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
+from invokeai.backend.util.devices import TorchDevice
+
+
+@ModelLoaderRegistry.register(base=BaseModelType.QwenImage, type=ModelType.Main, format=ModelFormat.Diffusers)
+class QwenImageDiffusersModel(GenericDiffusersLoader):
+ """Class to load Qwen Image main models (both generate and edit variants)."""
+
+ def _load_model(
+ self,
+ config: AnyModelConfig,
+ submodel_type: Optional[SubModelType] = None,
+ ) -> AnyModel:
+ if isinstance(config, Checkpoint_Config_Base):
+ raise NotImplementedError("CheckpointConfigBase is not implemented for Qwen Image models.")
+
+ if submodel_type is None:
+ raise Exception("A submodel type must be provided when loading main pipelines.")
+
+ model_path = Path(config.path)
+ load_class = self.get_hf_load_class(model_path, submodel_type)
+ repo_variant = config.repo_variant if isinstance(config, Diffusers_Config_Base) else None
+ variant = repo_variant.value if repo_variant else None
+ model_path = model_path / submodel_type.value
+
+ # We force bfloat16 for Qwen Image models.
+ # Use `dtype` (newer) with fallback to `torch_dtype` (older diffusers).
+ dtype_kwarg = {"dtype": torch.bfloat16}
+ try:
+ result: AnyModel = load_class.from_pretrained(
+ model_path,
+ **dtype_kwarg,
+ variant=variant,
+ local_files_only=True,
+ )
+ except TypeError:
+ # Older diffusers uses torch_dtype instead of dtype
+ dtype_kwarg = {"torch_dtype": torch.bfloat16}
+ result = load_class.from_pretrained(
+ model_path,
+ **dtype_kwarg,
+ variant=variant,
+ local_files_only=True,
+ )
+ except OSError as e:
+ if variant and "no file named" in str(e):
+ result = load_class.from_pretrained(model_path, **dtype_kwarg, local_files_only=True)
+ else:
+ raise e
+
+ return result
+
+
+@ModelLoaderRegistry.register(base=BaseModelType.QwenImage, type=ModelType.Main, format=ModelFormat.GGUFQuantized)
+class QwenImageGGUFCheckpointModel(ModelLoader):
+ """Class to load GGUF-quantized Qwen Image transformer models (generate and edit variants)."""
+
+ def _load_model(
+ self,
+ config: AnyModelConfig,
+ submodel_type: Optional[SubModelType] = None,
+ ) -> AnyModel:
+ if not isinstance(config, Checkpoint_Config_Base):
+ raise ValueError("Only CheckpointConfigBase models are currently supported here.")
+
+ match submodel_type:
+ case SubModelType.Transformer:
+ return self._load_from_singlefile(config)
+
+ raise ValueError(
+ f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+ )
+
+ def _load_from_singlefile(self, config: AnyModelConfig) -> AnyModel:
+ from diffusers import QwenImageTransformer2DModel
+
+ if not isinstance(config, Main_GGUF_QwenImage_Config):
+ raise TypeError(f"Expected Main_GGUF_QwenImage_Config, got {type(config).__name__}.")
+ model_path = Path(config.path)
+
+ target_device = TorchDevice.choose_torch_device()
+ compute_dtype = TorchDevice.choose_bfloat16_safe_dtype(target_device)
+
+ sd = gguf_sd_loader(model_path, compute_dtype=compute_dtype)
+
+ # Strip ComfyUI-style prefixes if present
+ prefix_to_strip = None
+ for prefix in ["model.diffusion_model.", "diffusion_model."]:
+ if any(k.startswith(prefix) for k in sd.keys() if isinstance(k, str)):
+ prefix_to_strip = prefix
+ break
+
+ if prefix_to_strip:
+ stripped_sd = {}
+ for key, value in sd.items():
+ if isinstance(key, str) and key.startswith(prefix_to_strip):
+ stripped_sd[key[len(prefix_to_strip) :]] = value
+ else:
+ stripped_sd[key] = value
+ sd = stripped_sd
+
+ # Auto-detect architecture from state dict
+ num_layers = 0
+ for key in sd.keys():
+ if isinstance(key, str) and key.startswith("transformer_blocks."):
+ parts = key.split(".")
+ if len(parts) >= 2:
+ try:
+ layer_idx = int(parts[1])
+ num_layers = max(num_layers, layer_idx + 1)
+ except ValueError:
+ pass
+
+ # Detect dimensions from weights
+ num_attention_heads = 24 # default
+ attention_head_dim = 128 # default
+
+ if "img_in.weight" in sd:
+ w = sd["img_in.weight"]
+ shape = w.tensor_shape if isinstance(w, GGMLTensor) else w.shape
+ hidden_dim = shape[0]
+ in_channels = shape[1]
+ num_attention_heads = hidden_dim // attention_head_dim
+
+ joint_attention_dim = 3584 # default
+ if "txt_in.weight" in sd:
+ w = sd["txt_in.weight"]
+ shape = w.tensor_shape if isinstance(w, GGMLTensor) else w.shape
+ joint_attention_dim = shape[1]
+
+ model_config: dict = {
+ "patch_size": 2,
+ "in_channels": in_channels if "img_in.weight" in sd else 64,
+ "out_channels": 16,
+ "num_layers": num_layers if num_layers > 0 else 60,
+ "attention_head_dim": attention_head_dim,
+ "num_attention_heads": num_attention_heads,
+ "joint_attention_dim": joint_attention_dim,
+ "guidance_embeds": False,
+ "axes_dims_rope": (16, 56, 56),
+ }
+
+ # zero_cond_t is only used by edit-variant models. It enables dual modulation
+ # for noisy vs reference patches. Setting it on txt2img models produces garbage.
+ # Also requires diffusers 0.37+ (the parameter doesn't exist in older versions).
+ import inspect
+
+ is_edit = getattr(config, "variant", None) == QwenImageVariantType.Edit
+ if is_edit and "zero_cond_t" in inspect.signature(QwenImageTransformer2DModel.__init__).parameters:
+ model_config["zero_cond_t"] = True
+
+ with accelerate.init_empty_weights():
+ model = QwenImageTransformer2DModel(**model_config)
+
+ model.load_state_dict(sd, strict=False, assign=True)
+ return model
diff --git a/invokeai/backend/model_manager/metadata/fetch/huggingface.py b/invokeai/backend/model_manager/metadata/fetch/huggingface.py
index 1b2b6c3674..30fe418fe1 100644
--- a/invokeai/backend/model_manager/metadata/fetch/huggingface.py
+++ b/invokeai/backend/model_manager/metadata/fetch/huggingface.py
@@ -19,8 +19,7 @@ from pathlib import Path
from typing import Optional
import requests
-from huggingface_hub import HfApi, configure_http_backend, hf_hub_url
-from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError
+from huggingface_hub import hf_hub_url
from pydantic.networks import AnyHttpUrl
from requests.sessions import Session
@@ -47,7 +46,6 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
this module without an internet connection.
"""
self._requests = session or requests.Session()
- configure_http_backend(backend_factory=lambda: self._requests)
@classmethod
def from_json(cls, json: str) -> HuggingFaceMetadata:
@@ -55,6 +53,22 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
metadata = HuggingFaceMetadata.model_validate_json(json)
return metadata
+ def _fetch_model_info(self, repo_id: str, variant: Optional[ModelRepoVariant] = None) -> dict:
+ """Fetch model info from HuggingFace API using self._requests session.
+
+ This allows the session to be mocked in tests via requests_testadapter.
+ """
+ url = f"https://huggingface.co/api/models/{repo_id}"
+ params: dict[str, str] = {"blobs": "True"}
+ if variant is not None:
+ params["revision"] = str(variant)
+
+ response = self._requests.get(url, params=params)
+ if response.status_code == 404:
+ raise UnknownMetadataException(f"'{repo_id}' not found.")
+ response.raise_for_status()
+ return response.json()
+
def from_id(self, id: str, variant: Optional[ModelRepoVariant] = None) -> AnyModelRepoMetadata:
"""Return a HuggingFaceMetadata object given the model's repo_id."""
# Little loop which tries fetching a revision corresponding to the selected variant.
@@ -67,10 +81,10 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
repo_id = id.split("::")[0] or id
while not model_info:
try:
- model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
- except RepositoryNotFoundError as excp:
- raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
- except RevisionNotFoundError:
+ model_info = self._fetch_model_info(repo_id, variant)
+ except UnknownMetadataException:
+ raise
+ except requests.HTTPError:
if variant is None:
raise
else:
@@ -80,15 +94,18 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
_, name = repo_id.split("/")
- for s in model_info.siblings or []:
- assert s.rfilename is not None
- assert s.size is not None
+ for s in model_info.get("siblings") or []:
+ rfilename = s.get("rfilename")
+ size = s.get("size")
+ assert rfilename is not None
+ assert size is not None
+ lfs = s.get("lfs")
files.append(
RemoteModelFile(
- url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
- path=Path(name, s.rfilename),
- size=s.size,
- sha256=s.lfs.get("sha256") if s.lfs else None,
+ url=hf_hub_url(repo_id, rfilename, revision=variant or "main"),
+ path=Path(name, rfilename),
+ size=size,
+ sha256=lfs.get("sha256") if lfs else None,
)
)
@@ -115,10 +132,10 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
)
return HuggingFaceMetadata(
- id=model_info.id,
+ id=model_info["id"],
name=name,
files=files,
- api_response=json.dumps(model_info.__dict__, default=str),
+ api_response=json.dumps(model_info, default=str),
is_diffusers=is_diffusers,
ckpt_urls=ckpt_urls,
)
diff --git a/invokeai/backend/model_manager/metadata/metadata_base.py b/invokeai/backend/model_manager/metadata/metadata_base.py
index e16ad4cbc4..b048144e54 100644
--- a/invokeai/backend/model_manager/metadata/metadata_base.py
+++ b/invokeai/backend/model_manager/metadata/metadata_base.py
@@ -17,7 +17,7 @@ remote repo.
from pathlib import Path
from typing import List, Literal, Optional, Union
-from huggingface_hub import configure_http_backend, hf_hub_url
+from huggingface_hub import hf_hub_url
from pydantic import BaseModel, Field, TypeAdapter
from pydantic.networks import AnyHttpUrl
from requests.sessions import Session
@@ -111,7 +111,6 @@ class HuggingFaceMetadata(ModelMetadataWithFiles):
full-precision model is returned.
"""
session = session or Session()
- configure_http_backend(backend_factory=lambda: session) # used in testing
paths = filter_files([x.path for x in self.files], variant, subfolder, subfolders) # all files in the model
diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index edcac321f1..c93a606aa8 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -9,7 +9,13 @@ from invokeai.backend.model_manager.configs.external_api import (
ExternalModelPanelSchema,
ExternalResolutionPreset,
)
-from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelFormat, ModelType
+from invokeai.backend.model_manager.taxonomy import (
+ AnyVariant,
+ BaseModelType,
+ ModelFormat,
+ ModelType,
+ QwenImageVariantType,
+)
class StarterModelWithoutDependencies(BaseModel):
@@ -19,6 +25,7 @@ class StarterModelWithoutDependencies(BaseModel):
base: BaseModelType
type: ModelType
format: Optional[ModelFormat] = None
+ variant: Optional[AnyVariant] = None
is_installed: bool = False
capabilities: ExternalModelCapabilities | None = None
default_settings: ExternalApiModelDefaultSettings | None = None
@@ -659,6 +666,138 @@ cogview4 = StarterModel(
)
# endregion
+# region Qwen Image Edit
+qwen_image_edit = StarterModel(
+ name="Qwen Image Edit 2511",
+ base=BaseModelType.QwenImage,
+ source="Qwen/Qwen-Image-Edit-2511",
+ description="Qwen Image Edit 2511 full diffusers model. Supports text-guided image editing with multiple reference images. (~40GB)",
+ type=ModelType.Main,
+ variant=QwenImageVariantType.Edit,
+)
+
+qwen_image_edit_gguf_q4_k_m = StarterModel(
+ name="Qwen Image Edit 2511 (Q4_K_M)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/resolve/main/qwen-image-edit-2511-Q4_K_M.gguf",
+ description="Qwen Image Edit 2511 - Q4_K_M quantized transformer. Good quality/size balance. (~13GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+ variant=QwenImageVariantType.Edit,
+)
+
+qwen_image_edit_gguf_q2_k = StarterModel(
+ name="Qwen Image Edit 2511 (Q2_K)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/resolve/main/qwen-image-edit-2511-Q2_K.gguf",
+ description="Qwen Image Edit 2511 - Q2_K heavily quantized transformer. Smallest size, lower quality. (~7.5GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+ variant=QwenImageVariantType.Edit,
+)
+
+qwen_image_edit_gguf_q6_k = StarterModel(
+ name="Qwen Image Edit 2511 (Q6_K)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/resolve/main/qwen-image-edit-2511-Q6_K.gguf",
+ description="Qwen Image Edit 2511 - Q6_K quantized transformer. Near-lossless quality. (~17GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+ variant=QwenImageVariantType.Edit,
+)
+
+qwen_image_edit_gguf_q8_0 = StarterModel(
+ name="Qwen Image Edit 2511 (Q8_0)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/resolve/main/qwen-image-edit-2511-Q8_0.gguf",
+ description="Qwen Image Edit 2511 - Q8_0 quantized transformer. Highest quality quantization. (~22GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+ variant=QwenImageVariantType.Edit,
+)
+
+qwen_image_edit_lightning_4step = StarterModel(
+ name="Qwen Image Edit Lightning (4-step, bf16)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning/resolve/main/Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",
+ description="Lightning distillation LoRA for Qwen Image Edit — enables generation in just 4 steps. "
+ "Settings: Steps=4, CFG=1, Shift Override=3.",
+ type=ModelType.LoRA,
+)
+
+qwen_image_edit_lightning_8step = StarterModel(
+ name="Qwen Image Edit Lightning (8-step, bf16)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning/resolve/main/Qwen-Image-Edit-2511-Lightning-8steps-V1.0-bf16.safetensors",
+ description="Lightning distillation LoRA for Qwen Image Edit — enables generation in 8 steps with better quality. "
+ "Settings: Steps=8, CFG=1, Shift Override=3.",
+ type=ModelType.LoRA,
+)
+
+# Qwen Image (txt2img)
+qwen_image = StarterModel(
+ name="Qwen Image 2512",
+ base=BaseModelType.QwenImage,
+ source="Qwen/Qwen-Image-2512",
+ description="Qwen Image 2512 full diffusers model. High-quality text-to-image generation. (~40GB)",
+ type=ModelType.Main,
+)
+
+qwen_image_gguf_q4_k_m = StarterModel(
+ name="Qwen Image 2512 (Q4_K_M)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-2512-GGUF/resolve/main/qwen-image-2512-Q4_K_M.gguf",
+ description="Qwen Image 2512 - Q4_K_M quantized transformer. Good quality/size balance. (~13GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+)
+
+qwen_image_gguf_q2_k = StarterModel(
+ name="Qwen Image 2512 (Q2_K)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-2512-GGUF/resolve/main/qwen-image-2512-Q2_K.gguf",
+ description="Qwen Image 2512 - Q2_K heavily quantized transformer. Smallest size, lower quality. (~7.5GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+)
+
+qwen_image_gguf_q6_k = StarterModel(
+ name="Qwen Image 2512 (Q6_K)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-2512-GGUF/resolve/main/qwen-image-2512-Q6_K.gguf",
+ description="Qwen Image 2512 - Q6_K quantized transformer. Near-lossless quality. (~17GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+)
+
+qwen_image_gguf_q8_0 = StarterModel(
+ name="Qwen Image 2512 (Q8_0)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/unsloth/Qwen-Image-2512-GGUF/resolve/main/qwen-image-2512-Q8_0.gguf",
+ description="Qwen Image 2512 - Q8_0 quantized transformer. Highest quality quantization. (~22GB)",
+ type=ModelType.Main,
+ format=ModelFormat.GGUFQuantized,
+)
+
+qwen_image_lightning_4step = StarterModel(
+ name="Qwen Image Lightning (4-step, V2.0, bf16)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
+ description="Lightning distillation LoRA for Qwen Image — enables generation in just 4 steps. "
+ "Settings: Steps=4, CFG=1, Shift Override=3.",
+ type=ModelType.LoRA,
+)
+
+qwen_image_lightning_8step = StarterModel(
+ name="Qwen Image Lightning (8-step, V2.0, bf16)",
+ base=BaseModelType.QwenImage,
+ source="https://huggingface.co/lightx2v/Qwen-Image-Lightning/resolve/main/Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors",
+ description="Lightning distillation LoRA for Qwen Image — enables generation in 8 steps with better quality. "
+ "Settings: Steps=8, CFG=1, Shift Override=3.",
+ type=ModelType.LoRA,
+)
+# endregion
+
# region SigLIP
siglip = StarterModel(
name="SigLIP - google/siglip-so400m-patch14-384",
@@ -1225,6 +1364,20 @@ STARTER_MODELS: list[StarterModel] = [
flux2_klein_qwen3_4b_encoder,
flux2_klein_qwen3_8b_encoder,
cogview4,
+ qwen_image_edit,
+ qwen_image_edit_gguf_q2_k,
+ qwen_image_edit_gguf_q4_k_m,
+ qwen_image_edit_gguf_q6_k,
+ qwen_image_edit_gguf_q8_0,
+ qwen_image_edit_lightning_4step,
+ qwen_image_edit_lightning_8step,
+ qwen_image,
+ qwen_image_gguf_q2_k,
+ qwen_image_gguf_q4_k_m,
+ qwen_image_gguf_q6_k,
+ qwen_image_gguf_q8_0,
+ qwen_image_lightning_4step,
+ qwen_image_lightning_8step,
flux_krea,
flux_krea_quantized,
z_image_turbo,
@@ -1313,6 +1466,19 @@ flux2_klein_bundle: list[StarterModel] = [
flux2_klein_qwen3_4b_encoder,
]
+qwen_image_bundle: list[StarterModel] = [
+ qwen_image_edit,
+ qwen_image_edit_gguf_q4_k_m,
+ qwen_image_edit_gguf_q8_0,
+ qwen_image_edit_lightning_4step,
+ qwen_image_edit_lightning_8step,
+ qwen_image,
+ qwen_image_gguf_q4_k_m,
+ qwen_image_gguf_q8_0,
+ qwen_image_lightning_4step,
+ qwen_image_lightning_8step,
+]
+
anima_bundle: list[StarterModel] = [
anima_preview3,
anima_qwen3_encoder,
@@ -1326,6 +1492,7 @@ STARTER_BUNDLES: dict[str, StarterModelBundle] = {
BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle),
BaseModelType.Flux2: StarterModelBundle(name="FLUX.2 Klein", models=flux2_klein_bundle),
BaseModelType.ZImage: StarterModelBundle(name="Z-Image Turbo", models=zimage_bundle),
+ BaseModelType.QwenImage: StarterModelBundle(name="Qwen Image", models=qwen_image_bundle),
BaseModelType.Anima: StarterModelBundle(name="Anima", models=anima_bundle),
}
diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py
index 4a20665094..b2b55ebd3f 100644
--- a/invokeai/backend/model_manager/taxonomy.py
+++ b/invokeai/backend/model_manager/taxonomy.py
@@ -54,6 +54,8 @@ class BaseModelType(str, Enum):
"""Indicates the model is associated with Z-Image model architecture, including Z-Image-Turbo."""
External = "external"
"""Indicates the model is hosted by an external provider."""
+ QwenImage = "qwen-image"
+ """Indicates the model is associated with the Qwen Image model architecture (Qwen Image and Qwen Image Edit)."""
Anima = "anima"
"""Indicates the model is associated with Anima model architecture (Cosmos Predict2 DiT + LLM Adapter)."""
Unknown = "unknown"
@@ -148,6 +150,16 @@ class ZImageVariantType(str, Enum):
"""Z-Image Base - undistilled foundation model with full CFG and negative prompt support."""
+class QwenImageVariantType(str, Enum):
+ """Qwen Image model variants."""
+
+ Generate = "generate"
+ """Qwen Image - text-to-image generation model."""
+
+ Edit = "edit"
+ """Qwen Image Edit - image editing model with reference image support."""
+
+
class Qwen3VariantType(str, Enum):
"""Qwen3 text encoder variants based on model size."""
@@ -224,8 +236,28 @@ class FluxLoRAFormat(str, Enum):
AnyVariant: TypeAlias = Union[
- ModelVariantType, ClipVariantType, FluxVariantType, Flux2VariantType, ZImageVariantType, Qwen3VariantType
+ ModelVariantType,
+ ClipVariantType,
+ FluxVariantType,
+ Flux2VariantType,
+ ZImageVariantType,
+ QwenImageVariantType,
+ Qwen3VariantType,
]
variant_type_adapter = TypeAdapter[
- ModelVariantType | ClipVariantType | FluxVariantType | Flux2VariantType | ZImageVariantType | Qwen3VariantType
-](ModelVariantType | ClipVariantType | FluxVariantType | Flux2VariantType | ZImageVariantType | Qwen3VariantType)
+ ModelVariantType
+ | ClipVariantType
+ | FluxVariantType
+ | Flux2VariantType
+ | ZImageVariantType
+ | QwenImageVariantType
+ | Qwen3VariantType
+](
+ ModelVariantType
+ | ClipVariantType
+ | FluxVariantType
+ | Flux2VariantType
+ | ZImageVariantType
+ | QwenImageVariantType
+ | Qwen3VariantType
+)
diff --git a/invokeai/backend/patches/lora_conversions/qwen_image_lora_constants.py b/invokeai/backend/patches/lora_conversions/qwen_image_lora_constants.py
new file mode 100644
index 0000000000..727ee5a428
--- /dev/null
+++ b/invokeai/backend/patches/lora_conversions/qwen_image_lora_constants.py
@@ -0,0 +1,5 @@
+# Qwen Image LoRA prefix constants
+# These prefixes are used for key mapping when applying LoRA patches to Qwen Image models
+
+# Prefix for Qwen Image transformer LoRA layers (constant name retains "EDIT" for compatibility)
+QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX = "lora_transformer-"
diff --git a/invokeai/backend/patches/lora_conversions/qwen_image_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/qwen_image_lora_conversion_utils.py
new file mode 100644
index 0000000000..7fc01f7231
--- /dev/null
+++ b/invokeai/backend/patches/lora_conversions/qwen_image_lora_conversion_utils.py
@@ -0,0 +1,197 @@
+"""Qwen Image LoRA conversion utilities.
+
+Qwen Image uses QwenImageTransformer2DModel architecture.
+Supports multiple LoRA formats:
+- Diffusers/PEFT: transformer_blocks.0.attn.to_k.lora_down.weight
+- With prefix: transformer.transformer_blocks.0.attn.to_k.lora_down.weight
+- Kohya: lora_unet_transformer_blocks_0_attn_to_k.lora_down.weight (underscores instead of dots)
+"""
+
+import re
+from typing import Dict
+
+import torch
+
+from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
+from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
+from invokeai.backend.patches.lora_conversions.qwen_image_lora_constants import (
+ QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX,
+)
+from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
+
+# Regex for Kohya-format Qwen Image LoRA keys.
+# Example: lora_unet_transformer_blocks_0_attn_to_k
+# Groups: (block_idx, sub_module_with_underscores)
+_KOHYA_KEY_REGEX = re.compile(r"lora_unet_transformer_blocks_(\d+)_(.*)")
+
+# Mapping from Kohya underscore-separated sub-module names to dot-separated model paths.
+# The Kohya format uses underscores everywhere, but some underscores are part of the
+# module name (e.g., add_k_proj, to_out). We match the longest prefix first.
+_KOHYA_MODULE_MAP: list[tuple[str, str]] = [
+ # Attention projections
+ ("attn_add_k_proj", "attn.add_k_proj"),
+ ("attn_add_q_proj", "attn.add_q_proj"),
+ ("attn_add_v_proj", "attn.add_v_proj"),
+ ("attn_to_add_out", "attn.to_add_out"),
+ ("attn_to_out_0", "attn.to_out.0"),
+ ("attn_to_k", "attn.to_k"),
+ ("attn_to_q", "attn.to_q"),
+ ("attn_to_v", "attn.to_v"),
+ # Image stream MLP and modulation
+ ("img_mlp_net_0_proj", "img_mlp.net.0.proj"),
+ ("img_mlp_net_2", "img_mlp.net.2"),
+ ("img_mod_1", "img_mod.1"),
+ # Text stream MLP and modulation
+ ("txt_mlp_net_0_proj", "txt_mlp.net.0.proj"),
+ ("txt_mlp_net_2", "txt_mlp.net.2"),
+ ("txt_mod_1", "txt_mod.1"),
+]
+
+
+def is_state_dict_likely_kohya_qwen_image(state_dict: dict[str | int, torch.Tensor]) -> bool:
+ """Check if the state dict uses Kohya-format Qwen Image LoRA keys."""
+ str_keys = [k for k in state_dict.keys() if isinstance(k, str)]
+ if not str_keys:
+ return False
+ # Check if any key matches the Kohya pattern
+ return any(k.startswith("lora_unet_transformer_blocks_") for k in str_keys)
+
+
+def _convert_kohya_key(kohya_layer: str) -> str | None:
+ """Convert a Kohya-format layer name to a dot-separated model module path.
+
+ Example: lora_unet_transformer_blocks_0_attn_to_k -> transformer_blocks.0.attn.to_k
+ """
+ m = _KOHYA_KEY_REGEX.match(kohya_layer)
+ if not m:
+ return None
+
+ block_idx = m.group(1)
+ sub_module = m.group(2)
+
+ for kohya_name, model_path in _KOHYA_MODULE_MAP:
+ if sub_module == kohya_name:
+ return f"transformer_blocks.{block_idx}.{model_path}"
+
+ # Fallback: unknown sub-module, return None so caller can warn/skip
+ return None
+
+
+def lora_model_from_qwen_image_state_dict(
+ state_dict: Dict[str, torch.Tensor], alpha: float | None = None
+) -> ModelPatchRaw:
+ """Convert a Qwen Image LoRA state dict to a ModelPatchRaw.
+
+ Handles three key formats:
+ - Diffusers/PEFT: transformer_blocks.0.attn.to_k.lora_down.weight
+ - With prefix: transformer.transformer_blocks.0.attn.to_k.lora_down.weight
+ - Kohya: lora_unet_transformer_blocks_0_attn_to_k.lora_down.weight
+ """
+ is_kohya = is_state_dict_likely_kohya_qwen_image(state_dict)
+
+ if is_kohya:
+ return _convert_kohya_format(state_dict, alpha)
+ else:
+ return _convert_diffusers_format(state_dict, alpha)
+
+
+def _convert_kohya_format(state_dict: Dict[str, torch.Tensor], alpha: float | None) -> ModelPatchRaw:
+ """Convert Kohya-format state dict. Keys are like lora_unet_transformer_blocks_0_attn_to_k.lokr_w1"""
+ layers: dict[str, BaseLayerPatch] = {}
+
+ # Group by layer (split at first dot: layer_name.param_name)
+ grouped: dict[str, dict[str, torch.Tensor]] = {}
+ for key, value in state_dict.items():
+ if not isinstance(key, str):
+ continue
+ layer_name, param_name = key.split(".", 1)
+ if layer_name not in grouped:
+ grouped[layer_name] = {}
+ grouped[layer_name][param_name] = value
+
+ for kohya_layer, layer_dict in grouped.items():
+ model_path = _convert_kohya_key(kohya_layer)
+ if model_path is None:
+ continue # Skip unrecognized layers
+
+ layer = any_lora_layer_from_state_dict(layer_dict)
+ final_key = f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}{model_path}"
+ layers[final_key] = layer
+
+ return ModelPatchRaw(layers=layers)
+
+
+def _convert_diffusers_format(state_dict: Dict[str, torch.Tensor], alpha: float | None) -> ModelPatchRaw:
+ """Convert Diffusers/PEFT format state dict."""
+ layers: dict[str, BaseLayerPatch] = {}
+
+ # Some LoRAs use a "transformer." prefix on keys
+ strip_prefixes = ["transformer."]
+
+ grouped = _group_by_layer(state_dict)
+
+ for layer_key, layer_dict in grouped.items():
+ values = _normalize_lora_keys(layer_dict, alpha)
+ layer = any_lora_layer_from_state_dict(values)
+ clean_key = layer_key
+ for prefix in strip_prefixes:
+ if clean_key.startswith(prefix):
+ clean_key = clean_key[len(prefix) :]
+ break
+ final_key = f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}{clean_key}"
+ layers[final_key] = layer
+
+ return ModelPatchRaw(layers=layers)
+
+
+def _normalize_lora_keys(layer_dict: dict[str, torch.Tensor], alpha: float | None) -> dict[str, torch.Tensor]:
+ """Normalize LoRA key names to internal format."""
+ if "lora_A.weight" in layer_dict:
+ values: dict[str, torch.Tensor] = {
+ "lora_down.weight": layer_dict["lora_A.weight"],
+ "lora_up.weight": layer_dict["lora_B.weight"],
+ }
+ if alpha is not None:
+ values["alpha"] = torch.tensor(alpha)
+ return values
+ elif "lora_down.weight" in layer_dict:
+ return layer_dict
+ else:
+ return layer_dict
+
+
+def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
+ """Group state dict keys by layer path."""
+ layer_dict: dict[str, dict[str, torch.Tensor]] = {}
+
+ known_suffixes = [
+ ".lora_A.weight",
+ ".lora_B.weight",
+ ".lora_down.weight",
+ ".lora_up.weight",
+ ".dora_scale",
+ ".alpha",
+ ]
+
+ for key in state_dict:
+ if not isinstance(key, str):
+ continue
+
+ layer_name = None
+ key_name = None
+ for suffix in known_suffixes:
+ if key.endswith(suffix):
+ layer_name = key[: -len(suffix)]
+ key_name = suffix[1:]
+ break
+
+ if layer_name is None:
+ parts = key.rsplit(".", maxsplit=2)
+ layer_name = parts[0]
+ key_name = ".".join(parts[1:])
+
+ if layer_name not in layer_dict:
+ layer_dict[layer_name] = {}
+ layer_dict[layer_name][key_name] = state_dict[key]
+
+ return layer_dict
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index de5253f073..054e04dcb2 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -17,7 +17,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from diffusers.utils.import_utils import is_xformers_available
from pydantic import Field
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import IPAdapterData, TextConditioningData
@@ -139,7 +139,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
+ feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
@@ -151,7 +151,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: Optional[StableDiffusionSafetyChecker],
- feature_extractor: Optional[CLIPFeatureExtractor],
+ feature_extractor: Optional[CLIPImageProcessor],
requires_safety_checker: bool = False,
):
super().__init__(
diff --git a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
index e6ca9aa18e..6a9959f1e8 100644
--- a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
+++ b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
@@ -88,6 +88,23 @@ class ZImageConditioningInfo:
return self
+@dataclass
+class QwenImageConditioningInfo:
+ """Qwen Image Edit conditioning information from Qwen2.5-VL encoder."""
+
+ prompt_embeds: torch.Tensor
+ """Text/image embeddings from Qwen2.5-VL encoder. Shape: (batch_size, seq_len, hidden_size)."""
+
+ prompt_embeds_mask: torch.Tensor | None = None
+ """Attention mask for prompt_embeds. Shape: (batch_size, seq_len). 1 for valid, 0 for padding."""
+
+ def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
+ self.prompt_embeds = self.prompt_embeds.to(device=device, dtype=dtype)
+ if self.prompt_embeds_mask is not None:
+ self.prompt_embeds_mask = self.prompt_embeds_mask.to(device=device)
+ return self
+
+
@dataclass
class AnimaConditioningInfo:
"""Anima text conditioning information from Qwen3 0.6B encoder + T5-XXL tokenizer.
@@ -125,6 +142,7 @@ class ConditioningFieldData:
| List[SD3ConditioningInfo]
| List[CogView4ConditioningInfo]
| List[ZImageConditioningInfo]
+ | List[QwenImageConditioningInfo]
| List[AnimaConditioningInfo]
)
diff --git a/invokeai/frontend/web/openapi.json b/invokeai/frontend/web/openapi.json
index af8476528d..19e5a3a68e 100644
--- a/invokeai/frontend/web/openapi.json
+++ b/invokeai/frontend/web/openapi.json
@@ -6463,6 +6463,23 @@
"title": "Has Been Opened"
},
"description": "Whether to include/exclude recent workflows"
+ },
+ {
+ "name": "is_public",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Is Public"
+ },
+ "description": "Filter by public/shared status"
}
],
"responses": {
@@ -6655,6 +6672,23 @@
"title": "Categories"
},
"description": "The categories to include"
+ },
+ {
+ "name": "is_public",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Is Public"
+ },
+ "description": "Filter by public/shared status"
}
],
"responses": {
@@ -6744,6 +6778,23 @@
"title": "Has Been Opened"
},
"description": "Whether to include/exclude recent workflows"
+ },
+ {
+ "name": "is_public",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Is Public"
+ },
+ "description": "Filter by public/shared status"
}
],
"responses": {
@@ -6812,6 +6863,23 @@
"title": "Has Been Opened"
},
"description": "Whether to include/exclude recent workflows"
+ },
+ {
+ "name": "is_public",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Is Public"
+ },
+ "description": "Filter by public/shared status"
}
],
"responses": {
@@ -7352,6 +7420,67 @@
}
}
}
+ },
+ "/api/v1/workflows/i/{workflow_id}/is_public": {
+ "patch": {
+ "tags": ["workflows"],
+ "summary": "Update Workflow Is Public",
+ "description": "Updates whether a workflow is shared publicly",
+ "operationId": "update_workflow_is_public",
+ "parameters": [
+ {
+ "name": "workflow_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "title": "Workflow Id"
+ },
+ "description": "The workflow to update"
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "properties": {
+ "is_public": {
+ "type": "boolean",
+ "title": "Is Public",
+ "description": "Whether the workflow should be shared publicly"
+ }
+ },
+ "type": "object",
+ "required": ["is_public"],
+ "title": "Body_update_workflow_is_public"
+ }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/WorkflowRecordDTO"
+ }
+ }
+ }
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/HTTPValidationError"
+ }
+ }
+ }
+ }
+ }
+ }
}
},
"components": {
@@ -59137,10 +59266,20 @@
"workflow": {
"$ref": "#/components/schemas/Workflow",
"description": "The workflow."
+ },
+ "user_id": {
+ "type": "string",
+ "title": "User Id",
+ "description": "The id of the user who owns this workflow."
+ },
+ "is_public": {
+ "type": "boolean",
+ "title": "Is Public",
+ "description": "Whether this workflow is shared with all users."
}
},
"type": "object",
- "required": ["workflow_id", "name", "created_at", "updated_at", "workflow"],
+ "required": ["workflow_id", "name", "created_at", "updated_at", "workflow", "user_id", "is_public"],
"title": "WorkflowRecordDTO"
},
"WorkflowRecordListItemWithThumbnailDTO": {
@@ -59222,15 +59361,35 @@
],
"title": "Thumbnail Url",
"description": "The URL of the workflow thumbnail."
+ },
+ "user_id": {
+ "type": "string",
+ "title": "User Id",
+ "description": "The id of the user who owns this workflow."
+ },
+ "is_public": {
+ "type": "boolean",
+ "title": "Is Public",
+ "description": "Whether this workflow is shared with all users."
}
},
"type": "object",
- "required": ["workflow_id", "name", "created_at", "updated_at", "description", "category", "tags"],
+ "required": [
+ "workflow_id",
+ "name",
+ "created_at",
+ "updated_at",
+ "description",
+ "category",
+ "tags",
+ "user_id",
+ "is_public"
+ ],
"title": "WorkflowRecordListItemWithThumbnailDTO"
},
"WorkflowRecordOrderBy": {
"type": "string",
- "enum": ["created_at", "updated_at", "opened_at", "name"],
+ "enum": ["created_at", "updated_at", "opened_at", "name", "is_public"],
"title": "WorkflowRecordOrderBy",
"description": "The order by options for workflow records"
},
@@ -59303,10 +59462,20 @@
],
"title": "Thumbnail Url",
"description": "The URL of the workflow thumbnail."
+ },
+ "user_id": {
+ "type": "string",
+ "title": "User Id",
+ "description": "The id of the user who owns this workflow."
+ },
+ "is_public": {
+ "type": "boolean",
+ "title": "Is Public",
+ "description": "Whether this workflow is shared with all users."
}
},
"type": "object",
- "required": ["workflow_id", "name", "created_at", "updated_at", "workflow"],
+ "required": ["workflow_id", "name", "created_at", "updated_at", "workflow", "user_id", "is_public"],
"title": "WorkflowRecordWithThumbnailDTO"
},
"WorkflowWithoutID": {
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index cb00d2a767..2e54f250f9 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -161,7 +161,17 @@
"imagesWithCount_other": "{{count}} images",
"assetsWithCount_one": "{{count}} asset",
"assetsWithCount_other": "{{count}} assets",
- "updateBoardError": "Error updating board"
+ "updateBoardError": "Error updating board",
+ "setBoardVisibility": "Set Board Visibility",
+ "setVisibilityPrivate": "Set Private",
+ "setVisibilityShared": "Set Shared",
+ "setVisibilityPublic": "Set Public",
+ "visibilityPrivate": "Private",
+ "visibilityShared": "Shared",
+ "visibilityPublic": "Public",
+ "visibilityBadgeShared": "Shared board",
+ "visibilityBadgePublic": "Public board",
+ "updateBoardVisibilityError": "Error updating board visibility"
},
"accordions": {
"generation": {
@@ -1199,7 +1209,9 @@
"numImages": "Num Images",
"modelPickerFallbackNoModelsInstalled": "No models installed.",
"modelPickerFallbackNoModelsInstalled2": "Visit the Model Manager to install models.",
+ "modelPickerFallbackNoModelsInstalledNonAdmin": "No models installed. Ask your InvokeAI administrator () to install some models.",
"noModelsInstalledDesc1": "Install models with the",
+ "noModelsInstalledAskAdmin": "Ask your administrator to install some.",
"noModelSelected": "No Model Selected",
"noMatchingModels": "No matching models",
"noModelsInstalled": "No models installed",
@@ -1295,6 +1307,12 @@
"flux2KleinVaePlaceholder": "From main model",
"flux2KleinQwen3Encoder": "Qwen3 Encoder (optional)",
"flux2KleinQwen3EncoderPlaceholder": "From main model",
+ "qwenImageComponentSource": "VAE/Encoder Source (Diffusers)",
+ "qwenImageComponentSourcePlaceholder": "Required for GGUF models",
+ "qwenImageQuantization": "Encoder Quantization",
+ "qwenImageQuantizationNone": "None (bf16)",
+ "qwenImageQuantizationInt8": "8-bit (int8)",
+ "qwenImageQuantizationNf4": "4-bit (nf4)",
"upcastAttention": "Upcast Attention",
"uploadImage": "Upload Image",
"urlOrLocalPath": "URL or Local Path",
@@ -1562,6 +1580,7 @@
"info": "Info",
"invoke": {
"addingImagesTo": "Adding images to",
+ "boardNotWritable": "You do not have write access to board \"{{boardName}}\". Select a board you own or switch to Uncategorized.",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
"invoke": "Invoke",
"missingFieldTemplate": "Missing field template",
@@ -1588,6 +1607,7 @@
"noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
"noCLIPEmbedModelSelected": "No CLIP Embed model selected for FLUX generation",
"noQwen3EncoderModelSelected": "No Qwen3 Encoder model selected for FLUX2 Klein generation",
+ "noQwenImageComponentSourceSelected": "GGUF Qwen Image models require a Diffusers Component Source for VAE/encoder",
"noZImageVaeSourceSelected": "No VAE source: Select VAE (FLUX) or Qwen3 Source model",
"noZImageQwen3EncoderSourceSelected": "No Qwen3 Encoder source: Select Qwen3 Encoder or Qwen3 Source model",
"noAnimaVaeModelSelected": "No Anima VAE model selected",
@@ -1641,6 +1661,7 @@
"sendToCanvas": "Send To Canvas",
"sendToUpscale": "Send To Upscale",
"showOptionsPanel": "Show Side Panel (O or T)",
+ "shift": "Shift",
"shuffle": "Shuffle Seed",
"steps": "Steps",
"strength": "Strength",
@@ -2317,6 +2338,8 @@
"tags": "Tags",
"yourWorkflows": "Your Workflows",
"recentlyOpened": "Recently Opened",
+ "sharedWorkflows": "Shared Workflows",
+ "shareWorkflow": "Shared workflow",
"noRecentWorkflows": "No Recent Workflows",
"private": "Private",
"shared": "Shared",
@@ -3051,6 +3074,7 @@
"tileOverlap": "Tile Overlap",
"postProcessingMissingModelWarning": "Visit the Model Manager to install a post-processing (image to image) model.",
"missingModelsWarning": "Visit the Model Manager to install the required models:",
+ "missingModelsWarningNonAdmin": "Ask your InvokeAI administrator () to install the required models:",
"mainModelDesc": "Main model (SD1.5 or SDXL architecture)",
"tileControlNetModelDesc": "Tile ControlNet model for the chosen main model architecture",
"upscaleModelDesc": "Upscale (image to image) model",
@@ -3159,6 +3183,7 @@
},
"workflows": {
"description": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results.",
+ "descriptionMultiuser": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results. You may share your workflows with other users of the system by selecting 'Shared workflow' when you create or edit it.",
"learnMoreLink": "Learn more about creating workflows",
"browseTemplates": {
"title": "Browse Workflow Templates",
@@ -3237,9 +3262,11 @@
"toGetStartedLocal": "To get started, make sure to download or import models needed to run Invoke. Then, enter a prompt in the box and click Invoke to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the Gallery or edit them to the Canvas.",
"toGetStarted": "To get started, enter a prompt in the box and click Invoke to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the Gallery or edit them to the Canvas.",
"toGetStartedWorkflow": "To get started, fill in the fields on the left and press Invoke to generate your image. Want to explore more workflows? Click the folder icon next to the workflow title to see a list of other templates you can try.",
+ "toGetStartedNonAdmin": "To get started, ask your InvokeAI administrator () to install the AI models needed to run Invoke. Then, enter a prompt in the box and click Invoke to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the Gallery or edit them to the Canvas.",
"gettingStartedSeries": "Want more guidance? Check out our Getting Started Series for tips on unlocking the full potential of the Invoke Studio.",
"lowVRAMMode": "For best performance, follow our Low VRAM guide.",
- "noModelsInstalled": "It looks like you don't have any models installed! You can download a starter model bundle or import models."
+ "noModelsInstalled": "It looks like you don't have any models installed! You can download a starter model bundle or import models.",
+ "noModelsInstalledAskAdmin": "Ask your administrator to install some."
},
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx
index e0e72d12ff..fa4c29b8f4 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx
@@ -12,10 +12,14 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
effect: (action) => {
log.debug(action.payload, 'Bulk download requested');
- // If we have an item name, we are processing the bulk download locally and should use it as the toast id to
- // prevent multiple toasts for the same item.
+ // Use a "preparing:" prefix so this toast cannot collide with the
+ // "ready to download" toast that arrives via the bulk_download_complete
+ // socket event. The background task can complete in under 20ms, so the
+ // socket event may arrive *before* this Redux middleware runs — without
+ // distinct IDs the "preparing" toast would overwrite the "ready" toast.
+ const itemName = action.payload.bulk_download_item_name;
toast({
- id: action.payload.bulk_download_item_name ?? undefined,
+ id: itemName ? `preparing:${itemName}` : undefined,
title: t('gallery.bulkDownloadRequested'),
status: 'success',
// Show the response message if it exists, otherwise show the default message
diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
index 251403ed04..1c7941106b 100644
--- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
+++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
@@ -11,6 +11,7 @@ import {
kleinQwen3EncoderModelSelected,
kleinVaeModelSelected,
modelChanged,
+ qwenImageComponentSourceSelected,
resolutionPresetSelected,
setZImageScheduler,
syncedToOptimalDimension,
@@ -29,12 +30,18 @@ import {
selectBboxModelBase,
selectCanvasSlice,
} from 'features/controlLayers/store/selectors';
-import { getEntityIdentifier, isAspectRatioID, isFlux2ReferenceImageConfig } from 'features/controlLayers/store/types';
+import {
+ getEntityIdentifier,
+ isAspectRatioID,
+ isFlux2ReferenceImageConfig,
+ isQwenImageReferenceImageConfig,
+} from 'features/controlLayers/store/types';
import {
initialFlux2ReferenceImage,
initialFluxKontextReferenceImage,
initialFLUXRedux,
initialIPAdapter,
+ initialQwenImageReferenceImage,
} from 'features/controlLayers/store/util';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/modelManagerV2/models';
import { zModelIdentifierField } from 'features/nodes/types/common';
@@ -49,6 +56,7 @@ import {
selectFluxVAEModels,
selectGlobalRefImageModels,
selectQwen3EncoderModels,
+ selectQwenImageDiffusersModels,
selectRegionalRefImageModels,
selectT5EncoderModels,
selectZImageDiffusersModels,
@@ -238,6 +246,44 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
}
}
+ // handle incompatible Qwen Image Edit component source - clear if switching away
+ const { qwenImageComponentSource } = state.params;
+ if (newBase !== 'qwen-image') {
+ if (qwenImageComponentSource) {
+ dispatch(qwenImageComponentSourceSelected(null));
+ modelsUpdatedDisabledOrCleared += 1;
+ }
+ } else {
+ // Switching to Qwen Image - auto-default component source to a matching diffusers model
+ if (!qwenImageComponentSource) {
+ const availableQwenImageDiffusers = selectQwenImageDiffusersModels(state);
+
+ // Look up the new model's variant to match generate vs edit
+ const modelConfigsResult = selectModelConfigsQuery(state);
+ let selectedVariant: string | null = null;
+ if (modelConfigsResult.data) {
+ const newModelConfig = modelConfigsAdapterSelectors.selectById(modelConfigsResult.data, newModel.key);
+ if (newModelConfig && 'variant' in newModelConfig && typeof newModelConfig.variant === 'string') {
+ selectedVariant = newModelConfig.variant;
+ }
+ }
+
+ // Find a diffusers model matching the variant; if no variant on denoiser, prefer "generate" then "edit"
+ const variantToMatch = selectedVariant ?? 'generate';
+ const matchingModel = availableQwenImageDiffusers.find(
+ (m) => 'variant' in m && m.variant === variantToMatch
+ );
+ const fallbackModel = availableQwenImageDiffusers.find(
+ (m) => 'variant' in m && m.variant !== variantToMatch
+ );
+ const diffusersModel = matchingModel ?? fallbackModel ?? availableQwenImageDiffusers[0];
+
+ if (diffusersModel) {
+ dispatch(qwenImageComponentSourceSelected(zModelIdentifierField.parse(diffusersModel)));
+ }
+ }
+ }
+
if (newModel.base !== 'external' && SUPPORTS_REF_IMAGES_BASE_MODELS.includes(newModel.base)) {
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
@@ -280,6 +326,20 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
continue;
}
+ if (newBase === 'qwen-image') {
+ // Switching TO Qwen Image Edit - convert any non-qwen configs to qwen_image_reference_image
+ if (!isQwenImageReferenceImageConfig(entity.config)) {
+ dispatch(
+ refImageConfigChanged({
+ id: entity.id,
+ config: { ...initialQwenImageReferenceImage },
+ })
+ );
+ modelsUpdatedDisabledOrCleared += 1;
+ }
+ continue;
+ }
+
if (isFlux2ReferenceImageConfig(entity.config)) {
// Switching AWAY from FLUX.2 - convert flux2_reference_image to the appropriate config type
let newConfig;
@@ -304,6 +364,30 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
continue;
}
+ if (isQwenImageReferenceImageConfig(entity.config)) {
+ // Switching AWAY from Qwen Image Edit - convert to the appropriate config type
+ let newConfig;
+ if (newGlobalRefImageModel) {
+ const parsedModel = zModelIdentifierField.parse(newGlobalRefImageModel);
+ if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
+ newConfig = { ...initialFluxKontextReferenceImage, model: parsedModel };
+ } else if (newGlobalRefImageModel.type === 'flux_redux') {
+ newConfig = { ...initialFLUXRedux, model: parsedModel };
+ } else {
+ newConfig = { ...initialIPAdapter, model: parsedModel };
+ if (parsedModel.base === 'flux') {
+ newConfig.clipVisionModel = 'ViT-L';
+ }
+ }
+ } else {
+ // No compatible model found - fall back to an empty IP adapter config
+ newConfig = { ...initialIPAdapter };
+ }
+ dispatch(refImageConfigChanged({ id: entity.id, config: newConfig }));
+ modelsUpdatedDisabledOrCleared += 1;
+ continue;
+ }
+
// Standard handling for non-flux2 configs
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
@@ -391,6 +475,32 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
}
}
+ // Handle Qwen Image model changes within the same base (variant may change between generate/edit)
+ // Auto-update the component source diffusers model to match the new variant
+ if (
+ newBase === 'qwen-image' &&
+ state.params.model?.base === 'qwen-image' &&
+ newModel.key !== state.params.model?.key
+ ) {
+ const modelConfigsResult = selectModelConfigsQuery(state);
+ if (modelConfigsResult.data) {
+ const newModelConfig = modelConfigsAdapterSelectors.selectById(modelConfigsResult.data, newModel.key);
+ const newVariant =
+ newModelConfig && 'variant' in newModelConfig && typeof newModelConfig.variant === 'string'
+ ? newModelConfig.variant
+ : 'generate';
+
+ const availableQwenImageDiffusers = selectQwenImageDiffusersModels(state);
+ const matchingModel = availableQwenImageDiffusers.find((m) => 'variant' in m && m.variant === newVariant);
+ const fallbackModel = availableQwenImageDiffusers.find((m) => 'variant' in m && m.variant !== newVariant);
+ const diffusersModel = matchingModel ?? fallbackModel ?? availableQwenImageDiffusers[0];
+
+ if (diffusersModel) {
+ dispatch(qwenImageComponentSourceSelected(zModelIdentifierField.parse(diffusersModel)));
+ }
+ }
+ }
+
// Handle Z-Image scheduler when switching to Z-Image Base (zbase) model
// LCM is not supported for undistilled models, so reset to euler
if (newBase === 'z-image' && state.params.zImageScheduler === 'lcm') {
diff --git a/invokeai/frontend/web/src/common/components/Picker/Picker.tsx b/invokeai/frontend/web/src/common/components/Picker/Picker.tsx
index ffd0b30242..b70e44dd64 100644
--- a/invokeai/frontend/web/src/common/components/Picker/Picker.tsx
+++ b/invokeai/frontend/web/src/common/components/Picker/Picker.tsx
@@ -867,7 +867,7 @@ const GroupToggleButtons = typedMemo(() => {
}
return (
-
+
{groups.map((group) => (
))}
@@ -927,6 +927,7 @@ const GroupToggleButton = typedMemo(({ group }: { group: Group
size="xs"
variant="solid"
userSelect="none"
+ flexShrink={0}
bg={bg}
color={color}
borderColor={groupColor}
diff --git a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
index 00217eb796..5ac6ffcb7c 100644
--- a/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
+++ b/invokeai/frontend/web/src/features/changeBoardModal/components/ChangeBoardModal.tsx
@@ -3,6 +3,7 @@ import { Combobox, ConfirmationAlertDialog, Flex, FormControl, Text } from '@inv
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import {
changeBoardReset,
isModalOpenChanged,
@@ -13,6 +14,7 @@ import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
import { useAddImagesToBoardMutation, useRemoveImagesFromBoardMutation } from 'services/api/endpoints/images';
+import type { BoardDTO } from 'services/api/types';
const selectImagesToChange = createSelector(
selectChangeBoardModalSlice,
@@ -28,6 +30,7 @@ const ChangeBoardModal = () => {
useAssertSingleton('ChangeBoardModal');
const dispatch = useAppDispatch();
const currentBoardId = useAppSelector(selectSelectedBoardId);
+ const currentUser = useAppSelector(selectCurrentUser);
const [selectedBoardId, setSelectedBoardId] = useState();
const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true });
const isModalOpen = useAppSelector(selectIsModalOpen);
@@ -36,10 +39,20 @@ const ChangeBoardModal = () => {
const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
const { t } = useTranslation();
+ // Returns true if the current user can write images to the given board.
+ const canWriteToBoard = useCallback(
+ (board: BoardDTO): boolean => {
+ const isOwnerOrAdmin = !currentUser || currentUser.is_admin || board.user_id === currentUser.user_id;
+ return isOwnerOrAdmin || board.board_visibility === 'public';
+ },
+ [currentUser]
+ );
+
const options = useMemo(() => {
return [{ label: t('boards.uncategorized'), value: 'none' }]
.concat(
(boards ?? [])
+ .filter(canWriteToBoard)
.map((board) => ({
label: board.board_name,
value: board.board_id,
@@ -47,7 +60,7 @@ const ChangeBoardModal = () => {
.sort((a, b) => a.label.localeCompare(b.label))
)
.filter((board) => board.value !== currentBoardId);
- }, [boards, currentBoardId, t]);
+ }, [boards, canWriteToBoard, currentBoardId, t]);
const value = useMemo(() => options.find((o) => o.value === selectedBoardId), [options, selectedBoardId]);
diff --git a/invokeai/frontend/web/src/features/controlLayers/components/RefImage/RefImageSettings.tsx b/invokeai/frontend/web/src/features/controlLayers/components/RefImage/RefImageSettings.tsx
index 1a5d7bbebd..54b345361d 100644
--- a/invokeai/frontend/web/src/features/controlLayers/components/RefImage/RefImageSettings.tsx
+++ b/invokeai/frontend/web/src/features/controlLayers/components/RefImage/RefImageSettings.tsx
@@ -34,7 +34,12 @@ import type {
FLUXReduxImageInfluence as FLUXReduxImageInfluenceType,
IPMethodV2,
} from 'features/controlLayers/store/types';
-import { isFlux2ReferenceImageConfig, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
+import {
+ isFlux2ReferenceImageConfig,
+ isFLUXReduxConfig,
+ isIPAdapterConfig,
+ isQwenImageReferenceImageConfig,
+} from 'features/controlLayers/store/types';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
@@ -124,8 +129,9 @@ const RefImageSettingsContent = memo(() => {
const isFLUX = useAppSelector(selectIsFLUX);
const isExternalModel = !!mainModelConfig && isExternalApiModelConfig(mainModelConfig);
- // FLUX.2 Klein and external API models do not require a ref image model selection.
- const showModelSelector = !isFlux2ReferenceImageConfig(config) && !isExternalModel;
+ // FLUX.2 Klein, Qwen Image Edit and external API models do not require a ref image model selection.
+ const showModelSelector =
+ !isFlux2ReferenceImageConfig(config) && !isQwenImageReferenceImageConfig(config) && !isExternalModel;
return (
diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
index 038af19603..2027ff4174 100644
--- a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts
@@ -29,6 +29,7 @@ import type {
Flux2ReferenceImageConfig,
FluxKontextReferenceImageConfig,
IPAdapterConfig,
+ QwenImageReferenceImageConfig,
RegionalGuidanceIPAdapterConfig,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
@@ -37,6 +38,7 @@ import {
initialFlux2ReferenceImage,
initialFluxKontextReferenceImage,
initialIPAdapter,
+ initialQwenImageReferenceImage,
initialRegionalGuidanceIPAdapter,
initialT2IAdapter,
} from 'features/controlLayers/store/util';
@@ -78,7 +80,7 @@ export const selectDefaultControlAdapter = createSelector(
export const getDefaultRefImageConfig = (
getState: AppGetState
-): IPAdapterConfig | FluxKontextReferenceImageConfig | Flux2ReferenceImageConfig => {
+): IPAdapterConfig | FluxKontextReferenceImageConfig | Flux2ReferenceImageConfig | QwenImageReferenceImageConfig => {
const state = getState();
const mainModelConfig = selectMainModelConfig(state);
@@ -91,6 +93,11 @@ export const getDefaultRefImageConfig = (
return deepClone(initialFlux2ReferenceImage);
}
+ // Qwen Image Edit has built-in reference image support - no model needed
+ if (base === 'qwen-image') {
+ return deepClone(initialQwenImageReferenceImage);
+ }
+
if (base === 'flux' && mainModelConfig?.name?.toLowerCase().includes('kontext')) {
const config = deepClone(initialFluxKontextReferenceImage);
config.model = zModelIdentifierField.parse(mainModelConfig);
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
index 15a53cd037..07cee8211c 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
@@ -261,6 +261,19 @@ const slice = createSlice({
}
state.kleinQwen3EncoderModel = result.data;
},
+ qwenImageComponentSourceSelected: (state, action: PayloadAction) => {
+ const result = zParamsState.shape.qwenImageComponentSource.safeParse(action.payload);
+ if (!result.success) {
+ return;
+ }
+ state.qwenImageComponentSource = result.data;
+ },
+ qwenImageQuantizationChanged: (state, action: PayloadAction<'none' | 'int8' | 'nf4'>) => {
+ state.qwenImageQuantization = action.payload;
+ },
+ qwenImageShiftChanged: (state, action: PayloadAction) => {
+ state.qwenImageShift = action.payload;
+ },
vaePrecisionChanged: (state, action: PayloadAction) => {
state.vaePrecision = action.payload;
},
@@ -566,6 +579,9 @@ const resetState = (state: ParamsState): ParamsState => {
newState.animaT5EncoderModel = oldState.animaT5EncoderModel;
newState.kleinVaeModel = oldState.kleinVaeModel;
newState.kleinQwen3EncoderModel = oldState.kleinQwen3EncoderModel;
+ newState.qwenImageComponentSource = oldState.qwenImageComponentSource;
+ newState.qwenImageQuantization = oldState.qwenImageQuantization;
+ newState.qwenImageShift = oldState.qwenImageShift;
return newState;
};
@@ -613,6 +629,9 @@ export const {
zImageQwen3SourceModelSelected,
kleinVaeModelSelected,
kleinQwen3EncoderModelSelected,
+ qwenImageComponentSourceSelected,
+ qwenImageQuantizationChanged,
+ qwenImageShiftChanged,
setClipSkip,
shouldUseCpuNoiseChanged,
setColorCompensation,
@@ -691,6 +710,7 @@ export const selectIsZImage = createParamsSelector((params) => params.model?.bas
export const selectIsAnima = createParamsSelector((params) => params.model?.base === 'anima');
export const selectIsFlux2 = createParamsSelector((params) => params.model?.base === 'flux2');
export const selectIsExternal = createParamsSelector((params) => params.model?.base === 'external');
+export const selectIsQwenImage = createParamsSelector((params) => params.model?.base === 'qwen-image');
export const selectIsFluxKontext = createParamsSelector((params) => {
if (params.model?.base === 'flux' && params.model?.name.toLowerCase().includes('kontext')) {
return true;
@@ -717,6 +737,9 @@ export const selectAnimaT5EncoderModel = createParamsSelector((params) => params
export const selectAnimaScheduler = createParamsSelector((params) => params.animaScheduler);
export const selectKleinVaeModel = createParamsSelector((params) => params.kleinVaeModel);
export const selectKleinQwen3EncoderModel = createParamsSelector((params) => params.kleinQwen3EncoderModel);
+export const selectQwenImageComponentSource = createParamsSelector((params) => params.qwenImageComponentSource);
+export const selectQwenImageQuantization = createParamsSelector((params) => params.qwenImageQuantization);
+export const selectQwenImageShift = createParamsSelector((params) => params.qwenImageShift);
export const selectCFGScale = createParamsSelector((params) => params.cfgScale);
export const selectGuidance = createParamsSelector((params) => params.guidance);
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
index ab21db3fec..2b7c0f7d17 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/refImagesSlice.ts
@@ -22,6 +22,7 @@ import {
isFlux2ReferenceImageConfig,
isFLUXReduxConfig,
isIPAdapterConfig,
+ isQwenImageReferenceImageConfig,
zRefImagesState,
} from './types';
import { getReferenceImageState, initialFluxKontextReferenceImage, initialFLUXRedux, initialIPAdapter } from './util';
@@ -106,8 +107,8 @@ const slice = createSlice({
return;
}
- // FLUX.2 reference images don't have a model field - they use built-in support
- if (isFlux2ReferenceImageConfig(entity.config)) {
+ // FLUX.2 and Qwen Image Edit reference images don't have a model field - they use built-in support
+ if (isFlux2ReferenceImageConfig(entity.config) || isQwenImageReferenceImageConfig(entity.config)) {
return;
}
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
index dad1893911..eb5329dc10 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
@@ -370,6 +370,13 @@ const zFlux2ReferenceImageConfig = z.object({
});
export type Flux2ReferenceImageConfig = z.infer<typeof zFlux2ReferenceImageConfig>;
+// Qwen Image Edit has built-in reference image support - no separate model needed
+const zQwenImageReferenceImageConfig = z.object({
+ type: z.literal('qwen_image_reference_image'),
+ image: zCroppableImageWithDims.nullable(),
+});
+export type QwenImageReferenceImageConfig = z.infer<typeof zQwenImageReferenceImageConfig>;
+
const zCanvasEntityBase = z.object({
id: zId,
name: zName,
@@ -385,6 +392,7 @@ export const zRefImageState = z.object({
zFLUXReduxConfig,
zFluxKontextReferenceImageConfig,
zFlux2ReferenceImageConfig,
+ zQwenImageReferenceImageConfig,
]),
});
export type RefImageState = z.infer<typeof zRefImageState>;
@@ -402,6 +410,10 @@ export const isFluxKontextReferenceImageConfig = (
export const isFlux2ReferenceImageConfig = (config: RefImageState['config']): config is Flux2ReferenceImageConfig =>
config.type === 'flux2_reference_image';
+export const isQwenImageReferenceImageConfig = (
+ config: RefImageState['config']
+): config is QwenImageReferenceImageConfig => config.type === 'qwen_image_reference_image';
+
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
export type FillStyle = z.infer<typeof zFillStyle>;
export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success;
@@ -782,6 +794,10 @@ export const zParamsState = z.object({
// Flux2 Klein model components - uses Qwen3 instead of CLIP+T5
kleinVaeModel: zParameterVAEModel.nullable(), // Optional: Separate FLUX.2 VAE for Klein
kleinQwen3EncoderModel: zModelIdentifierField.nullable(), // Optional: Separate Qwen3 Encoder for Klein
+ // Qwen Image Edit model components - GGUF transformer needs a Diffusers source for VAE/encoder
+ qwenImageComponentSource: zParameterModel.nullable(), // Diffusers model providing VAE + text encoder
+ qwenImageQuantization: z.enum(['none', 'int8', 'nf4']), // BitsAndBytes quantization for Qwen VL encoder
+ qwenImageShift: z.number().nullable(), // Sigma schedule shift override (e.g. 3.0 for Lightning LoRAs)
// Z-Image Seed Variance Enhancer settings
zImageSeedVarianceEnabled: z.boolean(),
zImageSeedVarianceStrength: z.number().min(0).max(2),
@@ -859,6 +875,9 @@ export const getInitialParamsState = (): ParamsState => ({
animaScheduler: 'euler',
kleinVaeModel: null,
kleinQwen3EncoderModel: null,
+ qwenImageComponentSource: null,
+ qwenImageQuantization: 'none' as const,
+ qwenImageShift: null,
zImageSeedVarianceEnabled: false,
zImageSeedVarianceStrength: 0.1,
zImageSeedVarianceRandomizePercent: 50,
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/util.ts b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
index f14af4feee..2aae90e72a 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/util.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
@@ -15,6 +15,7 @@ import type {
FLUXReduxConfig,
ImageWithDims,
IPAdapterConfig,
+ QwenImageReferenceImageConfig,
RasterLayerAdjustments,
RefImageState,
RegionalGuidanceIPAdapterConfig,
@@ -117,6 +118,10 @@ export const initialFlux2ReferenceImage: Flux2ReferenceImageConfig = {
type: 'flux2_reference_image',
image: null,
};
+export const initialQwenImageReferenceImage: QwenImageReferenceImageConfig = {
+ type: 'qwen_image_reference_image',
+ image: null,
+};
export const initialT2IAdapter: T2IAdapterConfig = {
type: 't2i_adapter',
model: null,
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts
index f3aa68d588..db5ad4f766 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/validators.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/validators.ts
@@ -147,8 +147,8 @@ export const getGlobalReferenceImageWarnings = (
const { config } = entity;
- // FLUX.2 reference images don't require a model - it's built-in
- if (config.type !== 'flux2_reference_image') {
+ // FLUX.2 and Qwen Image Edit reference images don't require a model - it's built-in
+ if (config.type !== 'flux2_reference_image' && config.type !== 'qwen_image_reference_image') {
if (!('model' in config) || !config.model) {
// No model selected
warnings.push(WARNINGS.IP_ADAPTER_NO_MODEL_SELECTED);
@@ -159,8 +159,10 @@ export const getGlobalReferenceImageWarnings = (
}
if (!entity.config.image) {
- // No image selected
- warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
+ // No image selected - for Qwen Image Edit, an image is optional (txt2img works without one)
+ if (config.type !== 'qwen_image_reference_image') {
+ warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
+ }
}
}
diff --git a/invokeai/frontend/web/src/features/dnd/dnd.ts b/invokeai/frontend/web/src/features/dnd/dnd.ts
index f5e38d4b94..ee648e82ef 100644
--- a/invokeai/frontend/web/src/features/dnd/dnd.ts
+++ b/invokeai/frontend/web/src/features/dnd/dnd.ts
@@ -434,6 +434,49 @@ export const replaceCanvasEntityObjectsWithImageDndTarget: DndTarget<
//#endregion
//#region Add To Board
+/**
+ * Check whether the current user can move images out of their source board.
+ * Returns false if the source board is a shared board not owned by the current user
+ * (and the user is not an admin). In that case, images can be viewed/used but not moved.
+ */
+const canMoveFromSourceBoard = (sourceBoardId: BoardId, getState: AppGetState): boolean => {
+ const state = getState();
+ // In single-user mode (no auth), always allow
+ const currentUser = state.auth?.user;
+ if (!currentUser) {
+ return true;
+ }
+ // Admins can always move
+ if (currentUser.is_admin) {
+ return true;
+ }
+ // "Uncategorized" (none) — user's own uncategorized images, allow
+ if (sourceBoardId === 'none') {
+ return true;
+ }
+ // Look up the board from the RTK Query cache
+ const boardsQueryState = state.api?.queries;
+ if (boardsQueryState) {
+ for (const query of Object.values(boardsQueryState)) {
+ if (query?.data && Array.isArray(query.data)) {
+ const board = (query.data as Array<{ board_id: string; user_id?: string; board_visibility?: string }>).find(
+ (b) => b.board_id === sourceBoardId
+ );
+ if (board) {
+ // Owner can always move
+ if (board.user_id === currentUser.user_id) {
+ return true;
+ }
+ // Non-owner can only move from public boards
+ return board.board_visibility === 'public';
+ }
+ }
+ }
+ }
+ // Board not found in cache — allow by default to avoid blocking legitimate operations
+ return true;
+};
+
const _addToBoard = buildTypeAndKey('add-to-board');
export type AddImageToBoardDndTargetData = DndData<
typeof _addToBoard.type,
@@ -447,16 +490,23 @@ export const addImageToBoardDndTarget: DndTarget<
..._addToBoard,
typeGuard: buildTypeGuard(_addToBoard.key),
getData: buildGetData(_addToBoard.key, _addToBoard.type),
- isValid: ({ sourceData, targetData }) => {
+ isValid: ({ sourceData, targetData, getState }) => {
if (singleImageDndSource.typeGuard(sourceData)) {
const currentBoard = sourceData.payload.imageDTO.board_id ?? 'none';
const destinationBoard = targetData.payload.boardId;
- return currentBoard !== destinationBoard;
+ if (currentBoard === destinationBoard) {
+ return false;
+ }
+ // Don't allow moving images from shared boards the user doesn't own
+ return canMoveFromSourceBoard(currentBoard, getState);
}
if (multipleImageDndSource.typeGuard(sourceData)) {
const currentBoard = sourceData.payload.board_id;
const destinationBoard = targetData.payload.boardId;
- return currentBoard !== destinationBoard;
+ if (currentBoard === destinationBoard) {
+ return false;
+ }
+ return canMoveFromSourceBoard(currentBoard, getState);
}
return false;
},
@@ -491,15 +541,22 @@ export const removeImageFromBoardDndTarget: DndTarget<
..._removeFromBoard,
typeGuard: buildTypeGuard(_removeFromBoard.key),
getData: buildGetData(_removeFromBoard.key, _removeFromBoard.type),
- isValid: ({ sourceData }) => {
+ isValid: ({ sourceData, getState }) => {
if (singleImageDndSource.typeGuard(sourceData)) {
const currentBoard = sourceData.payload.imageDTO.board_id ?? 'none';
- return currentBoard !== 'none';
+ if (currentBoard === 'none') {
+ return false;
+ }
+ // Don't allow removing images from shared boards the user doesn't own
+ return canMoveFromSourceBoard(currentBoard, getState);
}
if (multipleImageDndSource.typeGuard(sourceData)) {
const currentBoard = sourceData.payload.board_id;
- return currentBoard !== 'none';
+ if (currentBoard === 'none') {
+ return false;
+ }
+ return canMoveFromSourceBoard(currentBoard, getState);
}
return false;
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
index 5cc25f6c03..d10dde6ee4 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx
@@ -2,15 +2,26 @@ import type { ContextMenuProps } from '@invoke-ai/ui-library';
import { ContextMenu, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import { $boardToDelete } from 'features/gallery/components/Boards/DeleteBoardModal';
import { selectAutoAddBoardId, selectAutoAssignBoardOnClick } from 'features/gallery/store/gallerySelectors';
import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
import { toast } from 'features/toast/toast';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { PiArchiveBold, PiArchiveFill, PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
+import {
+ PiArchiveBold,
+ PiArchiveFill,
+ PiDownloadBold,
+ PiGlobeBold,
+ PiLockBold,
+ PiPlusBold,
+ PiShareNetworkBold,
+ PiTrashSimpleBold,
+} from 'react-icons/pi';
import { useUpdateBoardMutation } from 'services/api/endpoints/boards';
import { useBulkDownloadImagesMutation } from 'services/api/endpoints/images';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
import { useBoardName } from 'services/api/hooks/useBoardName';
import type { BoardDTO } from 'services/api/types';
@@ -23,6 +34,7 @@ const BoardContextMenu = ({ board, children }: Props) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const autoAssignBoardOnClick = useAppSelector(selectAutoAssignBoardOnClick);
+ const currentUser = useAppSelector(selectCurrentUser);
const selectIsSelectedForAutoAdd = useMemo(
() => createSelector(selectAutoAddBoardId, (autoAddBoardId) => board.board_id === autoAddBoardId),
[board.board_id]
@@ -35,6 +47,11 @@ const BoardContextMenu = ({ board, children }: Props) => {
const [bulkDownload] = useBulkDownloadImagesMutation();
+ // Only the board owner or admin can modify visibility
+ const canChangeVisibility = currentUser !== null && (currentUser.is_admin || board.user_id === currentUser.user_id);
+
+ const { canDeleteBoard } = useBoardAccess(board);
+
const handleSetAutoAdd = useCallback(() => {
dispatch(autoAddBoardIdChanged(board.board_id));
}, [board.board_id, dispatch]);
@@ -64,6 +81,26 @@ const BoardContextMenu = ({ board, children }: Props) => {
});
}, [board.board_id, updateBoard]);
+ const handleSetVisibility = useCallback(
+ async (visibility: 'private' | 'shared' | 'public') => {
+ try {
+ await updateBoard({
+ board_id: board.board_id,
+ changes: { board_visibility: visibility },
+ }).unwrap();
+ } catch {
+ toast({ status: 'error', title: t('boards.updateBoardVisibilityError') });
+ }
+ },
+ [board.board_id, t, updateBoard]
+ );
+
+ const handleSetVisibilityPrivate = useCallback(() => handleSetVisibility('private'), [handleSetVisibility]);
+
+ const handleSetVisibilityShared = useCallback(() => handleSetVisibility('shared'), [handleSetVisibility]);
+
+ const handleSetVisibilityPublic = useCallback(() => handleSetVisibility('public'), [handleSetVisibility]);
+
const setAsBoardToDelete = useCallback(() => {
$boardToDelete.set(board);
}, [board]);
@@ -83,18 +120,50 @@ const BoardContextMenu = ({ board, children }: Props) => {
{board.archived && (
- } onClick={handleUnarchive}>
+ } onClick={handleUnarchive} isDisabled={!canDeleteBoard}>
{t('boards.unarchiveBoard')}
)}
{!board.archived && (
- } onClick={handleArchive}>
+ } onClick={handleArchive} isDisabled={!canDeleteBoard}>
{t('boards.archiveBoard')}
)}
- } onClick={setAsBoardToDelete} isDestructive>
+ {canChangeVisibility && (
+ <>
+ }
+ onClick={handleSetVisibilityPrivate}
+ isDisabled={board.board_visibility === 'private'}
+ >
+ {t('boards.setVisibilityPrivate')}
+
+ }
+ onClick={handleSetVisibilityShared}
+ isDisabled={board.board_visibility === 'shared'}
+ >
+ {t('boards.setVisibilityShared')}
+
+ }
+ onClick={handleSetVisibilityPublic}
+ isDisabled={board.board_visibility === 'public'}
+ >
+ {t('boards.setVisibilityPublic')}
+
+ >
+ )}
+
+ }
+ onClick={setAsBoardToDelete}
+ isDestructive
+ isDisabled={!canDeleteBoard}
+ >
{t('boards.deleteBoard')}
@@ -108,8 +177,14 @@ const BoardContextMenu = ({ board, children }: Props) => {
t,
handleBulkDownload,
board.archived,
+ board.board_visibility,
handleUnarchive,
handleArchive,
+ canChangeVisibility,
+ handleSetVisibilityPrivate,
+ handleSetVisibilityShared,
+ handleSetVisibilityPublic,
+ canDeleteBoard,
setAsBoardToDelete,
]
);
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardEditableTitle.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardEditableTitle.tsx
index 67c7dad6ed..cf2749e340 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardEditableTitle.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardEditableTitle.tsx
@@ -7,6 +7,7 @@ import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPencilBold } from 'react-icons/pi';
import { useUpdateBoardMutation } from 'services/api/endpoints/boards';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
import type { BoardDTO } from 'services/api/types';
type Props = {
@@ -19,6 +20,7 @@ export const BoardEditableTitle = memo(({ board, isSelected }: Props) => {
const isHovering = useBoolean(false);
const inputRef = useRef<HTMLInputElement>(null);
const [updateBoard, updateBoardResult] = useUpdateBoardMutation();
+ const { canRenameBoard } = useBoardAccess(board);
const onChange = useCallback(
async (board_name: string) => {
@@ -51,13 +53,13 @@ export const BoardEditableTitle = memo(({ board, isSelected }: Props) => {
fontWeight="semibold"
userSelect="none"
color={isSelected ? 'base.100' : 'base.300'}
- onDoubleClick={editable.startEditing}
- cursor="text"
+ onDoubleClick={canRenameBoard ? editable.startEditing : undefined}
+ cursor={canRenameBoard ? 'text' : 'default'}
noOfLines={1}
>
{editable.value}
- {isHovering.isTrue && (
+ {canRenameBoard && isHovering.isTrue && (
}
diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
index 4d821f819c..10fbe61832 100644
--- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx
@@ -18,8 +18,9 @@ import {
import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
-import { PiArchiveBold, PiImageSquare } from 'react-icons/pi';
+import { PiArchiveBold, PiGlobeBold, PiImageSquare, PiShareNetworkBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
import type { BoardDTO } from 'services/api/types';
const _hover: SystemStyleObject = {
@@ -62,6 +63,8 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
const showOwner = currentUser?.is_admin && board.owner_username;
+ const { canWriteImages } = useBoardAccess(board);
+
return (
@@ -99,6 +102,20 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
{autoAddBoardId === board.board_id && }
{board.archived && }
+ {board.board_visibility === 'shared' && (
+
+
+
+
+
+ )}
+ {board.board_visibility === 'public' && (
+
+
+
+
+
+ )}
{board.image_count} | {board.asset_count}
@@ -108,7 +125,12 @@ const GalleryBoard = ({ board, isSelected }: GalleryBoardProps) => {
)}
-
+
);
};
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx
index 7176487015..f5c044132e 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemChangeBoard.tsx
@@ -5,11 +5,15 @@ import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFoldersBold } from 'react-icons/pi';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
+import { useSelectedBoard } from 'services/api/hooks/useSelectedBoard';
export const ContextMenuItemChangeBoard = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
+ const selectedBoard = useSelectedBoard();
+ const { canWriteImages } = useBoardAccess(selectedBoard);
const onClick = useCallback(() => {
dispatch(imagesToChangeSelected([imageDTO.image_name]));
@@ -17,7 +21,7 @@ export const ContextMenuItemChangeBoard = memo(() => {
}, [dispatch, imageDTO]);
return (
- } onClickCapture={onClick}>
+ } onClickCapture={onClick} isDisabled={!canWriteImages}>
{t('boards.changeBoard')}
);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx
index e20221f342..5dfa7116b1 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MenuItems/ContextMenuItemDeleteImage.tsx
@@ -4,11 +4,15 @@ import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleBold } from 'react-icons/pi';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
+import { useSelectedBoard } from 'services/api/hooks/useSelectedBoard';
export const ContextMenuItemDeleteImage = memo(() => {
const { t } = useTranslation();
const deleteImageModal = useDeleteImageModalApi();
const imageDTO = useImageDTOContext();
+ const selectedBoard = useSelectedBoard();
+ const { canWriteImages } = useBoardAccess(selectedBoard);
const onClick = useCallback(async () => {
try {
@@ -18,6 +22,10 @@ export const ContextMenuItemDeleteImage = memo(() => {
}
}, [deleteImageModal, imageDTO]);
+ if (!canWriteImages) {
+ return null;
+ }
+
return (
}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
index d148332943..ee3c8e4e98 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ContextMenu/MultipleSelectionMenuItems.tsx
@@ -10,12 +10,16 @@ import {
useStarImagesMutation,
useUnstarImagesMutation,
} from 'services/api/endpoints/images';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
+import { useSelectedBoard } from 'services/api/hooks/useSelectedBoard';
const MultipleSelectionMenuItems = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selection = useAppSelector((s) => s.gallery.selection);
const deleteImageModal = useDeleteImageModalApi();
+ const selectedBoard = useSelectedBoard();
+ const { canWriteImages } = useBoardAccess(selectedBoard);
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
@@ -53,11 +57,16 @@ const MultipleSelectionMenuItems = () => {
} onClickCapture={handleBulkDownload}>
{t('gallery.downloadSelection')}
- } onClickCapture={handleChangeBoard}>
+ } onClickCapture={handleChangeBoard} isDisabled={!canWriteImages}>
{t('boards.changeBoard')}
- } onClickCapture={handleDeleteSelection}>
+ }
+ onClickCapture={handleDeleteSelection}
+ isDisabled={!canWriteImages}
+ >
{t('gallery.deleteSelection')}
>
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
index ccd58992ef..af1d376887 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryImage.tsx
@@ -108,6 +108,25 @@ export const GalleryImage = memo(({ imageDTO }: Props) => {
if (!element) {
return;
}
+
+ const monitorBinding = monitorForElements({
+ // This is a "global" drag start event, meaning that it is called for all drag events.
+ onDragStart: ({ source }) => {
+ // When we start dragging multiple images, set the dragging state to true if the dragged image is part of the
+ // selection. This is called for all drag events.
+ if (
+ multipleImageDndSource.typeGuard(source.data) &&
+ source.data.payload.image_names.includes(imageDTO.image_name)
+ ) {
+ setIsDragging(true);
+ }
+ },
+ onDrop: () => {
+ // Always set the dragging state to false when a drop event occurs.
+ setIsDragging(false);
+ },
+ });
+
return combine(
firefoxDndFix(element),
draggable({
@@ -153,23 +172,7 @@ export const GalleryImage = memo(({ imageDTO }: Props) => {
}
},
}),
- monitorForElements({
- // This is a "global" drag start event, meaning that it is called for all drag events.
- onDragStart: ({ source }) => {
- // When we start dragging multiple images, set the dragging state to true if the dragged image is part of the
- // selection. This is called for all drag events.
- if (
- multipleImageDndSource.typeGuard(source.data) &&
- source.data.payload.image_names.includes(imageDTO.image_name)
- ) {
- setIsDragging(true);
- }
- },
- onDrop: () => {
- // Always set the dragging state to false when a drop event occurs.
- setIsDragging(false);
- },
- })
+ monitorBinding
);
}, [imageDTO, store]);
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
index 0a97bf819d..612e6361b1 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/GalleryItemDeleteIconButton.tsx
@@ -5,6 +5,8 @@ import type { MouseEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
+import { useSelectedBoard } from 'services/api/hooks/useSelectedBoard';
import type { ImageDTO } from 'services/api/types';
type Props = {
@@ -15,6 +17,8 @@ export const GalleryItemDeleteIconButton = memo(({ imageDTO }: Props) => {
const shift = useShiftModifier();
const { t } = useTranslation();
const deleteImageModal = useDeleteImageModalApi();
+ const selectedBoard = useSelectedBoard();
+ const { canWriteImages } = useBoardAccess(selectedBoard);
const onClick = useCallback(
(e: MouseEvent) => {
@@ -24,7 +28,7 @@ export const GalleryItemDeleteIconButton = memo(({ imageDTO }: Props) => {
[deleteImageModal, imageDTO]
);
- if (!shift) {
+ if (!shift || !canWriteImages) {
return null;
}
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.test.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.test.tsx
new file mode 100644
index 0000000000..c743315382
--- /dev/null
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.test.tsx
@@ -0,0 +1,24 @@
+import { ImageMetadataHandlers } from 'features/metadata/parsing';
+import { describe, expect, it } from 'vitest';
+
+import { ImageMetadataActions } from './ImageMetadataActions';
+
+describe('ImageMetadataActions', () => {
+ it('includes Qwen metadata handlers in the recall parameters UI', () => {
+ const element = (ImageMetadataActions as unknown as { type: (props: { metadata: unknown }) => unknown }).type({
+ metadata: { model: { key: 'test' } },
+ }) as {
+ props: {
+ children: Array<{ props?: { handler?: unknown } }>;
+ };
+ };
+
+ const handlers = element.props.children
+ .map((child) => child.props?.handler)
+ .filter((handler): handler is unknown => handler !== undefined);
+
+ expect(handlers).toContain(ImageMetadataHandlers.QwenImageComponentSource);
+ expect(handlers).toContain(ImageMetadataHandlers.QwenImageQuantization);
+ expect(handlers).toContain(ImageMetadataHandlers.QwenImageShift);
+ });
+});
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
index 8123db4b0b..e123d0ebd0 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
@@ -58,6 +58,9 @@ export const ImageMetadataActions = memo((props: Props) => {
+
+
+
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
index b8a522c3a6..c301922df9 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageViewer/NoContentForViewer.tsx
@@ -1,7 +1,9 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import { Alert, AlertDescription, AlertIcon, Button, Divider, Flex, Link, Spinner, Text } from '@invoke-ai/ui-library';
+import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { InvokeLogoIcon } from 'common/components/InvokeLogoIcon';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import { LOADING_SYMBOL, useHasImages } from 'features/gallery/hooks/useHasImages';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -9,16 +11,26 @@ import type { PropsWithChildren } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import { PiArrowSquareOutBold, PiImageBold } from 'react-icons/pi';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
import { useMainModels } from 'services/api/hooks/modelsByType';
export const NoContentForViewer = memo(() => {
const hasImages = useHasImages();
const [mainModels, { data }] = useMainModels();
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const user = useAppSelector(selectCurrentUser);
const { t } = useTranslation();
+ const isMultiuser = setupStatus?.multiuser_enabled ?? false;
+ const isAdmin = !isMultiuser || (user?.is_admin ?? false);
+ const adminEmail = setupStatus?.admin_email ?? null;
+
+ const modelsLoaded = data !== undefined;
+ const hasModels = mainModels.length > 0;
+
const showStarterBundles = useMemo(() => {
- return data && mainModels.length === 0;
- }, [mainModels.length, data]);
+ return modelsLoaded && !hasModels && isAdmin;
+ }, [modelsLoaded, hasModels, isAdmin]);
if (hasImages === LOADING_SYMBOL) {
// Blank bg w/ a spinner. The new user experience components below have an invoke logo, but it's not centered.
@@ -36,10 +48,18 @@ export const NoContentForViewer = memo(() => {
-
- {showStarterBundles && }
-
-
+ {isAdmin ? (
+ // Admin / single-user mode
+ <>
+ {modelsLoaded && hasModels ? : }
+ {showStarterBundles && }
+
+
+ >
+ ) : (
+ // Non-admin user in multiuser mode
+ <>{modelsLoaded && hasModels ? : }>
+ )}
);
@@ -99,6 +119,32 @@ const GetStartedLocal = () => {
);
};
+const GetStartedWithModels = () => {
+ return (
+
+
+
+ );
+};
+
+const GetStartedNonAdmin = ({ adminEmail }: { adminEmail: string | null }) => {
+ const AdminEmailLink = adminEmail ? (
+
+ {adminEmail}
+
+ ) : (
+
+ your administrator
+
+ );
+
+ return (
+
+
+
+ );
+};
+
const StarterBundlesCallout = () => {
const handleClickDownloadStarterModels = useCallback(() => {
navigationApi.switchToTab('models');
diff --git a/invokeai/frontend/web/src/features/metadata/parsing.test.ts b/invokeai/frontend/web/src/features/metadata/parsing.test.ts
new file mode 100644
index 0000000000..a8eb2cb8af
--- /dev/null
+++ b/invokeai/frontend/web/src/features/metadata/parsing.test.ts
@@ -0,0 +1,94 @@
+import { describe, expect, it, vi } from 'vitest';
+
+import { ImageMetadataHandlers, MetadataUtils } from './parsing';
+
+const createMockStore = () => ({
+ dispatch: vi.fn(),
+ getState: vi.fn(() => ({
+ params: { model: null },
+ })),
+});
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+const createStore = () => createMockStore() as any;
+
+describe('Qwen metadata parsing', () => {
+ it('does not report missing Qwen metadata keys as available', async () => {
+ const store = createStore();
+
+ const hasMetadata = await MetadataUtils.hasMetadataByHandlers({
+ metadata: {},
+ handlers: [
+ ImageMetadataHandlers.QwenImageComponentSource,
+ ImageMetadataHandlers.QwenImageQuantization,
+ ImageMetadataHandlers.QwenImageShift,
+ ],
+ store,
+ require: 'all',
+ });
+
+ // Handlers reject when keys are absent, so hasMetadata should be false
+ expect(hasMetadata).toBe(false);
+ });
+
+ it('does not recall Qwen values when metadata keys are absent', async () => {
+ const store = createStore();
+
+ const recalled = await MetadataUtils.recallByHandlers({
+ metadata: {},
+ handlers: [
+ ImageMetadataHandlers.QwenImageComponentSource,
+ ImageMetadataHandlers.QwenImageQuantization,
+ ImageMetadataHandlers.QwenImageShift,
+ ],
+ store,
+ silent: true,
+ });
+
+ // No keys present → handlers reject → 0 recalls, no dispatches
+ expect(recalled.size).toBe(0);
+ const mockStore = store as ReturnType;
+ expect(mockStore.dispatch).not.toHaveBeenCalled();
+ });
+
+ it('recalls Qwen handlers with actual values when metadata keys are present', async () => {
+ const store = createStore();
+
+ const recalled = await MetadataUtils.recallByHandlers({
+ metadata: {
+ qwen_image_component_source: { key: 'test-key', hash: 'test', name: 'Test', base: 'qwen-image', type: 'main' },
+ qwen_image_quantization: 'nf4',
+ qwen_image_shift: 3.0,
+ },
+ handlers: [
+ ImageMetadataHandlers.QwenImageComponentSource,
+ ImageMetadataHandlers.QwenImageQuantization,
+ ImageMetadataHandlers.QwenImageShift,
+ ],
+ store,
+ silent: true,
+ });
+
+ expect(recalled.size).toBe(3);
+ const mockStore = store as ReturnType;
+ expect(mockStore.dispatch).toHaveBeenCalledTimes(3);
+ });
+
+ it('recalls Qwen component source as null when key is present but value is null', async () => {
+ const store = createStore();
+
+ const recalled = await MetadataUtils.recallByHandlers({
+ metadata: {
+ qwen_image_component_source: null,
+ },
+ handlers: [ImageMetadataHandlers.QwenImageComponentSource],
+ store,
+ silent: true,
+ });
+
+ // Key is present with null value → handler resolves with null → 1 recall
+ expect(recalled.size).toBe(1);
+ const mockStore = store as ReturnType;
+ expect(mockStore.dispatch).toHaveBeenCalledTimes(1);
+ });
+});
diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx
index edf6270e13..4f643123be 100644
--- a/invokeai/frontend/web/src/features/metadata/parsing.tsx
+++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx
@@ -16,6 +16,9 @@ import {
kleinVaeModelSelected,
negativePromptChanged,
positivePromptChanged,
+ qwenImageComponentSourceSelected,
+ qwenImageQuantizationChanged,
+ qwenImageShiftChanged,
refinerModelChanged,
selectBase,
setAnimaScheduler,
@@ -687,6 +690,83 @@ const ZImageSeedVarianceRandomizePercent: SingleMetadataHandler = {
};
//#endregion ZImageSeedVarianceRandomizePercent
+//#region QwenImageComponentSource
+const QwenImageComponentSource: SingleMetadataHandler = {
+ [SingleMetadataKey]: true,
+ type: 'QwenImageComponentSource',
+ parse: (metadata, _store) => {
+ const raw = getProperty(metadata, 'qwen_image_component_source');
+ // Reject when the key is absent so the handler is not rendered for non-Qwen images
+ if (raw === undefined) {
+ return Promise.reject();
+ }
+ if (raw === null) {
+ return Promise.resolve(null);
+ }
+ return Promise.resolve(zModelIdentifierField.parse(raw));
+ },
+ recall: (value, store) => {
+ store.dispatch(qwenImageComponentSourceSelected(value));
+ },
+ i18nKey: 'modelManager.qwenImageComponentSource',
+ LabelComponent: MetadataLabel,
+ ValueComponent: ({ value }: SingleMetadataValueProps) => (
+
+ ),
+};
+//#endregion QwenImageComponentSource
+
+//#region QwenImageQuantization
+const QwenImageQuantization: SingleMetadataHandler<'none' | 'int8' | 'nf4'> = {
+ [SingleMetadataKey]: true,
+ type: 'QwenImageQuantization',
+ parse: (metadata, _store) => {
+ const raw = getProperty(metadata, 'qwen_image_quantization');
+ // Reject when the key is absent so the handler is not rendered for non-Qwen images
+ if (raw === undefined) {
+ return Promise.reject();
+ }
+ const parsed = z.enum(['none', 'int8', 'nf4']).parse(raw);
+ return Promise.resolve(parsed);
+ },
+ recall: (value, store) => {
+ store.dispatch(qwenImageQuantizationChanged(value));
+ },
+ i18nKey: 'modelManager.qwenImageQuantization',
+ LabelComponent: MetadataLabel,
+ ValueComponent: ({ value }: SingleMetadataValueProps<'none' | 'int8' | 'nf4'>) => (
+
+ ),
+};
+//#endregion QwenImageQuantization
+
+//#region QwenImageShift
+const QwenImageShift: SingleMetadataHandler = {
+ [SingleMetadataKey]: true,
+ type: 'QwenImageShift',
+ parse: (metadata, _store) => {
+ const raw = getProperty(metadata, 'qwen_image_shift');
+ // Reject when the key is absent so the handler is not rendered for non-Qwen images
+ if (raw === undefined) {
+ return Promise.reject();
+ }
+ if (raw === null) {
+ return Promise.resolve(null);
+ }
+ const parsed = z.number().parse(raw);
+ return Promise.resolve(parsed);
+ },
+ recall: (value, store) => {
+ store.dispatch(qwenImageShiftChanged(value));
+ },
+ i18nKey: 'modelManager.qwenImageShift',
+ LabelComponent: MetadataLabel,
+ ValueComponent: ({ value }: SingleMetadataValueProps) => (
+
+ ),
+};
+//#endregion QwenImageShift
+
//#region ZImageShift
const ZImageShift: SingleMetadataHandler = {
[SingleMetadataKey]: true,
@@ -1334,6 +1414,9 @@ export const ImageMetadataHandlers = {
ZImageSeedVarianceEnabled,
ZImageSeedVarianceStrength,
ZImageSeedVarianceRandomizePercent,
+ QwenImageComponentSource,
+ QwenImageQuantization,
+ QwenImageShift,
ZImageShift,
LoRAs,
CanvasLayers,
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useBuildModelsToInstall.ts b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useBuildModelsToInstall.ts
index 457d48ce19..85e24a3d07 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useBuildModelsToInstall.ts
+++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useBuildModelsToInstall.ts
@@ -4,7 +4,7 @@ import { modelConfigsAdapterSelectors, useGetModelConfigsQuery } from 'services/
import type { StarterModel } from 'services/api/types';
type ModelInstallArg = {
- config: Pick;
+ config: Pick;
source: string;
};
@@ -32,7 +32,7 @@ export const useBuildModelInstallArg = () => {
);
const buildModelInstallArg = useCallback((starterModel: StarterModel): ModelInstallArg => {
- const { name, base, type, source, description, format } = starterModel;
+ const { name, base, type, source, description, format, variant } = starterModel;
return {
config: {
@@ -41,6 +41,7 @@ export const useBuildModelInstallArg = () => {
type,
description,
format,
+ variant,
},
source,
};
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
index d1774f9ded..9b76fbbde6 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/hooks/useStarterModelsToast.tsx
@@ -1,10 +1,11 @@
import { Button, Text, useToast } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
-import { selectIsAuthenticated } from 'features/auth/store/authSlice';
+import { selectCurrentUser, selectIsAuthenticated } from 'features/auth/store/authSlice';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useCallback, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
import { useMainModels } from 'services/api/hooks/modelsByType';
const TOAST_ID = 'starterModels';
@@ -15,6 +16,11 @@ export const useStarterModelsToast = () => {
const [mainModels, { data }] = useMainModels();
const toast = useToast();
const isAuthenticated = useAppSelector(selectIsAuthenticated);
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const user = useAppSelector(selectCurrentUser);
+
+ const isMultiuser = setupStatus?.multiuser_enabled ?? false;
+ const isAdmin = !isMultiuser || (user?.is_admin ?? false);
useEffect(() => {
// Only show the toast if the user is authenticated
@@ -33,17 +39,17 @@ export const useStarterModelsToast = () => {
toast({
id: TOAST_ID,
title: t('modelManager.noModelsInstalled'),
- description: ,
+ description: isAdmin ? : ,
status: 'info',
isClosable: true,
duration: null,
onCloseComplete: () => setDidToast(true),
});
}
- }, [data, didToast, isAuthenticated, mainModels.length, t, toast]);
+ }, [data, didToast, isAuthenticated, isAdmin, mainModels.length, t, toast]);
};
-const ToastDescription = () => {
+const AdminToastDescription = () => {
const { t } = useTranslation();
const toast = useToast();
@@ -62,3 +68,9 @@ const ToastDescription = () => {
);
};
+
+const NonAdminToastDescription = () => {
+ const { t } = useTranslation();
+
+ return {t('modelManager.noModelsInstalledAskAdmin')};
+};
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/models.ts b/invokeai/frontend/web/src/features/modelManagerV2/models.ts
index a63f38cafd..9cc4ed24d9 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/models.ts
+++ b/invokeai/frontend/web/src/features/modelManagerV2/models.ts
@@ -148,6 +148,7 @@ export const MODEL_BASE_TO_COLOR: Record = {
flux: 'gold',
flux2: 'gold',
cogview4: 'red',
+ 'qwen-image': 'orange',
'z-image': 'cyan',
external: 'orange',
anima: 'invokePurple',
@@ -192,6 +193,7 @@ export const MODEL_BASE_TO_LONG_NAME: Record = {
flux: 'FLUX',
flux2: 'FLUX.2',
cogview4: 'CogView4',
+ 'qwen-image': 'Qwen Image',
'z-image': 'Z-Image',
external: 'External',
anima: 'Anima',
@@ -211,6 +213,7 @@ export const MODEL_BASE_TO_SHORT_NAME: Record = {
flux: 'FLUX',
flux2: 'FLUX.2',
cogview4: 'CogView4',
+ 'qwen-image': 'QwenImg',
'z-image': 'Z-Image',
external: 'External',
anima: 'Anima',
@@ -231,6 +234,8 @@ export const MODEL_VARIANT_TO_LONG_NAME: Record = {
zbase: 'Z-Image Base',
large: 'CLIP L',
gigantic: 'CLIP G',
+ generate: 'Qwen Image',
+ edit: 'Qwen Image Edit',
qwen3_4b: 'Qwen3 4B',
qwen3_8b: 'Qwen3 8B',
qwen3_06b: 'Qwen3 0.6B',
@@ -257,13 +262,14 @@ export const MODEL_FORMAT_TO_LONG_NAME: Record = {
export const SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS: BaseModelType[] = ['flux', 'sd-3', 'z-image'];
-export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = ['sd-1', 'sdxl', 'flux', 'flux2'];
+export const SUPPORTS_REF_IMAGES_BASE_MODELS: BaseModelType[] = ['sd-1', 'sdxl', 'flux', 'flux2', 'qwen-image'];
export const SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS: BaseModelType[] = [
'sd-1',
'sd-2',
'sdxl',
'cogview4',
+ 'qwen-image',
'sd-3',
'z-image',
'anima',
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManager.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManager.tsx
index f6e1a18f6f..60200c8801 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManager.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManager.tsx
@@ -37,7 +37,7 @@ export const ModelManager = memo(() => {
{t('common.modelManager')}
-
+ {canManageModels && }
{!!selectedModelKey && canManageModels && (
} onClick={handleClickAddModel}>
{t('modelManager.addModels')}
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton.tsx
index 91c6c1dae3..fe4b889f54 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/panels/TopPanel/SaveWorkflowButton.tsx
@@ -1,5 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
+import { useIsCurrentWorkflowOwner } from 'features/workflowLibrary/hooks/useIsCurrentWorkflowOwner';
import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -8,6 +9,7 @@ import { PiFloppyDiskBold } from 'react-icons/pi';
const SaveWorkflowButton = () => {
const { t } = useTranslation();
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
+ const isCurrentWorkflowOwner = useIsCurrentWorkflowOwner();
const saveOrSaveAsWorkflow = useSaveOrSaveAsWorkflow();
return (
@@ -15,7 +17,7 @@ const SaveWorkflowButton = () => {
tooltip={t('workflows.saveWorkflow')}
aria-label={t('workflows.saveWorkflow')}
icon={}
- isDisabled={!doesWorkflowHaveUnsavedChanges}
+ isDisabled={!doesWorkflowHaveUnsavedChanges || !isCurrentWorkflowOwner}
onClick={saveOrSaveAsWorkflow}
pointerEvents="auto"
/>
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/SaveWorkflowButton.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/SaveWorkflowButton.tsx
index 39a93e4a38..779d6f018e 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/SaveWorkflowButton.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/WorkflowListMenu/SaveWorkflowButton.tsx
@@ -1,4 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
+import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
+import { useIsCurrentWorkflowOwner } from 'features/workflowLibrary/hooks/useIsCurrentWorkflowOwner';
import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -7,12 +9,15 @@ import { PiFloppyDiskBold } from 'react-icons/pi';
const SaveWorkflowButton = () => {
const { t } = useTranslation();
const saveOrSaveAsWorkflow = useSaveOrSaveAsWorkflow();
+ const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
+ const isCurrentWorkflowOwner = useIsCurrentWorkflowOwner();
return (
}
+ isDisabled={!doesWorkflowHaveUnsavedChanges || !isCurrentWorkflowOwner}
onClick={saveOrSaveAsWorkflow}
pointerEvents="auto"
variant="ghost"
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
index c1094abf86..11d2733535 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowGeneralTab.tsx
@@ -1,8 +1,19 @@
import type { FormControlProps } from '@invoke-ai/ui-library';
-import { Box, Flex, FormControl, FormControlGroup, FormLabel, Image, Input, Textarea } from '@invoke-ai/ui-library';
+import {
+ Box,
+ Checkbox,
+ Flex,
+ FormControl,
+ FormControlGroup,
+ FormLabel,
+ Image,
+ Input,
+ Textarea,
+} from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import {
workflowAuthorChanged,
workflowContactChanged,
@@ -25,7 +36,8 @@ import {
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
-import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
+import { useGetWorkflowQuery, useUpdateWorkflowIsPublicMutation } from 'services/api/endpoints/workflows';
import { WorkflowThumbnailEditor } from './WorkflowThumbnail/WorkflowThumbnailEditor';
@@ -95,6 +107,7 @@ const WorkflowGeneralTab = () => {
{t('nodes.workflowName')}
+
{t('nodes.workflowVersion')}
@@ -187,3 +200,40 @@ const Thumbnail = ({ id }: { id?: string | null }) => {
// This is a default workflow and it does not have a thumbnail set. Users may not edit the thumbnail.
return null;
};
+
+const ShareWorkflowCheckbox = ({ id }: { id?: string | null }) => {
+ const { t } = useTranslation();
+ const currentUser = useAppSelector(selectCurrentUser);
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const { data } = useGetWorkflowQuery(id ?? skipToken);
+ const [updateIsPublic, { isLoading }] = useUpdateWorkflowIsPublicMutation();
+
+ const handleChange = useCallback(
+ (e: ChangeEvent) => {
+ if (!id) {
+ return;
+ }
+ updateIsPublic({ workflow_id: id, is_public: e.target.checked });
+ },
+ [id, updateIsPublic]
+ );
+
+ // Only show for saved user workflows in multiuser mode when the current user is the owner or admin
+ if (!data || !id || data.workflow.meta.category !== 'user') {
+ return null;
+ }
+ if (setupStatus?.multiuser_enabled) {
+ const isOwner = currentUser !== null && data.user_id === currentUser.user_id;
+ const isAdmin = currentUser?.is_admin ?? false;
+ if (!isOwner && !isAdmin) {
+ return null;
+ }
+ }
+
+ return (
+
+
+ {t('workflows.shareWorkflow')}
+
+ );
+};
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
index 73b046c83a..501b8365db 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibrarySideNav.tsx
@@ -41,6 +41,7 @@ export const WorkflowLibrarySideNav = () => {
{t('workflows.recentlyOpened')}
+ {t('workflows.sharedWorkflows')}
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
index 79dff535b0..e6605d2076 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowList.tsx
@@ -32,6 +32,8 @@ const getCategories = (view: WorkflowLibraryView): WorkflowCategory[] => {
return ['user', 'default'];
case 'yours':
return ['user'];
+ case 'shared':
+ return ['user'];
default:
assert>(false);
}
@@ -44,6 +46,13 @@ const getHasBeenOpened = (view: WorkflowLibraryView): boolean | undefined => {
return undefined;
};
+const getIsPublic = (view: WorkflowLibraryView): boolean | undefined => {
+ if (view === 'shared') {
+ return true;
+ }
+ return undefined;
+};
+
const useInfiniteQueryAry = () => {
const orderBy = useAppSelector(selectWorkflowLibraryOrderBy);
const direction = useAppSelector(selectWorkflowLibraryDirection);
@@ -62,6 +71,7 @@ const useInfiniteQueryAry = () => {
query: debouncedSearchTerm,
tags: view === 'defaults' || view === 'yours' ? selectedTags : [],
has_been_opened: getHasBeenOpened(view),
+ is_public: getIsPublic(view),
} satisfies Parameters[0];
}, [orderBy, direction, view, debouncedSearchTerm, selectedTags]);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
index a1767765c9..a184f04039 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowListItem.tsx
@@ -1,13 +1,15 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
-import { Badge, Flex, Icon, Image, Spacer, Text } from '@invoke-ai/ui-library';
+import { Badge, Flex, Icon, Image, Spacer, Switch, Text, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import { selectWorkflowId } from 'features/nodes/store/selectors';
import { workflowModeChanged } from 'features/nodes/store/workflowLibrarySlice';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import InvokeLogo from 'public/assets/images/invoke-symbol-wht-lrg.svg';
-import { memo, useCallback, useMemo } from 'react';
+import { type ChangeEvent, memo, type MouseEvent, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiImage } from 'react-icons/pi';
+import { useUpdateWorkflowIsPublicMutation } from 'services/api/endpoints/workflows';
import type { WorkflowRecordListItemWithThumbnailDTO } from 'services/api/types';
import { DeleteWorkflow } from './WorkflowLibraryListItemActions/DeleteWorkflow';
@@ -33,12 +35,21 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi
const { t } = useTranslation();
const dispatch = useAppDispatch();
const workflowId = useAppSelector(selectWorkflowId);
+ const currentUser = useAppSelector(selectCurrentUser);
const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
const isActive = useMemo(() => {
return workflowId === workflow.workflow_id;
}, [workflowId, workflow.workflow_id]);
+ const isOwner = useMemo(() => {
+ return currentUser !== null && workflow.user_id === currentUser.user_id;
+ }, [currentUser, workflow.user_id]);
+
+ const canEditOrDelete = useMemo(() => {
+ return isOwner || (currentUser?.is_admin ?? false);
+ }, [isOwner, currentUser]);
+
const tags = useMemo(() => {
if (!workflow.tags) {
return [];
@@ -102,6 +113,18 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi
{t('workflows.opened')}
)}
+ {workflow.is_public && workflow.category !== 'default' && (
+
+ {t('workflows.shared')}
+
+ )}
{workflow.category === 'default' && (
)}
+ {isOwner && }
{workflow.category === 'default' && }
{workflow.category !== 'default' && (
<>
-
+ {canEditOrDelete && }
-
+ {canEditOrDelete && }
>
)}
@@ -152,6 +176,35 @@ export const WorkflowListItem = memo(({ workflow }: { workflow: WorkflowRecordLi
});
WorkflowListItem.displayName = 'WorkflowListItem';
+const ShareWorkflowToggle = memo(({ workflow }: { workflow: WorkflowRecordListItemWithThumbnailDTO }) => {
+ const { t } = useTranslation();
+ const [updateIsPublic, { isLoading }] = useUpdateWorkflowIsPublicMutation();
+
+ const handleChange = useCallback(
+ (e: ChangeEvent) => {
+ e.stopPropagation();
+ updateIsPublic({ workflow_id: workflow.workflow_id, is_public: e.target.checked });
+ },
+ [updateIsPublic, workflow.workflow_id]
+ );
+
+ const handleClick = useCallback((e: MouseEvent) => {
+ e.stopPropagation();
+ }, []);
+
+ return (
+
+
+
+ {t('workflows.shared')}
+
+
+
+
+ );
+});
+ShareWorkflowToggle.displayName = 'ShareWorkflowToggle';
+
const UserThumbnailFallback = memo(() => {
return (
;
const isOrderBy = (v: unknown): v is OrderBy => zOrderBy.safeParse(v).success;
@@ -32,6 +32,7 @@ export const WorkflowSortControl = () => {
created_at: t('workflows.created'),
updated_at: t('workflows.updated'),
name: t('workflows.name'),
+ is_public: t('workflows.shared'),
}),
[t]
);
diff --git a/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts b/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts
index ee85a03c18..1d5d8554ae 100644
--- a/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts
+++ b/invokeai/frontend/web/src/features/nodes/store/workflowLibrarySlice.ts
@@ -11,7 +11,7 @@ import {
} from 'services/api/types';
import z from 'zod';
-const zWorkflowLibraryView = z.enum(['recent', 'yours', 'defaults']);
+const zWorkflowLibraryView = z.enum(['recent', 'yours', 'shared', 'defaults']);
export type WorkflowLibraryView = z.infer;
const zWorkflowLibraryState = z.object({
@@ -55,6 +55,9 @@ const slice = createSlice({
if (action.payload === 'recent') {
state.orderBy = 'opened_at';
state.direction = 'DESC';
+ } else if (action.payload === 'shared') {
+ state.orderBy = 'name';
+ state.direction = 'ASC';
}
},
workflowLibraryTagToggled: (state, action: PayloadAction) => {
@@ -121,5 +124,11 @@ export const WORKFLOW_LIBRARY_TAG_CATEGORIES: WorkflowTagCategory[] = [
];
export const WORKFLOW_LIBRARY_TAGS = WORKFLOW_LIBRARY_TAG_CATEGORIES.flatMap(({ tags }) => tags);
-type WorkflowSortOption = 'opened_at' | 'created_at' | 'updated_at' | 'name';
-export const WORKFLOW_LIBRARY_SORT_OPTIONS: WorkflowSortOption[] = ['opened_at', 'created_at', 'updated_at', 'name'];
+type WorkflowSortOption = 'opened_at' | 'created_at' | 'updated_at' | 'name' | 'is_public';
+export const WORKFLOW_LIBRARY_SORT_OPTIONS: WorkflowSortOption[] = [
+ 'opened_at',
+ 'created_at',
+ 'updated_at',
+ 'name',
+ 'is_public',
+];
diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts
index bd5cc7d207..75c3415cef 100644
--- a/invokeai/frontend/web/src/features/nodes/types/common.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/common.ts
@@ -95,13 +95,25 @@ export const zBaseModelType = z.enum([
'flux',
'flux2',
'cogview4',
+ 'qwen-image',
'z-image',
'external',
'anima',
'unknown',
]);
export type BaseModelType = z.infer;
-export const zMainModelBase = z.enum(['sd-1', 'sd-2', 'sd-3', 'sdxl', 'flux', 'flux2', 'cogview4', 'z-image', 'anima']);
+export const zMainModelBase = z.enum([
+ 'sd-1',
+ 'sd-2',
+ 'sd-3',
+ 'sdxl',
+ 'flux',
+ 'flux2',
+ 'cogview4',
+ 'qwen-image',
+ 'z-image',
+ 'anima',
+]);
type MainModelBase = z.infer;
export const isMainModelBase = (base: unknown): base is MainModelBase => zMainModelBase.safeParse(base).success;
export const zModelType = z.enum([
@@ -147,6 +159,7 @@ export const zModelVariantType = z.enum(['normal', 'inpaint', 'depth']);
export const zFluxVariantType = z.enum(['dev', 'dev_fill', 'schnell']);
export const zFlux2VariantType = z.enum(['klein_4b', 'klein_9b', 'klein_9b_base']);
export const zZImageVariantType = z.enum(['turbo', 'zbase']);
+const zQwenImageVariantType = z.enum(['generate', 'edit']);
export const zQwen3VariantType = z.enum(['qwen3_4b', 'qwen3_8b', 'qwen3_06b']);
export const zAnyModelVariant = z.union([
zModelVariantType,
@@ -154,6 +167,7 @@ export const zAnyModelVariant = z.union([
zFluxVariantType,
zFlux2VariantType,
zZImageVariantType,
+ zQwenImageVariantType,
zQwen3VariantType,
]);
export type AnyModelVariant = z.infer;
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
index 1c69cdc0d1..f17ff970f2 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addImageToImage.ts
@@ -22,7 +22,14 @@ type AddImageToImageArg = {
manager: CanvasManager;
l2i: Invocation;
i2l: Invocation<
- 'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l' | 'anima_i2l'
+ | 'i2l'
+ | 'flux_vae_encode'
+ | 'flux2_vae_encode'
+ | 'sd3_i2l'
+ | 'cogview4_i2l'
+ | 'qwen_image_i2l'
+ | 'z_image_i2l'
+ | 'anima_i2l'
>;
noise?: Invocation<'noise'>;
denoise: Invocation;
@@ -46,6 +53,7 @@ export const addImageToImage = async ({
| 'flux2_vae_decode'
| 'sd3_l2i'
| 'cogview4_l2i'
+ | 'qwen_image_l2i'
| 'z_image_l2i'
| 'anima_l2i'
>
@@ -58,6 +66,7 @@ export const addImageToImage = async ({
if (
denoise.type === 'cogview4_denoise' ||
+ denoise.type === 'qwen_image_denoise' ||
denoise.type === 'flux_denoise' ||
denoise.type === 'flux2_denoise' ||
denoise.type === 'sd3_denoise' ||
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
index b53d79e2a5..fa01db67e6 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addInpaint.ts
@@ -25,7 +25,14 @@ type AddInpaintArg = {
manager: CanvasManager;
l2i: Invocation;
i2l: Invocation<
- 'i2l' | 'flux_vae_encode' | 'flux2_vae_encode' | 'sd3_i2l' | 'cogview4_i2l' | 'z_image_i2l' | 'anima_i2l'
+ | 'i2l'
+ | 'flux_vae_encode'
+ | 'flux2_vae_encode'
+ | 'sd3_i2l'
+ | 'cogview4_i2l'
+ | 'qwen_image_i2l'
+ | 'z_image_i2l'
+ | 'anima_i2l'
>;
noise?: Invocation<'noise'>;
denoise: Invocation;
@@ -57,6 +64,7 @@ export const addInpaint = async ({
if (
denoise.type === 'cogview4_denoise' ||
+ denoise.type === 'qwen_image_denoise' ||
denoise.type === 'flux_denoise' ||
denoise.type === 'flux2_denoise' ||
denoise.type === 'sd3_denoise' ||
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
index 14be20c70e..0c57087eaa 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addOutpaint.ts
@@ -57,6 +57,7 @@ export const addOutpaint = async ({
if (
denoise.type === 'cogview4_denoise' ||
+ denoise.type === 'qwen_image_denoise' ||
denoise.type === 'flux_denoise' ||
denoise.type === 'flux2_denoise' ||
denoise.type === 'sd3_denoise' ||
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addQwenImageLoRAs.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addQwenImageLoRAs.ts
new file mode 100644
index 0000000000..ef94bb672d
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addQwenImageLoRAs.ts
@@ -0,0 +1,59 @@
+import type { RootState } from 'app/store/store';
+import { getPrefixedId } from 'features/controlLayers/konva/util';
+import { zModelIdentifierField } from 'features/nodes/types/common';
+import type { Graph } from 'features/nodes/util/graph/generation/Graph';
+import type { Invocation, S } from 'services/api/types';
+
+export const addQwenImageLoRAs = (
+ state: RootState,
+ g: Graph,
+ denoise: Invocation<'qwen_image_denoise'>,
+ modelLoader: Invocation<'qwen_image_model_loader'>
+): void => {
+ const enabledLoRAs = state.loras.loras.filter((l) => l.isEnabled && l.model.base === 'qwen-image');
+ const loraCount = enabledLoRAs.length;
+
+ if (loraCount === 0) {
+ return;
+ }
+
+ const loraMetadata: S['LoRAMetadataField'][] = [];
+
+ // Collect LoRAs into a single collection node, then pass them to the LoRA collection loader
+ const loraCollector = g.addNode({
+ id: getPrefixedId('lora_collector'),
+ type: 'collect',
+ });
+ const loraCollectionLoader = g.addNode({
+ type: 'qwen_image_lora_collection_loader',
+ id: getPrefixedId('qwen_image_lora_collection_loader'),
+ });
+
+ g.addEdge(loraCollector, 'collection', loraCollectionLoader, 'loras');
+ // Use model loader as transformer input
+ g.addEdge(modelLoader, 'transformer', loraCollectionLoader, 'transformer');
+ // Reroute transformer connection through the LoRA collection loader
+ g.deleteEdgesTo(denoise, ['transformer']);
+ g.addEdge(loraCollectionLoader, 'transformer', denoise, 'transformer');
+
+ for (const lora of enabledLoRAs) {
+ const { weight } = lora;
+ const parsedModel = zModelIdentifierField.parse(lora.model);
+
+ const loraSelector = g.addNode({
+ type: 'lora_selector',
+ id: getPrefixedId('lora_selector'),
+ lora: parsedModel,
+ weight,
+ });
+
+ loraMetadata.push({
+ model: parsedModel,
+ weight,
+ });
+
+ g.addEdge(loraSelector, 'lora', loraCollector, 'item');
+ }
+
+ g.upsertMetadata({ loras: loraMetadata });
+};
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts
index 9cfd5e3b55..06ece522da 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/addTextToImage.ts
@@ -28,6 +28,7 @@ export const addTextToImage = ({
| 'flux2_vae_decode'
| 'sd3_l2i'
| 'cogview4_l2i'
+ | 'qwen_image_l2i'
| 'z_image_l2i'
| 'anima_l2i'
> => {
@@ -38,6 +39,7 @@ export const addTextToImage = ({
if (
denoise.type === 'cogview4_denoise' ||
+ denoise.type === 'qwen_image_denoise' ||
denoise.type === 'flux_denoise' ||
denoise.type === 'flux2_denoise' ||
denoise.type === 'sd3_denoise' ||
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.test.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.test.ts
new file mode 100644
index 0000000000..3a5c2cde34
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.test.ts
@@ -0,0 +1,417 @@
+import { afterEach, describe, expect, it, vi } from 'vitest';
+
+vi.mock('app/logging/logger', () => ({
+ logger: () => ({
+ debug: vi.fn(),
+ }),
+}));
+
+let nextId = 0;
+vi.mock('features/controlLayers/konva/util', () => ({
+ getPrefixedId: (prefix: string) => `${prefix}:${nextId++}`,
+}));
+
+const model = {
+ key: 'qwen-model',
+ hash: 'qwen-hash',
+ name: 'Qwen Image Generate',
+ base: 'qwen-image',
+ type: 'main',
+ variant: 'generate',
+};
+
+const defaultParams: {
+ cfgScale: number | number[];
+ steps: number;
+ qwenImageComponentSource: null;
+ qwenImageQuantization: string;
+ qwenImageShift: number;
+} = {
+ cfgScale: 4,
+ steps: 20,
+ qwenImageComponentSource: null,
+ qwenImageQuantization: 'none',
+ qwenImageShift: 1,
+};
+
+let params = { ...defaultParams };
+
+const refImagesSlice = {
+ entities: [
+ {
+ id: 'ref-image-1',
+ isEnabled: true,
+ config: {
+ type: 'qwen_image_reference_image',
+ image: {
+ original: {
+ image: {
+ image_name: 'reference.png',
+ width: 512,
+ height: 512,
+ },
+ },
+ },
+ },
+ },
+ ],
+};
+
+vi.mock('features/controlLayers/store/paramsSlice', () => ({
+ selectMainModelConfig: vi.fn(() => model),
+ selectParamsSlice: vi.fn(() => params),
+}));
+
+vi.mock('features/controlLayers/store/refImagesSlice', () => ({
+ selectRefImagesSlice: vi.fn(() => refImagesSlice),
+}));
+
+vi.mock('features/controlLayers/store/selectors', () => ({
+ selectCanvasMetadata: vi.fn(() => ({})),
+}));
+
+vi.mock('features/controlLayers/store/types', () => ({
+ isQwenImageReferenceImageConfig: vi.fn((config: { type?: string }) => config.type === 'qwen_image_reference_image'),
+}));
+
+vi.mock('features/controlLayers/store/validators', () => ({
+ getGlobalReferenceImageWarnings: vi.fn(() => []),
+}));
+
+vi.mock('features/metadata/util/modelFetchingHelpers', () => ({
+ fetchModelConfigWithTypeGuard: vi.fn(() => Promise.resolve(model)),
+}));
+
+vi.mock('features/nodes/types/common', async () => {
+ const actual = await vi.importActual('features/nodes/types/common');
+ return {
+ ...actual,
+ zImageField: {
+ parse: vi.fn((image) => image),
+ },
+ };
+});
+
+vi.mock('features/nodes/util/graph/generation/addImageToImage', () => ({
+ addImageToImage: vi.fn(),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addInpaint', () => ({
+ addInpaint: vi.fn(),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addNSFWChecker', () => ({
+ addNSFWChecker: vi.fn((_g, node) => node),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addOutpaint', () => ({
+ addOutpaint: vi.fn(),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addQwenImageLoRAs', () => ({
+ addQwenImageLoRAs: vi.fn(),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addTextToImage', () => ({
+ addTextToImage: vi.fn(({ l2i }) => l2i),
+}));
+
+vi.mock('features/nodes/util/graph/generation/addWatermarker', () => ({
+ addWatermarker: vi.fn((_g, node) => node),
+}));
+
+vi.mock('features/nodes/util/graph/graphBuilderUtils', () => ({
+ selectCanvasOutputFields: vi.fn(() => ({})),
+ selectPresetModifiedPrompts: vi.fn(() => ({
+ positive: 'a prompt',
+ negative: 'a negative prompt',
+ })),
+}));
+
+vi.mock('features/ui/store/uiSelectors', () => ({
+ selectActiveTab: vi.fn(() => 'generation'),
+}));
+
+vi.mock('services/api/types', async () => {
+ const actual = await vi.importActual('services/api/types');
+ return {
+ ...actual,
+ isNonRefinerMainModelConfig: vi.fn(() => true),
+ };
+});
+
+import { buildQwenImageGraph, isQwenImageEditModel, shouldUseCfg } from './buildQwenImageGraph';
+
+describe('isQwenImageEditModel', () => {
+ afterEach(() => {
+ nextId = 0;
+ params = { ...defaultParams };
+ });
+
+ it('returns true for edit variant', () => {
+ expect(isQwenImageEditModel({ variant: 'edit' })).toBe(true);
+ });
+
+ it('returns false for generate variant', () => {
+ expect(isQwenImageEditModel({ variant: 'generate' })).toBe(false);
+ });
+
+ it('returns false when variant is null', () => {
+ expect(isQwenImageEditModel({ variant: null })).toBe(false);
+ });
+
+ it('returns false when variant is undefined', () => {
+ expect(isQwenImageEditModel({ variant: undefined })).toBe(false);
+ });
+
+ it('returns false when variant field is absent', () => {
+ expect(isQwenImageEditModel({})).toBe(false);
+ });
+
+ it('returns false when model is null', () => {
+ expect(isQwenImageEditModel(null)).toBe(false);
+ });
+
+ it('returns false for unrelated variant values', () => {
+ expect(isQwenImageEditModel({ variant: 'schnell' })).toBe(false);
+ expect(isQwenImageEditModel({ variant: 'dev' })).toBe(false);
+ expect(isQwenImageEditModel({ variant: 'turbo' })).toBe(false);
+ });
+
+ describe('reference image filtering regression', () => {
+ it('prevents reference images from leaking to generate models when switching from edit', () => {
+ const editModel = { variant: 'edit' as const };
+ const generateModel = { variant: 'generate' as const };
+
+ expect(isQwenImageEditModel(editModel)).toBe(true);
+ expect(isQwenImageEditModel(generateModel)).toBe(false);
+ });
+
+ it('prevents reference images from leaking to GGUF models without variant', () => {
+ const ggufModelNoVariant = {};
+ expect(isQwenImageEditModel(ggufModelNoVariant)).toBe(false);
+ });
+ });
+});
+
+describe('shouldUseCfg', () => {
+ afterEach(() => {
+ nextId = 0;
+ params = { ...defaultParams };
+ });
+
+ describe('negative conditioning is included when cfgScale > 1', () => {
+ it('returns true for cfgScale = 4', () => {
+ expect(shouldUseCfg(4)).toBe(true);
+ });
+
+ it('returns true for cfgScale = 1.5', () => {
+ expect(shouldUseCfg(1.5)).toBe(true);
+ });
+
+ it('returns true for cfgScale = 1.01', () => {
+ expect(shouldUseCfg(1.01)).toBe(true);
+ });
+ });
+
+ describe('negative conditioning is excluded when cfgScale <= 1', () => {
+ it('returns false for cfgScale = 1', () => {
+ expect(shouldUseCfg(1)).toBe(false);
+ });
+
+ it('returns false for cfgScale = 0.5', () => {
+ expect(shouldUseCfg(0.5)).toBe(false);
+ });
+
+ it('returns false for cfgScale = 0', () => {
+ expect(shouldUseCfg(0)).toBe(false);
+ });
+ });
+
+ describe('array cfgScale (per-step)', () => {
+ it('returns true for per-step arrays with values > 1', () => {
+ expect(shouldUseCfg([4, 3, 2, 1])).toBe(true);
+ });
+
+ it('returns true when any per-step cfg value is > 1', () => {
+ expect(shouldUseCfg([1, 1.1, 1])).toBe(true);
+ expect(shouldUseCfg([0.5, 2, 0.5])).toBe(true);
+ });
+
+ it('returns false when every per-step cfg value is <= 1', () => {
+ expect(shouldUseCfg([1, 1, 1])).toBe(false);
+ expect(shouldUseCfg([0.5, 0.75, 1])).toBe(false);
+ });
+ });
+
+ describe('CFG gating regression', () => {
+ it('with cfgScale=1, neg_prompt is absent from the graph (no wasted compute)', () => {
+ expect(shouldUseCfg(1)).toBe(false);
+ });
+
+ it('with cfgScale=4, neg_prompt is present in the graph for classifier-free guidance', () => {
+ expect(shouldUseCfg(4)).toBe(true);
+ });
+
+ it('omits negative conditioning edges from the graph when per-step cfg never exceeds 1', async () => {
+ params = {
+ ...defaultParams,
+ cfgScale: [1, 1, 1],
+ };
+
+ const { g } = await buildQwenImageGraph({
+ generationMode: 'txt2img',
+ manager: null,
+ state: {
+ system: {
+ shouldUseNSFWChecker: false,
+ shouldUseWatermarker: false,
+ },
+ } as never,
+ });
+
+ const graph = g.getGraph();
+ const nodeIds = Object.keys(graph.nodes);
+ const hasNegativePromptNode = nodeIds.some((id) => id.startsWith('neg_prompt:'));
+ const hasNegativeConditioningEdge = graph.edges.some(
+ (edge) => edge.destination.field === 'negative_conditioning'
+ );
+
+ expect(hasNegativePromptNode).toBe(false);
+ expect(hasNegativeConditioningEdge).toBe(false);
+ });
+
+ it('includes negative conditioning edges in the graph when any per-step cfg exceeds 1', async () => {
+ params = {
+ ...defaultParams,
+ cfgScale: [1, 2, 1],
+ };
+
+ const { g } = await buildQwenImageGraph({
+ generationMode: 'txt2img',
+ manager: null,
+ state: {
+ system: {
+ shouldUseNSFWChecker: false,
+ shouldUseWatermarker: false,
+ },
+ } as never,
+ });
+
+ const graph = g.getGraph();
+ const nodeIds = Object.keys(graph.nodes);
+ const hasNegativePromptNode = nodeIds.some((id) => id.startsWith('neg_prompt:'));
+ const hasNegativeConditioningEdge = graph.edges.some(
+ (edge) => edge.destination.field === 'negative_conditioning'
+ );
+
+ expect(hasNegativePromptNode).toBe(true);
+ expect(hasNegativeConditioningEdge).toBe(true);
+ });
+ });
+});
+
+describe('buildQwenImageGraph', () => {
+ afterEach(() => {
+ nextId = 0;
+ params = { ...defaultParams };
+ });
+
+ it('uses chained collectors to preserve reference image ordering for edit-variant models', async () => {
+ // Override the model to be an edit variant
+ const { selectMainModelConfig } = await import('features/controlLayers/store/paramsSlice');
+ const editModel = { ...model, variant: 'edit' };
+ vi.mocked(selectMainModelConfig).mockReturnValue(editModel as never);
+
+ // Also need fetchModelConfigWithTypeGuard to return edit variant
+ const { fetchModelConfigWithTypeGuard } = await import('features/metadata/util/modelFetchingHelpers');
+ vi.mocked(fetchModelConfigWithTypeGuard).mockResolvedValue(editModel as never);
+
+ // Add a second reference image to the mock slice
+ const { selectRefImagesSlice } = await import('features/controlLayers/store/refImagesSlice');
+ vi.mocked(selectRefImagesSlice).mockReturnValue({
+ entities: [
+ {
+ id: 'ref-image-1',
+ isEnabled: true,
+ config: {
+ type: 'qwen_image_reference_image',
+ image: { original: { image: { image_name: 'ref1.png', width: 512, height: 512 } } },
+ },
+ },
+ {
+ id: 'ref-image-2',
+ isEnabled: true,
+ config: {
+ type: 'qwen_image_reference_image',
+ image: { original: { image: { image_name: 'ref2.png', width: 512, height: 512 } } },
+ },
+ },
+ ],
+ } as never);
+
+ const { g } = await buildQwenImageGraph({
+ generationMode: 'txt2img',
+ manager: null,
+ state: {
+ system: { shouldUseNSFWChecker: false, shouldUseWatermarker: false },
+ } as never,
+ });
+
+ const graph = g.getGraph();
+ const nodeIds = Object.keys(graph.nodes);
+
+ // Should have exactly 2 collect nodes (one per reference image, chained)
+ const collectNodeIds = nodeIds.filter((id) => id.startsWith('qwen_ref_img_collect:'));
+ expect(collectNodeIds).toHaveLength(2);
+
+ // Each collect node should receive exactly one image as 'item'
+ for (const collectId of collectNodeIds) {
+ const itemEdges = graph.edges.filter(
+ (edge) => edge.destination.node_id === collectId && edge.destination.field === 'item'
+ );
+ expect(itemEdges).toHaveLength(1);
+ }
+
+ // The second collect node should chain from the first via collection → collection
+ const chainEdges = graph.edges.filter(
+ (edge) => edge.source.field === 'collection' && edge.destination.field === 'collection'
+ );
+ expect(chainEdges).toHaveLength(1);
+ expect(chainEdges[0]!.source.node_id).toBe(collectNodeIds[0]);
+ expect(chainEdges[0]!.destination.node_id).toBe(collectNodeIds[1]);
+
+ // The final collect node should connect to the text encoder's reference_images input
+ const refImagesEdge = graph.edges.find((edge) => edge.destination.field === 'reference_images');
+ expect(refImagesEdge).toBeDefined();
+ expect(refImagesEdge!.source.node_id).toBe(collectNodeIds[1]);
+
+ // Restore original mocks
+ vi.mocked(selectMainModelConfig).mockReturnValue(model as never);
+ vi.mocked(fetchModelConfigWithTypeGuard).mockResolvedValue(model as never);
+ vi.mocked(selectRefImagesSlice).mockReturnValue(refImagesSlice as never);
+ });
+
+ it('does not include hidden Qwen reference images for generate-variant models', async () => {
+ const { g } = await buildQwenImageGraph({
+ generationMode: 'txt2img',
+ manager: null,
+ state: {
+ system: {
+ shouldUseNSFWChecker: false,
+ shouldUseWatermarker: false,
+ },
+ } as never,
+ });
+
+ const graph = g.getGraph();
+ const nodeIds = Object.keys(graph.nodes);
+ const hasReferenceCollectionNode = nodeIds.some((id) => id.startsWith('qwen_ref_img_collect:'));
+ const hasReferenceImagesEdge = graph.edges.some((edge) => edge.destination.field === 'reference_images');
+ const hasReferenceLatentsEdge = graph.edges.some((edge) => edge.destination.field === 'reference_latents');
+
+ expect(hasReferenceCollectionNode).toBe(false);
+ expect(hasReferenceImagesEdge).toBe(false);
+ expect(hasReferenceLatentsEdge).toBe(false);
+ });
+});
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.ts
new file mode 100644
index 0000000000..6cad9dfaa0
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildQwenImageGraph.ts
@@ -0,0 +1,303 @@
+import { logger } from 'app/logging/logger';
+import { getPrefixedId } from 'features/controlLayers/konva/util';
+import { selectMainModelConfig, selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
+import { selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
+import { selectCanvasMetadata } from 'features/controlLayers/store/selectors';
+import { isQwenImageReferenceImageConfig } from 'features/controlLayers/store/types';
+import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
+import { fetchModelConfigWithTypeGuard } from 'features/metadata/util/modelFetchingHelpers';
+import { zImageField } from 'features/nodes/types/common';
+import { addImageToImage } from 'features/nodes/util/graph/generation/addImageToImage';
+import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
+import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
+import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
+import { addQwenImageLoRAs } from 'features/nodes/util/graph/generation/addQwenImageLoRAs';
+import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
+import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
+import { Graph } from 'features/nodes/util/graph/generation/Graph';
+import { selectCanvasOutputFields, selectPresetModifiedPrompts } from 'features/nodes/util/graph/graphBuilderUtils';
+import type { GraphBuilderArg, GraphBuilderReturn, ImageOutputNodes } from 'features/nodes/util/graph/types';
+import { selectActiveTab } from 'features/ui/store/uiSelectors';
+import type { Invocation } from 'services/api/types';
+import { isNonRefinerMainModelConfig } from 'services/api/types';
+import type { Equals } from 'tsafe';
+import { assert } from 'tsafe';
+
+const log = logger('system');
+
+/**
+ * Determine whether the given model config represents a Qwen Image Edit model.
+ * Only edit-variant models should use reference images for conditioning.
+ * Generate (txt2img) models should never receive reference images, even if
+ * they exist in state from a previous edit session.
+ */
+export const isQwenImageEditModel = (model: { variant?: string | null } | null): boolean => {
+ if (!model) {
+ return false;
+ }
+ return 'variant' in model && model.variant === 'edit';
+};
+
+/**
+ * Determine whether classifier-free guidance (negative conditioning) should be used.
+ * CFG is only enabled when cfg_scale > 1. With cfg_scale <= 1, the negative prompt
+ * is mathematically unused and the model runs once per step instead of twice.
+ */
+export const shouldUseCfg = (cfgScale: number | number[]): boolean => {
+ if (typeof cfgScale === 'number') {
+ return cfgScale > 1;
+ }
+ // For per-step CFG arrays, enable CFG if any value exceeds 1
+ return cfgScale.some((value) => value > 1);
+};
+
+export const buildQwenImageGraph = async (arg: GraphBuilderArg): Promise<GraphBuilderReturn> => {
+ const { generationMode, state, manager } = arg;
+
+  log.debug({ generationMode, manager: manager?.id }, 'Building Qwen Image graph');
+
+ const model = selectMainModelConfig(state);
+ assert(model, 'No model selected');
+  assert(model.base === 'qwen-image', 'Selected model is not a Qwen Image model');
+
+ const params = selectParamsSlice(state);
+
+ const { cfgScale: cfg_scale, steps } = params;
+
+ const prompts = selectPresetModifiedPrompts(state);
+
+ const g = new Graph(getPrefixedId('qwen_image_graph'));
+
+ const modelLoader = g.addNode({
+ type: 'qwen_image_model_loader',
+ id: getPrefixedId('qwen_image_model_loader'),
+ model,
+ component_source: params.qwenImageComponentSource,
+ });
+
+ const positivePrompt = g.addNode({
+ id: getPrefixedId('positive_prompt'),
+ type: 'string',
+ });
+ const posCond = g.addNode({
+ type: 'qwen_image_text_encoder',
+ id: getPrefixedId('pos_prompt'),
+ quantization: params.qwenImageQuantization,
+ });
+
+ // Negative conditioning for CFG (only when cfg_scale > 1)
+ const useCfg = shouldUseCfg(cfg_scale);
+ const negCond = useCfg
+ ? g.addNode({
+ type: 'qwen_image_text_encoder',
+ id: getPrefixedId('neg_prompt'),
+ prompt: prompts.negative || ' ',
+ quantization: params.qwenImageQuantization,
+ })
+ : null;
+
+ const seed = g.addNode({
+ id: getPrefixedId('seed'),
+ type: 'integer',
+ });
+ const denoise = g.addNode({
+ type: 'qwen_image_denoise',
+ id: getPrefixedId('denoise_latents'),
+ cfg_scale,
+ steps,
+ shift: params.qwenImageShift,
+ });
+ const l2i = g.addNode({
+ type: 'qwen_image_l2i',
+ id: getPrefixedId('l2i'),
+ });
+
+ g.addEdge(modelLoader, 'transformer', denoise, 'transformer');
+ g.addEdge(modelLoader, 'qwen_vl_encoder', posCond, 'qwen_vl_encoder');
+ g.addEdge(modelLoader, 'vae', l2i, 'vae');
+
+ g.addEdge(positivePrompt, 'value', posCond, 'prompt');
+ g.addEdge(posCond, 'conditioning', denoise, 'positive_conditioning');
+
+ if (negCond) {
+ g.addEdge(modelLoader, 'qwen_vl_encoder', negCond, 'qwen_vl_encoder');
+ g.addEdge(negCond, 'conditioning', denoise, 'negative_conditioning');
+ }
+
+ g.addEdge(seed, 'value', denoise, 'seed');
+ g.addEdge(denoise, 'latents', l2i, 'latents');
+
+ // Add Qwen Image Edit LoRAs if any are enabled
+ addQwenImageLoRAs(state, g, denoise, modelLoader);
+
+ // Only collect reference images for edit-variant models.
+ // For txt2img (generate) models, reference images are not used even if they exist in state.
+ const isEditModel = isQwenImageEditModel(model);
+ const validRefImageConfigs = isEditModel
+ ? selectRefImagesSlice(state).entities.filter(
+ (entity) =>
+ entity.isEnabled &&
+ isQwenImageReferenceImageConfig(entity.config) &&
+ entity.config.image !== null &&
+ getGlobalReferenceImageWarnings(entity, model).length === 0
+ )
+ : [];
+
+ if (validRefImageConfigs.length > 0) {
+ // Use collector chaining to preserve reference image ordering.
+ // Each image gets its own collect node; each subsequent collector chains
+ // from the previous one via collection → collection edge.
+ // (Same pattern as FLUX.2 Klein's kontext conditioning.)
+ let prevCollect: Invocation<'collect'> | null = null;
+ for (const { config } of validRefImageConfigs) {
+ const imgField = zImageField.parse(config.image?.crop?.image ?? config.image?.original.image);
+ const imageNode = g.addNode({
+ type: 'image',
+ id: getPrefixedId('qwen_ref_img'),
+ image: imgField,
+ });
+ const collectNode = g.addNode({
+ type: 'collect',
+ id: getPrefixedId('qwen_ref_img_collect'),
+ });
+ g.addEdge(imageNode, 'image', collectNode, 'item');
+ if (prevCollect !== null) {
+ g.addEdge(prevCollect, 'collection', collectNode, 'collection');
+ }
+ prevCollect = collectNode;
+ }
+ assert(prevCollect !== null);
+ // Pass reference images to text encoder for vision-language conditioning
+ g.addEdge(prevCollect, 'collection', posCond, 'reference_images');
+
+ // Also VAE-encode the first reference image as latents for the denoising transformer.
+ // The transformer expects [noisy_patches ; ref_patches] in its sequence.
+ const firstConfig = validRefImageConfigs[0]!;
+ const firstImgField = zImageField.parse(
+ firstConfig.config.image?.crop?.image ?? firstConfig.config.image?.original.image
+ );
+ // Don't force-resize the reference image to the output dimensions — that would
+ // distort the aspect ratio when they differ. The I2L encodes at the image's
+ // native size; the denoise node handles dimension mismatches via interpolation.
+ const refI2l = g.addNode({
+ type: 'qwen_image_i2l',
+ id: getPrefixedId('qwen_ref_i2l'),
+ });
+ const refImageNode = g.addNode({
+ type: 'image',
+ id: getPrefixedId('qwen_ref_img_for_vae'),
+ image: firstImgField,
+ });
+ g.addEdge(refImageNode, 'image', refI2l, 'image');
+ g.addEdge(modelLoader, 'vae', refI2l, 'vae');
+ g.addEdge(refI2l, 'latents', denoise, 'reference_latents');
+
+ g.upsertMetadata({ ref_images: validRefImageConfigs }, 'merge');
+ }
+
+ const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
+ assert(modelConfig.base === 'qwen-image');
+
+ g.upsertMetadata({
+ cfg_scale,
+ negative_prompt: prompts.negative,
+ model: Graph.getModelMetadataField(modelConfig),
+ qwen_image_component_source: params.qwenImageComponentSource,
+ qwen_image_quantization: params.qwenImageQuantization,
+ qwen_image_shift: params.qwenImageShift,
+ steps,
+ });
+ g.addEdgeToMetadata(seed, 'value', 'seed');
+ g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');
+
+  let canvasOutput: Invocation<ImageOutputNodes> = l2i;
+
+ if (generationMode === 'txt2img') {
+ canvasOutput = addTextToImage({
+ g,
+ state,
+ denoise,
+ l2i,
+ });
+ g.upsertMetadata({ generation_mode: 'qwen_image_txt2img' });
+ } else if (generationMode === 'img2img') {
+ assert(manager !== null);
+ const i2l = g.addNode({
+ type: 'qwen_image_i2l',
+ id: getPrefixedId('qwen_image_i2l'),
+ });
+
+ canvasOutput = await addImageToImage({
+ g,
+ state,
+ manager,
+ denoise,
+ l2i,
+ i2l,
+ vaeSource: modelLoader,
+ });
+ g.upsertMetadata({ generation_mode: 'qwen_image_img2img' });
+ } else if (generationMode === 'inpaint') {
+ assert(manager !== null);
+ const i2l = g.addNode({
+ type: 'qwen_image_i2l',
+ id: getPrefixedId('qwen_image_i2l'),
+ });
+
+ canvasOutput = await addInpaint({
+ g,
+ state,
+ manager,
+ l2i,
+ i2l,
+ denoise,
+ vaeSource: modelLoader,
+ modelLoader,
+ seed,
+ });
+ g.upsertMetadata({ generation_mode: 'qwen_image_inpaint' });
+ } else if (generationMode === 'outpaint') {
+ assert(manager !== null);
+ const i2l = g.addNode({
+ type: 'qwen_image_i2l',
+ id: getPrefixedId('qwen_image_i2l'),
+ });
+
+ canvasOutput = await addOutpaint({
+ g,
+ state,
+ manager,
+ l2i,
+ i2l,
+ denoise,
+ vaeSource: modelLoader,
+ modelLoader,
+ seed,
+ });
+ g.upsertMetadata({ generation_mode: 'qwen_image_outpaint' });
+ } else {
+    assert<Equals<typeof generationMode, never>>(false);
+ }
+
+ if (state.system.shouldUseNSFWChecker) {
+ canvasOutput = addNSFWChecker(g, canvasOutput);
+ }
+
+ if (state.system.shouldUseWatermarker) {
+ canvasOutput = addWatermarker(g, canvasOutput);
+ }
+
+ g.updateNode(canvasOutput, selectCanvasOutputFields(state));
+
+ if (selectActiveTab(state) === 'canvas') {
+ g.upsertMetadata(selectCanvasMetadata(state));
+ }
+
+ g.setMetadataReceivingNode(canvasOutput);
+
+ return {
+ g,
+ seed,
+ positivePrompt,
+ };
+};
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts
index 4804345c40..892b47a408 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/graphBuilderUtils.ts
@@ -215,6 +215,7 @@ export const isMainModelWithoutUnet = (modelLoader: Invocation {
+ const dispatch = useAppDispatch();
+ const { t } = useTranslation();
+ const componentSource = useAppSelector(selectQwenImageComponentSource);
+ const [modelConfigs, { isLoading }] = useQwenImageDiffusersModels();
+
+ const _onChange = useCallback(
+ (model: MainModelConfig | null) => {
+ if (model) {
+ dispatch(qwenImageComponentSourceSelected(zModelIdentifierField.parse(model)));
+ } else {
+ dispatch(qwenImageComponentSourceSelected(null));
+ }
+ },
+ [dispatch]
+ );
+
+ const { options, value, onChange, noOptionsMessage } = useModelCombobox({
+ modelConfigs,
+ onChange: _onChange,
+ selectedModel: componentSource,
+ isLoading,
+ });
+
+ return (
+    <FormControl isDisabled={!options.length} minW={0} flexGrow={1} gap={2}>
+      <FormLabel m={0}>{t('modelManager.qwenImageComponentSource')}</FormLabel>
+      <Combobox value={value} options={options} onChange={onChange} noOptionsMessage={noOptionsMessage} />
+    </FormControl>
+ );
+});
+
+ParamQwenImageComponentSourceSelect.displayName = 'ParamQwenImageComponentSourceSelect';
+
+export default ParamQwenImageComponentSourceSelect;
diff --git a/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamQwenImageQuantization.tsx b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamQwenImageQuantization.tsx
new file mode 100644
index 0000000000..09bc212e92
--- /dev/null
+++ b/invokeai/frontend/web/src/features/parameters/components/Advanced/ParamQwenImageQuantization.tsx
@@ -0,0 +1,48 @@
+import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
+import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { qwenImageQuantizationChanged, selectQwenImageQuantization } from 'features/controlLayers/store/paramsSlice';
+import { memo, useCallback, useMemo } from 'react';
+import { useTranslation } from 'react-i18next';
+
+const isValidQuantization = (value: string | undefined): value is 'none' | 'int8' | 'nf4' => {
+ return value === 'none' || value === 'int8' || value === 'nf4';
+};
+
+const ParamQwenImageQuantization = memo(() => {
+ const dispatch = useAppDispatch();
+ const { t } = useTranslation();
+ const quantization = useAppSelector(selectQwenImageQuantization);
+
+  const options = useMemo<ComboboxOption[]>(
+ () => [
+ { value: 'none', label: t('modelManager.qwenImageQuantizationNone') },
+ { value: 'int8', label: t('modelManager.qwenImageQuantizationInt8') },
+ { value: 'nf4', label: t('modelManager.qwenImageQuantizationNf4') },
+ ],
+ [t]
+ );
+
+ const value = useMemo(() => options.find((o) => o.value === quantization), [options, quantization]);
+
+  const onChange = useCallback<ComboboxOnChange>(
+ (v) => {
+ if (!isValidQuantization(v?.value)) {
+ return;
+ }
+ dispatch(qwenImageQuantizationChanged(v.value));
+ },
+ [dispatch]
+ );
+
+ return (
+    <FormControl>
+      <FormLabel m={0}>{t('modelManager.qwenImageQuantization')}</FormLabel>
+      <Combobox value={value} options={options} onChange={onChange} />
+    </FormControl>
+ );
+});
+
+ParamQwenImageQuantization.displayName = 'ParamQwenImageQuantization';
+
+export default ParamQwenImageQuantization;
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamQwenImageShift.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamQwenImageShift.tsx
new file mode 100644
index 0000000000..9702e4bcb9
--- /dev/null
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamQwenImageShift.tsx
@@ -0,0 +1,75 @@
+import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel, Text } from '@invoke-ai/ui-library';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { qwenImageShiftChanged, selectQwenImageShift } from 'features/controlLayers/store/paramsSlice';
+import type React from 'react';
+import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiXBold } from 'react-icons/pi';
+
+const CONSTRAINTS = {
+ initial: 3,
+ sliderMin: 1,
+ sliderMax: 7,
+ numberInputMin: 0,
+ numberInputMax: 10,
+ fineStep: 0.1,
+ coarseStep: 0.5,
+};
+
+const MARKS = [1, 2, 3, 4, 5, 6, 7];
+
+const ParamQwenImageShift = () => {
+ const { t } = useTranslation();
+ const shift = useAppSelector(selectQwenImageShift);
+ const dispatch = useAppDispatch();
+
+ const onChange = useCallback((v: number) => dispatch(qwenImageShiftChanged(v)), [dispatch]);
+ const onReset = useCallback(
+ (e: React.MouseEvent) => {
+ e.preventDefault();
+ e.stopPropagation();
+ dispatch(qwenImageShiftChanged(null));
+ },
+ [dispatch]
+ );
+
+ const displayValue = shift ?? CONSTRAINTS.initial;
+
+ return (
+
+
+ {t('parameters.shift')}{' '}
+ {shift !== null ? (
+
+
+
+ ) : (
+
+ ({t('common.auto').toLowerCase()})
+
+ )}
+
+
+
+
+ );
+};
+
+export default memo(ParamQwenImageShift);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Core/ParamZImageShift.tsx b/invokeai/frontend/web/src/features/parameters/components/Core/ParamZImageShift.tsx
index 21dd02412e..308a315127 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Core/ParamZImageShift.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Core/ParamZImageShift.tsx
@@ -3,6 +3,7 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectZImageShift, setZImageShift } from 'features/controlLayers/store/paramsSlice';
import type React from 'react';
import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';
const CONSTRAINTS = {
@@ -18,6 +19,7 @@ const CONSTRAINTS = {
const MARKS = [1, 2, 3, 4, 5, 6, 7];
const ParamZImageShift = () => {
+ const { t } = useTranslation();
const shift = useAppSelector(selectZImageShift);
const dispatch = useAppDispatch();
@@ -36,14 +38,14 @@ const ParamZImageShift = () => {
return (
- Shift{' '}
+ {t('parameters.shift')}{' '}
{shift !== null ? (
) : (
- (auto)
+ ({t('common.auto').toLowerCase()})
)}
diff --git a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
index ea26a218ea..6ecbf1c4a6 100644
--- a/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/ModelPicker.tsx
@@ -4,6 +4,7 @@ import {
Button,
Flex,
Icon,
+ Link,
Popover,
PopoverArrow,
PopoverBody,
@@ -21,6 +22,7 @@ import { buildGroup, getRegex, isGroup, Picker, usePickerContext } from 'common/
import { useDisclosure } from 'common/hooks/useBoolean';
import { typedMemo } from 'common/util/typedMemo';
import { uniq } from 'es-toolkit/compat';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import { selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { MODEL_BASE_TO_COLOR, MODEL_BASE_TO_LONG_NAME, MODEL_BASE_TO_SHORT_NAME } from 'features/modelManagerV2/models';
@@ -33,6 +35,7 @@ import { filesize } from 'filesize';
import { memo, useCallback, useMemo, useRef } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiLinkSimple } from 'react-icons/pi';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';
import {
type AnyModelConfigWithExternal,
@@ -87,6 +90,32 @@ const components = {
const NoOptionsFallback = memo(({ noOptionsText }: { noOptionsText?: string }) => {
const { t } = useTranslation();
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const user = useAppSelector(selectCurrentUser);
+
+ const isMultiuser = setupStatus?.multiuser_enabled ?? false;
+ const isAdmin = !isMultiuser || (user?.is_admin ?? false);
+ const adminEmail = setupStatus?.admin_email ?? null;
+
+ if (!isAdmin) {
+ const AdminEmailLink = adminEmail ? (
+
+ {adminEmail}
+
+ ) : (
+
+ your administrator
+
+ );
+
+ return (
+
+
+
+
+
+ );
+ }
return (
@@ -222,7 +251,7 @@ export const ModelPicker = typedMemo(
const _options: Group>[] = [];
// Add groups in the original order
- for (const groupId of ['api', 'flux', 'z-image', 'cogview4', 'sdxl', 'sd-3', 'sd-2', 'sd-1']) {
+ for (const groupId of ['api', 'flux', 'z-image', 'qwen-image', 'cogview4', 'sdxl', 'sd-3', 'sd-2', 'sd-1']) {
const group = groups[groupId];
if (group) {
// Sort options within each group so starred ones come first
diff --git a/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx b/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
index 9de7326270..c93841d77b 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Prompts/Prompts.tsx
@@ -8,18 +8,34 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
-import { memo } from 'react';
+import { memo, useMemo } from 'react';
+import { useSelectedModelConfig } from 'services/api/hooks/useSelectedModelConfig';
export const Prompts = memo(() => {
const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
const modelSupportsRefImages = useAppSelector(selectModelSupportsRefImages);
const hasNegativePrompt = useAppSelector(selectHasNegativePrompt);
+ const modelConfig = useSelectedModelConfig();
+
+ // Qwen Image models only support ref images in the "edit" variant
+ const showRefImages = useMemo(() => {
+ if (!modelSupportsRefImages) {
+ return false;
+ }
+ if (modelConfig?.base === 'qwen-image') {
+ const variant = 'variant' in modelConfig ? modelConfig.variant : null;
+ if (variant !== 'edit') {
+ return false;
+ }
+ }
+ return true;
+ }, [modelSupportsRefImages, modelConfig]);
return (
{modelSupportsNegativePrompt && hasNegativePrompt && }
- {modelSupportsRefImages && }
+ {showRefImages && }
);
});
diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts
index 03441c1d76..0d55785174 100644
--- a/invokeai/frontend/web/src/features/parameters/types/constants.ts
+++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts
@@ -37,6 +37,10 @@ export const CLIP_SKIP_MAP: { [key in BaseModelType]?: { maxClip: number; marker
maxClip: 0,
markers: [],
},
+ 'qwen-image': {
+ maxClip: 0,
+ markers: [],
+ },
'z-image': {
maxClip: 0,
markers: [],
diff --git a/invokeai/frontend/web/src/features/parameters/util/optimalDimension.test.ts b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.test.ts
new file mode 100644
index 0000000000..c980dcb5ee
--- /dev/null
+++ b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.test.ts
@@ -0,0 +1,120 @@
+import { describe, expect, it } from 'vitest';
+
+import {
+ getGridSize,
+ getIsSizeOptimal,
+ getIsSizeTooLarge,
+ getIsSizeTooSmall,
+ getOptimalDimension,
+} from './optimalDimension';
+
+describe('getOptimalDimension', () => {
+ it('returns 512 for sd-1', () => {
+ expect(getOptimalDimension('sd-1')).toBe(512);
+ });
+
+ it('returns 512 for sd-2', () => {
+ expect(getOptimalDimension('sd-2')).toBe(512);
+ });
+
+ it('returns 1024 for qwen-image', () => {
+ expect(getOptimalDimension('qwen-image')).toBe(1024);
+ });
+
+ it('returns 1024 for flux', () => {
+ expect(getOptimalDimension('flux')).toBe(1024);
+ });
+
+ it('returns 1024 for sdxl', () => {
+ expect(getOptimalDimension('sdxl')).toBe(1024);
+ });
+
+ it('returns 1024 for z-image', () => {
+ expect(getOptimalDimension('z-image')).toBe(1024);
+ });
+
+ it('returns 1024 for null/undefined', () => {
+ expect(getOptimalDimension(null)).toBe(1024);
+ expect(getOptimalDimension(undefined)).toBe(1024);
+ });
+});
+
+describe('getGridSize', () => {
+ it('returns 16 for qwen-image', () => {
+ expect(getGridSize('qwen-image')).toBe(16);
+ });
+
+ it('returns 16 for flux', () => {
+ expect(getGridSize('flux')).toBe(16);
+ });
+
+ it('returns 16 for z-image', () => {
+ expect(getGridSize('z-image')).toBe(16);
+ });
+
+ it('returns 32 for cogview4', () => {
+ expect(getGridSize('cogview4')).toBe(32);
+ });
+
+ it('returns 8 for sd-1', () => {
+ expect(getGridSize('sd-1')).toBe(8);
+ });
+
+ it('returns 8 for sdxl', () => {
+ expect(getGridSize('sdxl')).toBe(8);
+ });
+
+ it('returns 8 for null/undefined', () => {
+ expect(getGridSize(null)).toBe(8);
+ expect(getGridSize(undefined)).toBe(8);
+ });
+});
+
+describe('getIsSizeOptimal', () => {
+ it('returns true for dimensions near optimal area for qwen-image (1024x1024)', () => {
+ expect(getIsSizeOptimal(1024, 1024, 'qwen-image')).toBe(true);
+ });
+
+ it('returns true for non-square dimensions within 20% of optimal area', () => {
+ // 896x1152 = 1,032,192 vs optimal 1,048,576 (~1.6% diff)
+ expect(getIsSizeOptimal(896, 1152, 'qwen-image')).toBe(true);
+ });
+
+ it('returns false for dimensions too small (< 80% of optimal area)', () => {
+ // 512x512 = 262,144 vs optimal 1,048,576 (~75% too small)
+ expect(getIsSizeOptimal(512, 512, 'qwen-image')).toBe(false);
+ });
+
+ it('returns false for dimensions too large (> 120% of optimal area)', () => {
+ // 2048x2048 = 4,194,304 vs optimal 1,048,576 (~300% too large)
+ expect(getIsSizeOptimal(2048, 2048, 'qwen-image')).toBe(false);
+ });
+
+ it('returns true for sd-1 at 512x512', () => {
+ expect(getIsSizeOptimal(512, 512, 'sd-1')).toBe(true);
+ });
+
+ it('returns false for sd-1 at 1024x1024 (too large)', () => {
+ expect(getIsSizeOptimal(1024, 1024, 'sd-1')).toBe(false);
+ });
+});
+
+describe('getIsSizeTooSmall', () => {
+ it('returns true when area is below 80% of optimal', () => {
+ expect(getIsSizeTooSmall(400, 400, 1024)).toBe(true);
+ });
+
+ it('returns false when area is at or above 80% of optimal', () => {
+ expect(getIsSizeTooSmall(920, 920, 1024)).toBe(false);
+ });
+});
+
+describe('getIsSizeTooLarge', () => {
+ it('returns true when area exceeds 120% of optimal', () => {
+ expect(getIsSizeTooLarge(1200, 1200, 1024)).toBe(true);
+ });
+
+ it('returns false when area is at or below 120% of optimal', () => {
+ expect(getIsSizeTooLarge(1100, 1024, 1024)).toBe(false);
+ });
+});
diff --git a/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
index 5a9622a12f..2ac59a32e2 100644
--- a/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
+++ b/invokeai/frontend/web/src/features/parameters/util/optimalDimension.ts
@@ -3,7 +3,7 @@ import type { BaseModelType } from 'features/nodes/types/common';
/**
* Gets the optimal dimension for a given base model:
* - sd-1, sd-2: 512
- * - sdxl, flux, sd-3, cogview4, z-image, anima: 1024
+ * - sdxl, flux, sd-3, cogview4, qwen-image, z-image, anima: 1024
* - default: 1024
* @param base The base model
* @returns The optimal dimension for the model, defaulting to 1024
@@ -18,6 +18,7 @@ export const getOptimalDimension = (base?: BaseModelType | null): number => {
case 'flux2':
case 'sd-3':
case 'cogview4':
+ case 'qwen-image':
case 'z-image':
case 'anima':
default:
@@ -62,7 +63,7 @@ export const isInSDXLTrainingDimensions = (width: number, height: number): boole
/**
* Gets the grid size for a given base model. For Flux, the grid size is 16, otherwise it is 8.
* - sd-1, sd-2, sdxl, anima: 8
- * - flux, sd-3, z-image: 16
+ * - flux, sd-3, qwen-image, z-image: 16
* - cogview4: 32
* - default: 8
* @param base The base model
@@ -75,6 +76,7 @@ export const getGridSize = (base?: BaseModelType | null): number => {
case 'flux':
case 'flux2':
case 'sd-3':
+ case 'qwen-image':
case 'z-image':
return 16;
case 'sd-1':
diff --git a/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx b/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx
index 9f1d004ba8..61553910e2 100644
--- a/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx
+++ b/invokeai/frontend/web/src/features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip.tsx
@@ -17,6 +17,8 @@ import type { PropsWithChildren } from 'react';
import { memo, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { enqueueMutationFixedCacheKeyOptions, useEnqueueBatchMutation } from 'services/api/endpoints/queue';
+import { useAutoAddBoard } from 'services/api/hooks/useAutoAddBoard';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
import { useBoardName } from 'services/api/hooks/useBoardName';
type Props = TooltipProps & {
@@ -53,19 +55,25 @@ TooltipContent.displayName = 'TooltipContent';
const CanvasTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
const isReady = useStore($isReadyToEnqueue);
const reasons = useStore($reasonsWhyCannotEnqueue);
+ const autoAddBoard = useAutoAddBoard();
+ const { canWriteImages } = useBoardAccess(autoAddBoard);
return (
-
+
- {reasons.length > 0 && (
+ {(reasons.length > 0 || !canWriteImages) && (
<>
-
+
+ >
+ )}
+ {canWriteImages && (
+ <>
+
+
>
)}
-
-
);
});
@@ -74,15 +82,17 @@ CanvasTabTooltipContent.displayName = 'CanvasTabTooltipContent';
const UpscaleTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
const isReady = useStore($isReadyToEnqueue);
const reasons = useStore($reasonsWhyCannotEnqueue);
+ const autoAddBoard = useAutoAddBoard();
+ const { canWriteImages } = useBoardAccess(autoAddBoard);
return (
-
+
- {reasons.length > 0 && (
+ {(reasons.length > 0 || !canWriteImages) && (
<>
-
+
>
)}
@@ -195,12 +205,23 @@ const IsReadyText = memo(({ isReady, prepend }: { isReady: boolean; prepend: boo
});
IsReadyText.displayName = 'IsReadyText';
-const ReasonsList = memo(({ reasons }: { reasons: Reason[] }) => {
+const ReasonsList = memo(({ reasons, canWriteImages = true }: { reasons: Reason[]; canWriteImages?: boolean }) => {
+ const { t } = useTranslation();
+ const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
+ const autoAddBoardName = useBoardName(autoAddBoardId);
+
return (
{reasons.map((reason, i) => (
))}
+ {!canWriteImages && (
+
+
+ {t('parameters.invoke.boardNotWritable', { boardName: autoAddBoardName || autoAddBoardId })}
+
+
+ )}
);
});
diff --git a/invokeai/frontend/web/src/features/queue/components/InvokeQueueBackButton.tsx b/invokeai/frontend/web/src/features/queue/components/InvokeQueueBackButton.tsx
index b175e4d8b0..a363d159e1 100644
--- a/invokeai/frontend/web/src/features/queue/components/InvokeQueueBackButton.tsx
+++ b/invokeai/frontend/web/src/features/queue/components/InvokeQueueBackButton.tsx
@@ -5,6 +5,8 @@ import { QueueIterationsNumberInput } from 'features/queue/components/QueueItera
import { useInvoke } from 'features/queue/hooks/useInvoke';
import { memo } from 'react';
import { PiLightningFill, PiSparkleFill } from 'react-icons/pi';
+import { useAutoAddBoard } from 'services/api/hooks/useAutoAddBoard';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
import { InvokeButtonTooltip } from './InvokeButtonTooltip/InvokeButtonTooltip';
@@ -14,6 +16,8 @@ export const InvokeButton = memo(() => {
const queue = useInvoke();
const shift = useShiftModifier();
const isLoadingDynamicPrompts = useAppSelector(selectDynamicPromptsIsLoading);
+ const autoAddBoard = useAutoAddBoard();
+ const { canWriteImages } = useBoardAccess(autoAddBoard);
return (
@@ -23,7 +27,7 @@ export const InvokeButton = memo(() => {
onClick={shift ? queue.enqueueFront : queue.enqueueBack}
isLoading={queue.isLoading || isLoadingDynamicPrompts}
loadingText={invoke}
- isDisabled={queue.isDisabled}
+ isDisabled={queue.isDisabled || !canWriteImages}
rightIcon={shift ? : }
variant="solid"
colorScheme="invokeYellow"
diff --git a/invokeai/frontend/web/src/features/queue/components/QueueCountBadge.tsx b/invokeai/frontend/web/src/features/queue/components/QueueCountBadge.tsx
index 3417488b09..e863646606 100644
--- a/invokeai/frontend/web/src/features/queue/components/QueueCountBadge.tsx
+++ b/invokeai/frontend/web/src/features/queue/components/QueueCountBadge.tsx
@@ -1,6 +1,4 @@
import { Badge, Portal } from '@invoke-ai/ui-library';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectIsAuthenticated } from 'features/auth/store/authSlice';
import type { RefObject } from 'react';
import { memo, useEffect, useMemo, useState } from 'react';
import { useGetQueueStatusQuery } from 'services/api/endpoints/queue';
@@ -13,53 +11,35 @@ type Props = {
type SessionQueueStatus = components['schemas']['SessionQueueStatus'];
/**
- * Determines if user-specific queue counts are available.
- */
-const hasUserCounts = (queueData: SessionQueueStatus): boolean => {
- return (
- queueData.user_pending !== undefined &&
- queueData.user_pending !== null &&
- queueData.user_in_progress !== undefined &&
- queueData.user_in_progress !== null
- );
-};
-
-/**
- * Calculates the appropriate badge text based on queue status and authentication state.
+ * Calculates the appropriate badge text based on queue status.
* Returns null if badge should be hidden.
+ *
+ * In multiuser mode, the backend already scopes counts to the current user for non-admins,
+ * so pending + in_progress reflects the user's own queue items.
*/
-const getBadgeText = (queueData: SessionQueueStatus | undefined, isAuthenticated: boolean): string | null => {
+const getBadgeText = (queueData: SessionQueueStatus | undefined): string | null => {
if (!queueData) {
return null;
}
const totalPending = queueData.pending + queueData.in_progress;
- // Hide badge if there are no pending jobs
if (totalPending === 0) {
return null;
}
- // In multiuser mode (authenticated user), show "X/Y" format where X is user's jobs and Y is total jobs
- if (isAuthenticated && hasUserCounts(queueData)) {
- const userPending = queueData.user_pending! + queueData.user_in_progress!;
- return `${userPending}/${totalPending}`;
- }
-
- // In single-user mode or when user counts aren't available, show total count only
return totalPending.toString();
};
export const QueueCountBadge = memo(({ targetRef }: Props) => {
const [badgePos, setBadgePos] = useState<{ x: string; y: string } | null>(null);
- const isAuthenticated = useAppSelector(selectIsAuthenticated);
const { queueData } = useGetQueueStatusQuery(undefined, {
selectFromResult: (res) => ({
queueData: res.data?.queue,
}),
});
- const badgeText = useMemo(() => getBadgeText(queueData, isAuthenticated), [queueData, isAuthenticated]);
+ const badgeText = useMemo(() => getBadgeText(queueData), [queueData]);
useEffect(() => {
if (!targetRef.current) {
diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts
index 8a88919260..68e1e9a382 100644
--- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts
+++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueCanvas.ts
@@ -13,6 +13,7 @@ import { buildAnimaGraph } from 'features/nodes/util/graph/generation/buildAnima
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
import { buildExternalGraph } from 'features/nodes/util/graph/generation/buildExternalGraph';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
+import { buildQwenImageGraph } from 'features/nodes/util/graph/generation/buildQwenImageGraph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -60,6 +61,8 @@ const enqueueCanvas = async (store: AppStore, canvasManager: CanvasManager, prep
return await buildFLUXGraph(graphBuilderArg);
case 'cogview4':
return await buildCogView4Graph(graphBuilderArg);
+ case 'qwen-image':
+ return await buildQwenImageGraph(graphBuilderArg);
case 'z-image':
return await buildZImageGraph(graphBuilderArg);
case 'external':
diff --git a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts
index 68c12e1732..54b37e1b95 100644
--- a/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts
+++ b/invokeai/frontend/web/src/features/queue/hooks/useEnqueueGenerate.ts
@@ -11,6 +11,7 @@ import { buildAnimaGraph } from 'features/nodes/util/graph/generation/buildAnima
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
import { buildExternalGraph } from 'features/nodes/util/graph/generation/buildExternalGraph';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
+import { buildQwenImageGraph } from 'features/nodes/util/graph/generation/buildQwenImageGraph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -53,6 +54,8 @@ const enqueueGenerate = async (store: AppStore, prepend: boolean) => {
return await buildFLUXGraph(graphBuilderArg);
case 'cogview4':
return await buildCogView4Graph(graphBuilderArg);
+ case 'qwen-image':
+ return await buildQwenImageGraph(graphBuilderArg);
case 'z-image':
return await buildZImageGraph(graphBuilderArg);
case 'external':
diff --git a/invokeai/frontend/web/src/features/queue/store/readiness.ts b/invokeai/frontend/web/src/features/queue/store/readiness.ts
index 7d2a85cfdf..67dfe3141c 100644
--- a/invokeai/frontend/web/src/features/queue/store/readiness.ts
+++ b/invokeai/frontend/web/src/features/queue/store/readiness.ts
@@ -262,6 +262,12 @@ const getReasonsWhyCannotEnqueueGenerateTab = (arg: {
// FLUX.2 (Klein) extracts Qwen3 encoder and VAE from main model - no separate selections needed
+ if (model?.base === 'qwen-image' && model.format === 'gguf_quantized') {
+ if (!params.qwenImageComponentSource) {
+ reasons.push({ content: i18n.t('parameters.invoke.noQwenImageComponentSourceSelected') });
+ }
+ }
+
if (model?.base === 'z-image') {
// Check if VAE source is available (either separate VAE or Qwen3 Source)
const hasVaeSource = params.zImageVaeModel !== null || params.zImageQwen3SourceModel !== null;
@@ -656,6 +662,57 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
}
}
+ if (model?.base === 'qwen-image') {
+ const { bbox } = canvas;
+ const gridSize = getGridSize('qwen-image');
+
+ if (bbox.scaleMethod === 'none') {
+ if (bbox.rect.width % gridSize !== 0) {
+ reasons.push({
+ content: i18n.t('parameters.invoke.modelIncompatibleBboxWidth', {
+ model: 'Qwen Image Edit',
+ width: bbox.rect.width,
+ multiple: gridSize,
+ }),
+ });
+ }
+ if (bbox.rect.height % gridSize !== 0) {
+ reasons.push({
+ content: i18n.t('parameters.invoke.modelIncompatibleBboxHeight', {
+ model: 'Qwen Image Edit',
+ height: bbox.rect.height,
+ multiple: gridSize,
+ }),
+ });
+ }
+ } else {
+ if (bbox.scaledSize.width % gridSize !== 0) {
+ reasons.push({
+ content: i18n.t('parameters.invoke.modelIncompatibleScaledBboxWidth', {
+ model: 'Qwen Image Edit',
+ width: bbox.scaledSize.width,
+ multiple: gridSize,
+ }),
+ });
+ }
+ if (bbox.scaledSize.height % gridSize !== 0) {
+ reasons.push({
+ content: i18n.t('parameters.invoke.modelIncompatibleScaledBboxHeight', {
+ model: 'Qwen Image Edit',
+ height: bbox.scaledSize.height,
+ multiple: gridSize,
+ }),
+ });
+ }
+ }
+ }
+
+ if (model?.base === 'qwen-image' && model.format === 'gguf_quantized') {
+ if (!params.qwenImageComponentSource) {
+ reasons.push({ content: i18n.t('parameters.invoke.noQwenImageComponentSourceSelected') });
+ }
+ }
+
if (model?.base === 'z-image') {
// Check if VAE source is available (either separate VAE or Qwen3 Source)
const hasVaeSource = params.zImageVaeModel !== null || params.zImageQwen3SourceModel !== null;
diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx
index b96b4c5e61..bfb69b945c 100644
--- a/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx
+++ b/invokeai/frontend/web/src/features/settingsAccordions/components/AdvancedSettingsAccordion/AdvancedSettingsAccordion.tsx
@@ -8,6 +8,7 @@ import {
selectIsExternal,
selectIsFLUX,
selectIsFlux2,
+ selectIsQwenImage,
selectIsSD3,
selectIsZImage,
selectParamsSlice,
@@ -20,6 +21,8 @@ import ParamCLIPGEmbedModelSelect from 'features/parameters/components/Advanced/
import ParamCLIPLEmbedModelSelect from 'features/parameters/components/Advanced/ParamCLIPLEmbedModelSelect';
import ParamClipSkip from 'features/parameters/components/Advanced/ParamClipSkip';
import ParamFlux2KleinModelSelect from 'features/parameters/components/Advanced/ParamFlux2KleinModelSelect';
+import ParamQwenImageComponentSourceSelect from 'features/parameters/components/Advanced/ParamQwenImageComponentSourceSelect';
+import ParamQwenImageQuantization from 'features/parameters/components/Advanced/ParamQwenImageQuantization';
import ParamT5EncoderModelSelect from 'features/parameters/components/Advanced/ParamT5EncoderModelSelect';
import ParamZImageQwen3VaeModelSelect from 'features/parameters/components/Advanced/ParamZImageQwen3VaeModelSelect';
import ParamSeamlessXAxis from 'features/parameters/components/Seamless/ParamSeamlessXAxis';
@@ -49,6 +52,7 @@ export const AdvancedSettingsAccordion = memo(() => {
const isSD3 = useAppSelector(selectIsSD3);
const isZImage = useAppSelector(selectIsZImage);
const isExternal = useAppSelector(selectIsExternal);
+ const isQwenImage = useAppSelector(selectIsQwenImage);
const isAnima = useAppSelector(selectIsAnima);
const selectBadges = useMemo(
@@ -103,13 +107,13 @@ export const AdvancedSettingsAccordion = memo(() => {
return (
- {!isZImage && !isAnima && !isFlux2 && (
+ {!isZImage && !isAnima && !isFlux2 && !isQwenImage && (
{isFLUX ? : }
{!isFLUX && !isSD3 && }
)}
- {!isFLUX && !isFlux2 && !isSD3 && !isZImage && !isAnima && (
+ {!isFLUX && !isFlux2 && !isSD3 && !isZImage && !isQwenImage && !isAnima && (
<>
@@ -151,6 +155,12 @@ export const AdvancedSettingsAccordion = memo(() => {
)}
+ {isQwenImage && (
+
+
+
+
+ )}
{isAnima && (
diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
index 9d53331a2d..99bb544ccd 100644
--- a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
+++ b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
@@ -11,6 +11,7 @@ import {
selectIsExternal,
selectIsFLUX,
selectIsFlux2,
+ selectIsQwenImage,
selectIsSD3,
selectIsZImage,
selectModelSupportsGuidance,
@@ -25,6 +26,7 @@ import ParamFluxDypePreset from 'features/parameters/components/Core/ParamFluxDy
import ParamFluxDypeScale from 'features/parameters/components/Core/ParamFluxDypeScale';
import ParamFluxScheduler from 'features/parameters/components/Core/ParamFluxScheduler';
import ParamGuidance from 'features/parameters/components/Core/ParamGuidance';
+import ParamQwenImageShift from 'features/parameters/components/Core/ParamQwenImageShift';
import ParamScheduler from 'features/parameters/components/Core/ParamScheduler';
import ParamSteps from 'features/parameters/components/Core/ParamSteps';
import ParamZImageScheduler from 'features/parameters/components/Core/ParamZImageScheduler';
@@ -51,6 +53,7 @@ export const GenerationSettingsAccordion = memo(() => {
const isCogView4 = useAppSelector(selectIsCogView4);
const isZImage = useAppSelector(selectIsZImage);
const isExternal = useAppSelector(selectIsExternal);
+ const isQwenImage = useAppSelector(selectIsQwenImage);
const isAnima = useAppSelector(selectIsAnima);
const fluxDypePreset = useAppSelector(selectFluxDypePreset);
const modelSupportsGuidance = useAppSelector(selectModelSupportsGuidance);
@@ -94,7 +97,7 @@ export const GenerationSettingsAccordion = memo(() => {
- {!isExternal && !isFLUX && !isFlux2 && !isSD3 && !isCogView4 && !isZImage && !isAnima && (
+ {!isExternal && !isFLUX && !isFlux2 && !isSD3 && !isCogView4 && !isZImage && !isQwenImage && !isAnima && (
)}
{!isExternal && isFLUX && }
@@ -107,6 +110,7 @@ export const GenerationSettingsAccordion = memo(() => {
)}
{!isExternal && !isFLUX && !isFlux2 && }
{!isExternal && isZImage && }
+ {!isExternal && isQwenImage && }
{!isExternal && isFLUX && }
{!isExternal && isFLUX && fluxDypePreset === 'manual' && }
{!isExternal && isFLUX && fluxDypePreset === 'manual' && }
diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/UpscaleSettingsAccordion/UpscaleWarning.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/UpscaleSettingsAccordion/UpscaleWarning.tsx
index 7d0a7ee2de..ff19e7ebb3 100644
--- a/invokeai/frontend/web/src/features/settingsAccordions/components/UpscaleSettingsAccordion/UpscaleWarning.tsx
+++ b/invokeai/frontend/web/src/features/settingsAccordions/components/UpscaleSettingsAccordion/UpscaleWarning.tsx
@@ -1,5 +1,6 @@
-import { Button, Flex, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
+import { Button, Flex, Link, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import {
@@ -10,6 +11,7 @@ import {
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useCallback, useEffect, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
import { useControlNetModels } from 'services/api/hooks/modelsByType';
export const UpscaleWarning = () => {
@@ -19,6 +21,12 @@ export const UpscaleWarning = () => {
const tileControlnetModel = useAppSelector(selectTileControlNetModel);
const dispatch = useAppDispatch();
const [modelConfigs, { isLoading }] = useControlNetModels();
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const user = useAppSelector(selectCurrentUser);
+
+ const isMultiuser = setupStatus?.multiuser_enabled ?? false;
+ const isAdmin = !isMultiuser || (user?.is_admin ?? false);
+ const adminEmail = setupStatus?.admin_email ?? null;
useEffect(() => {
const validModel = modelConfigs.find((cnetModel) => {
@@ -59,19 +67,33 @@ export const UpscaleWarning = () => {
return null;
}
+ const AdminEmailLink = adminEmail ? (
+
+ {adminEmail}
+
+ ) : (
+
+ your administrator
+
+ );
+
return (
{!isBaseModelCompatible && {t('upscaling.incompatibleBaseModelDesc')}}
{warnings.length > 0 && (
-
- ),
- }}
- />
+ {isAdmin ? (
+
+ ),
+ }}
+ />
+ ) : (
+
+ )}
)}
{warnings.length > 0 && (
diff --git a/invokeai/frontend/web/src/features/ui/components/FloatingLeftPanelButtons.tsx b/invokeai/frontend/web/src/features/ui/components/FloatingLeftPanelButtons.tsx
index 81e8930e40..c9620d84ac 100644
--- a/invokeai/frontend/web/src/features/ui/components/FloatingLeftPanelButtons.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/FloatingLeftPanelButtons.tsx
@@ -17,6 +17,8 @@ import {
PiXCircle,
} from 'react-icons/pi';
import { useGetQueueStatusQuery } from 'services/api/endpoints/queue';
+import { useAutoAddBoard } from 'services/api/hooks/useAutoAddBoard';
+import { useBoardAccess } from 'services/api/hooks/useBoardAccess';
export const FloatingLeftPanelButtons = memo(() => {
return (
@@ -71,6 +73,8 @@ const InvokeIconButton = memo(() => {
const { t } = useTranslation();
const queue = useInvoke();
const shift = useShiftModifier();
+ const autoAddBoard = useAutoAddBoard();
+ const { canWriteImages } = useBoardAccess(autoAddBoard);
return (
@@ -78,7 +82,7 @@ const InvokeIconButton = memo(() => {
aria-label={t('queue.queueBack')}
onClick={shift ? queue.enqueueFront : queue.enqueueBack}
isLoading={queue.isLoading}
- isDisabled={queue.isDisabled}
+ isDisabled={queue.isDisabled || !canWriteImages}
icon={}
colorScheme="invokeYellow"
flexGrow={1}
diff --git a/invokeai/frontend/web/src/features/ui/layouts/WorkflowsLaunchpadPanel.tsx b/invokeai/frontend/web/src/features/ui/layouts/WorkflowsLaunchpadPanel.tsx
index d432f3193e..b0d087528a 100644
--- a/invokeai/frontend/web/src/features/ui/layouts/WorkflowsLaunchpadPanel.tsx
+++ b/invokeai/frontend/web/src/features/ui/layouts/WorkflowsLaunchpadPanel.tsx
@@ -6,6 +6,7 @@ import { memo, useCallback } from 'react';
import { useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next';
import { PiFilePlusBold, PiFolderOpenBold, PiUploadBold } from 'react-icons/pi';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
import { LaunchpadButton } from './LaunchpadButton';
import { LaunchpadContainer } from './LaunchpadContainer';
@@ -14,6 +15,9 @@ export const WorkflowsLaunchpadPanel = memo(() => {
const { t } = useTranslation();
const workflowLibraryModal = useWorkflowLibraryModal();
const newWorkflow = useNewWorkflow();
+ const { data: setupStatus } = useGetSetupStatusQuery();
+
+ const isMultiuser = setupStatus?.multiuser_enabled ?? false;
const handleBrowseTemplates = useCallback(() => {
workflowLibraryModal.open();
@@ -45,11 +49,15 @@ export const WorkflowsLaunchpadPanel = memo(() => {
multiple: false,
});
+ const descriptionKey = isMultiuser
+ ? 'ui.launchpad.workflows.descriptionMultiuser'
+ : 'ui.launchpad.workflows.description';
+
return (
{/* Description */}
- {t('ui.launchpad.workflows.description')}
+ {t(descriptionKey)}
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
index 6dab1e3f04..1637cf5678 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/SaveWorkflowAsDialog.tsx
@@ -5,6 +5,7 @@ import {
AlertDialogFooter,
AlertDialogHeader,
Button,
+ Checkbox,
Flex,
FormControl,
FormLabel,
@@ -19,6 +20,7 @@ import { t } from 'i18next';
import { atom, computed } from 'nanostores';
import type { ChangeEvent, RefObject } from 'react';
import { memo, useCallback, useRef, useState } from 'react';
+import { useUpdateWorkflowIsPublicMutation } from 'services/api/endpoints/workflows';
import { assert } from 'tsafe';
/**
@@ -87,8 +89,10 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
}
return '';
});
+ const [isPublic, setIsPublic] = useState(false);
const { createNewWorkflow } = useCreateLibraryWorkflow();
+ const [updateIsPublic] = useUpdateWorkflowIsPublicMutation();
const inputRef = useRef(null);
@@ -96,6 +100,10 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
setName(e.target.value);
}, []);
+ const onChangeIsPublic = useCallback((e: ChangeEvent) => {
+ setIsPublic(e.target.checked);
+ }, []);
+
const onClose = useCallback(() => {
$workflowToSave.set(null);
}, []);
@@ -110,10 +118,19 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
await createNewWorkflow({
workflow,
- onSuccess: onClose,
+ onSuccess: async (workflowId?: string) => {
+ if (isPublic && workflowId) {
+ try {
+ await updateIsPublic({ workflow_id: workflowId, is_public: true }).unwrap();
+ } catch {
+ // Sharing failed silently - workflow was saved, just not shared
+ }
+ }
+ onClose();
+ },
onError: onClose,
});
- }, [workflow, name, createNewWorkflow, onClose]);
+ }, [workflow, name, isPublic, createNewWorkflow, updateIsPublic, onClose]);
return (
@@ -126,6 +143,10 @@ const Content = memo(({ workflow, cancelRef }: { workflow: WorkflowV3; cancelRef
{t('workflows.workflowName')}
+
+
+ {t('workflows.shareWorkflow')}
+
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
index 6f5acc431e..e683cfdbef 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/SaveWorkflowMenuItem.tsx
@@ -1,5 +1,6 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useDoesWorkflowHaveUnsavedChanges } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
+import { useIsCurrentWorkflowOwner } from 'features/workflowLibrary/hooks/useIsCurrentWorkflowOwner';
import { useSaveOrSaveAsWorkflow } from 'features/workflowLibrary/hooks/useSaveOrSaveAsWorkflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -9,11 +10,12 @@ const SaveWorkflowMenuItem = () => {
const { t } = useTranslation();
const saveOrSaveAsWorkflow = useSaveOrSaveAsWorkflow();
const doesWorkflowHaveUnsavedChanges = useDoesWorkflowHaveUnsavedChanges();
+ const isCurrentWorkflowOwner = useIsCurrentWorkflowOwner();
return (
}
onClick={saveOrSaveAsWorkflow}
>
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useCreateNewWorkflow.ts b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useCreateNewWorkflow.ts
index 543283c779..37fe48726e 100644
--- a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useCreateNewWorkflow.ts
+++ b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useCreateNewWorkflow.ts
@@ -29,7 +29,7 @@ export const isDraftWorkflow = (workflow: WorkflowV3): workflow is DraftWorkflow
type CreateLibraryWorkflowArg = {
workflow: DraftWorkflow;
- onSuccess?: () => void;
+ onSuccess?: (workflowId?: string) => void;
onError?: () => void;
};
@@ -70,7 +70,7 @@ export const useCreateLibraryWorkflow = (): CreateLibraryWorkflowReturn => {
// When a workflow is saved, the form field initial values are updated to the current form field values
dispatch(formFieldInitialValuesChanged({ formFieldInitialValues: getFormFieldInitialValues() }));
updateOpenedAt({ workflow_id: id });
- onSuccess?.();
+ onSuccess?.(id);
toast.update(toastRef.current, {
title: t('workflows.workflowSaved'),
status: 'success',
diff --git a/invokeai/frontend/web/src/features/workflowLibrary/hooks/useIsCurrentWorkflowOwner.ts b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useIsCurrentWorkflowOwner.ts
new file mode 100644
index 0000000000..5183c9050b
--- /dev/null
+++ b/invokeai/frontend/web/src/features/workflowLibrary/hooks/useIsCurrentWorkflowOwner.ts
@@ -0,0 +1,48 @@
+import { skipToken } from '@reduxjs/toolkit/query';
+import { useAppSelector } from 'app/store/storeHooks';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
+import { selectWorkflowId } from 'features/nodes/store/selectors';
+import { useMemo } from 'react';
+import { useGetSetupStatusQuery } from 'services/api/endpoints/auth';
+import { useGetWorkflowQuery } from 'services/api/endpoints/workflows';
+
+/**
+ * Returns true if the current user can save the currently-loaded workflow directly (not as a copy).
+ *
+ * In single-user mode, this always returns true.
+ * In multiuser mode, returns true when:
+ * - The workflow has no ID (new, unsaved workflow — will open Save As)
+ * - The current user is the owner of the workflow
+ * - The current user is an admin
+ */
+export const useIsCurrentWorkflowOwner = (): boolean => {
+ const workflowId = useAppSelector(selectWorkflowId);
+ const currentUser = useAppSelector(selectCurrentUser);
+ const { data: setupStatus } = useGetSetupStatusQuery();
+ const { data: workflowData } = useGetWorkflowQuery(workflowId ?? skipToken);
+
+ return useMemo(() => {
+ // In single-user mode there is no concept of ownership, so saving is always allowed.
+ if (!setupStatus?.multiuser_enabled) {
+ return true;
+ }
+
+ // No authenticated user — be permissive.
+ if (!currentUser) {
+ return true;
+ }
+
+ // No workflow ID means this is a new/unsaved workflow. Clicking "Save" will open the
+ // Save As dialog, so we should not block it.
+ if (!workflowId) {
+ return true;
+ }
+
+ // API data not yet available — be permissive to avoid incorrect disabling during loading.
+ if (!workflowData) {
+ return true;
+ }
+
+ return workflowData.user_id === currentUser.user_id || currentUser.is_admin;
+ }, [setupStatus?.multiuser_enabled, workflowId, workflowData, currentUser]);
+};
diff --git a/invokeai/frontend/web/src/services/api/endpoints/auth.ts b/invokeai/frontend/web/src/services/api/endpoints/auth.ts
index 419e7c730c..ae7bfa7426 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/auth.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/auth.ts
@@ -34,6 +34,7 @@ type SetupStatusResponse = {
setup_required: boolean;
multiuser_enabled: boolean;
strict_password_checking: boolean;
+ admin_email: string | null;
};
export type UserDTO = components['schemas']['UserDTO'];
diff --git a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
index f58d3281a2..176546c90f 100644
--- a/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
+++ b/invokeai/frontend/web/src/services/api/endpoints/workflows.ts
@@ -157,6 +157,21 @@ export const workflowsApi = api.injectEndpoints({
}),
invalidatesTags: (result, error, workflow_id) => [{ type: 'Workflow', id: workflow_id }],
}),
+ updateWorkflowIsPublic: build.mutation<
+ paths['/api/v1/workflows/i/{workflow_id}/is_public']['patch']['responses']['200']['content']['application/json'],
+ { workflow_id: string; is_public: boolean }
+ >({
+ query: ({ workflow_id, is_public }) => ({
+ url: buildWorkflowsUrl(`i/${workflow_id}/is_public`),
+ method: 'PATCH',
+ body: { is_public },
+ }),
+ invalidatesTags: (result, error, { workflow_id }) => [
+ { type: 'Workflow', id: workflow_id },
+ { type: 'Workflow', id: LIST_TAG },
+ 'WorkflowCategoryCounts',
+ ],
+ }),
}),
});
@@ -173,4 +188,5 @@ export const {
useListWorkflowsInfiniteInfiniteQuery,
useSetWorkflowThumbnailMutation,
useDeleteWorkflowThumbnailMutation,
+ useUpdateWorkflowIsPublicMutation,
} = workflowsApi;
diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
index eeb89b4922..55746e5294 100644
--- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
+++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts
@@ -26,6 +26,7 @@ import {
isLoRAModelConfig,
isMainOrExternalModelConfig,
isQwen3EncoderModelConfig,
+ isQwenImageDiffusersMainModelConfig,
isRefinerMainModelModelConfig,
isSpandrelImageToImageModelConfig,
isT5EncoderModelConfigOrSubmodel,
@@ -100,6 +101,7 @@ export const useFlux2VAEModels = () => buildModelsHook(isFlux2VAEModelConfig)();
export const useAnimaVAEModels = () => buildModelsHook(isAnimaVAEModelConfig)();
export const useAnimaQwen3EncoderModels = () => buildModelsHook(isAnimaQwen3EncoderModelConfig)();
export const useZImageDiffusersModels = () => buildModelsHook(isZImageDiffusersMainModelConfig)();
+export const useQwenImageDiffusersModels = () => buildModelsHook(isQwenImageDiffusersMainModelConfig)();
export const useQwen3EncoderModels = () => buildModelsHook(isQwen3EncoderModelConfig)();
export const useGlobalReferenceImageModels = buildModelsHook(
(config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isFluxKontextModelConfig(config)
@@ -136,6 +138,7 @@ export const selectRegionalRefImageModels = buildModelsSelector(
);
export const selectAnimaQwen3EncoderModels = buildModelsSelector(isAnimaQwen3EncoderModelConfig);
export const selectQwen3EncoderModels = buildModelsSelector(isQwen3EncoderModelConfig);
+export const selectQwenImageDiffusersModels = buildModelsSelector(isQwenImageDiffusersMainModelConfig);
export const selectZImageDiffusersModels = buildModelsSelector(isZImageDiffusersMainModelConfig);
export const selectFluxVAEModels = buildModelsSelector(isFluxVAEModelConfig);
export const selectAnimaVAEModels = buildModelsSelector(isAnimaVAEModelConfig);
diff --git a/invokeai/frontend/web/src/services/api/hooks/useAutoAddBoard.ts b/invokeai/frontend/web/src/services/api/hooks/useAutoAddBoard.ts
new file mode 100644
index 0000000000..1ae2227007
--- /dev/null
+++ b/invokeai/frontend/web/src/services/api/hooks/useAutoAddBoard.ts
@@ -0,0 +1,21 @@
+import { useAppSelector } from 'app/store/storeHooks';
+import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
+import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
+
+/**
+ * Returns the `BoardDTO` for the board currently configured as the auto-add
+ * destination, or `null` when it is set to "Uncategorized" (`boardId === 'none'`)
+ * or when the board list has not yet loaded.
+ */
+export const useAutoAddBoard = () => {
+ const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
+ const { board } = useListAllBoardsQuery(
+ { include_archived: true },
+ {
+ selectFromResult: ({ data }) => ({
+ board: data?.find((b) => b.board_id === autoAddBoardId) ?? null,
+ }),
+ }
+ );
+ return board;
+};
diff --git a/invokeai/frontend/web/src/services/api/hooks/useBoardAccess.ts b/invokeai/frontend/web/src/services/api/hooks/useBoardAccess.ts
new file mode 100644
index 0000000000..9a22202425
--- /dev/null
+++ b/invokeai/frontend/web/src/services/api/hooks/useBoardAccess.ts
@@ -0,0 +1,32 @@
+import { useAppSelector } from 'app/store/storeHooks';
+import { selectCurrentUser } from 'features/auth/store/authSlice';
+import type { BoardDTO } from 'services/api/types';
+
+/**
+ * Returns permission flags for the given board based on the current user:
+ * - `canWriteImages`: can add / delete images in the board
+ * (owner or admin always; non-owner allowed only for public boards)
+ * - `canRenameBoard`: can rename the board (owner or admin only)
+ * - `canDeleteBoard`: can delete the board (owner or admin only)
+ *
+ * When `board` is null/undefined (e.g. "uncategorized"), all permissions are
+ * granted so that existing behaviour is preserved.
+ *
+ * When `currentUser` is null the app is running without authentication
+ * (single-user mode), so full access is granted unconditionally.
+ */
+export const useBoardAccess = (board: BoardDTO | null | undefined) => {
+ const currentUser = useAppSelector(selectCurrentUser);
+
+ if (!board) {
+ return { canWriteImages: true, canRenameBoard: true, canDeleteBoard: true };
+ }
+
+ const isOwnerOrAdmin = !currentUser || currentUser.is_admin || board.user_id === currentUser.user_id;
+
+ return {
+ canWriteImages: isOwnerOrAdmin || board.board_visibility === 'public',
+ canRenameBoard: isOwnerOrAdmin,
+ canDeleteBoard: isOwnerOrAdmin,
+ };
+};
diff --git a/invokeai/frontend/web/src/services/api/hooks/useSelectedBoard.ts b/invokeai/frontend/web/src/services/api/hooks/useSelectedBoard.ts
new file mode 100644
index 0000000000..40c6d77f37
--- /dev/null
+++ b/invokeai/frontend/web/src/services/api/hooks/useSelectedBoard.ts
@@ -0,0 +1,21 @@
+import { useAppSelector } from 'app/store/storeHooks';
+import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
+import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
+
+/**
+ * Returns the `BoardDTO` for the currently selected board, or `null` when the
+ * user is viewing "Uncategorized" (`boardId === 'none'`) or when the board list
+ * has not yet loaded.
+ */
+export const useSelectedBoard = () => {
+ const selectedBoardId = useAppSelector(selectSelectedBoardId);
+ const { board } = useListAllBoardsQuery(
+ { include_archived: true },
+ {
+ selectFromResult: ({ data }) => ({
+ board: data?.find((b) => b.board_id === selectedBoardId) ?? null,
+ }),
+ }
+ );
+ return board;
+};
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 50a400f026..8bd5d61eea 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -1042,14 +1042,14 @@ export type paths = {
};
/**
* Get Intermediates Count
- * @description Gets the count of intermediate images
+ * @description Gets the count of intermediate images. Non-admin users only see their own intermediates.
*/
get: operations["get_intermediates_count"];
put?: never;
post?: never;
/**
* Clear Intermediates
- * @description Clears all intermediates
+ * @description Clears all intermediates. Requires admin.
*/
delete: operations["clear_intermediates"];
options?: never;
@@ -1103,7 +1103,11 @@ export type paths = {
};
/**
* Get Image Full
- * @description Gets a full-resolution image file
+ * @description Gets a full-resolution image file.
+ *
+ * This endpoint is intentionally unauthenticated because browsers load images
+ * via `<img>` tags which cannot send Bearer tokens. Image names are UUIDs,
+ * providing security through unguessability.
*/
get: operations["get_image_full"];
put?: never;
@@ -1112,7 +1116,11 @@ export type paths = {
options?: never;
/**
* Get Image Full
- * @description Gets a full-resolution image file
+ * @description Gets a full-resolution image file.
+ *
+ * This endpoint is intentionally unauthenticated because browsers load images
+ * via `<img>` tags which cannot send Bearer tokens. Image names are UUIDs,
+ * providing security through unguessability.
*/
head: operations["get_image_full_head"];
patch?: never;
@@ -1127,7 +1135,11 @@ export type paths = {
};
/**
* Get Image Thumbnail
- * @description Gets a thumbnail image file
+ * @description Gets a thumbnail image file.
+ *
+ * This endpoint is intentionally unauthenticated because browsers load images
+ * via `<img>` tags which cannot send Bearer tokens. Image names are UUIDs,
+ * providing security through unguessability.
*/
get: operations["get_image_thumbnail"];
put?: never;
@@ -1187,7 +1199,7 @@ export type paths = {
post?: never;
/**
* Delete Uncategorized Images
- * @description Deletes all images that are uncategorized
+ * @description Deletes all uncategorized images owned by the current user (or all if admin)
*/
delete: operations["delete_uncategorized_images"];
options?: never;
@@ -1255,7 +1267,10 @@ export type paths = {
};
/**
* Get Bulk Download Item
- * @description Gets a bulk download zip file
+ * @description Gets a bulk download zip file.
+ *
+ * Requires authentication. The caller must be the user who initiated the
+ * download (tracked by the bulk download service) or an admin.
*/
get: operations["get_bulk_download_item"];
put?: never;
@@ -1779,7 +1794,7 @@ export type paths = {
};
/**
* Get Queue Item Ids
- * @description Gets all queue item ids that match the given parameters
+ * @description Gets all queue item ids that match the given parameters. Non-admin users only see their own items.
*/
get: operations["get_queue_item_ids"];
put?: never;
@@ -2039,7 +2054,7 @@ export type paths = {
};
/**
* Get Queue Status
- * @description Gets the status of the session queue
+ * @description Gets the status of the session queue. Non-admin users see only their own counts and cannot see current item details unless they own it.
*/
get: operations["get_queue_status"];
put?: never;
@@ -2059,7 +2074,7 @@ export type paths = {
};
/**
* Get Batch Status
- * @description Gets the status of the session queue
+ * @description Gets the status of a batch. Non-admin users only see their own batches.
*/
get: operations["get_batch_status"];
put?: never;
@@ -2123,7 +2138,7 @@ export type paths = {
};
/**
* Counts By Destination
- * @description Gets the counts of queue items by destination
+ * @description Gets the counts of queue items by destination. Non-admin users only see their own items.
*/
get: operations["counts_by_destination"];
put?: never;
@@ -2215,7 +2230,11 @@ export type paths = {
};
/**
* Get Workflow Thumbnail
- * @description Gets a workflow's thumbnail image
+ * @description Gets a workflow's thumbnail image.
+ *
+ * This endpoint is intentionally unauthenticated because browsers load images
+ * via `<img>` tags which cannot send Bearer tokens. Workflow IDs are UUIDs,
+ * providing security through unguessability.
*/
get: operations["get_workflow_thumbnail"];
/**
@@ -2234,6 +2253,26 @@ export type paths = {
patch?: never;
trace?: never;
};
+ "/api/v1/workflows/i/{workflow_id}/is_public": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ /**
+ * Update Workflow Is Public
+ * @description Updates whether a workflow is shared publicly
+ */
+ patch: operations["update_workflow_is_public"];
+ trace?: never;
+ };
"/api/v1/workflows/tags": {
parameters: {
query?: never;
@@ -3204,7 +3243,7 @@ export type components = {
*/
type: "anima_text_encoder";
};
- AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ AnyModelConfig: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
/**
* AppVersion
* @description App Version Response
@@ -3356,7 +3395,7 @@ export type components = {
* fallback/null value `BaseModelType.Any` for these models, instead of making the model base optional.
* @enum {string}
*/
- BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "flux2" | "cogview4" | "z-image" | "external" | "anima" | "unknown";
+ BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "flux2" | "cogview4" | "z-image" | "external" | "qwen-image" | "anima" | "unknown";
/** Batch */
Batch: {
/**
@@ -3449,6 +3488,12 @@ export type components = {
* @default null
*/
origin: string | null;
+ /**
+ * User Id
+ * @description The ID of the user who enqueued the batch
+ * @default system
+ */
+ user_id: string;
};
/** BatchStatus */
BatchStatus: {
@@ -3639,6 +3684,8 @@ export type components = {
* @description Whether or not the board is archived
*/
archived?: boolean | null;
+ /** @description The visibility of the board. */
+ board_visibility?: components["schemas"]["BoardVisibility"] | null;
};
/**
* BoardDTO
@@ -3685,6 +3732,11 @@ export type components = {
* @description Whether or not the board is archived.
*/
archived: boolean;
+ /**
+ * @description The visibility of the board.
+ * @default private
+ */
+ board_visibility?: components["schemas"]["BoardVisibility"];
/**
* Image Count
* @description The number of images in the board.
@@ -3718,6 +3770,12 @@ export type components = {
* @enum {string}
*/
BoardRecordOrderBy: "created_at" | "board_name";
+ /**
+ * BoardVisibility
+ * @description The visibility options for a board.
+ * @enum {string}
+ */
+ BoardVisibility: "private" | "shared" | "public";
/** Body_add_image_to_board */
Body_add_image_to_board: {
/**
@@ -3970,6 +4028,14 @@ export type components = {
/** @description The updated workflow */
workflow: components["schemas"]["Workflow"];
};
+ /** Body_update_workflow_is_public */
+ Body_update_workflow_is_public: {
+ /**
+ * Is Public
+ * @description Whether the workflow should be shared publicly
+ */
+ is_public: boolean;
+ };
/** Body_upload_image */
Body_upload_image: {
/**
@@ -4262,6 +4328,12 @@ export type components = {
* @description The name of the bulk image download item
*/
bulk_download_item_name: string;
+ /**
+ * User Id
+ * @description The ID of the user who initiated the download
+ * @default system
+ */
+ user_id: string;
};
/**
* BulkDownloadErrorEvent
@@ -4288,6 +4360,12 @@ export type components = {
* @description The name of the bulk image download item
*/
bulk_download_item_name: string;
+ /**
+ * User Id
+ * @description The ID of the user who initiated the download
+ * @default system
+ */
+ user_id: string;
/**
* Error
* @description The error message
@@ -4319,6 +4397,12 @@ export type components = {
* @description The name of the bulk image download item
*/
bulk_download_item_name: string;
+ /**
+ * User Id
+ * @description The ID of the user who initiated the download
+ * @default system
+ */
+ user_id: string;
};
/**
* BulkReidentifyModelsRequest
@@ -7079,7 +7163,7 @@ export type components = {
* @description The generation mode that output this image
* @default null
*/
- generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "flux2_txt2img" | "flux2_img2img" | "flux2_inpaint" | "flux2_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint" | "z_image_txt2img" | "z_image_img2img" | "z_image_inpaint" | "z_image_outpaint" | "anima_txt2img" | "anima_img2img" | "anima_inpaint" | "anima_outpaint") | null;
+ generation_mode?: ("txt2img" | "img2img" | "inpaint" | "outpaint" | "sdxl_txt2img" | "sdxl_img2img" | "sdxl_inpaint" | "sdxl_outpaint" | "flux_txt2img" | "flux_img2img" | "flux_inpaint" | "flux_outpaint" | "flux2_txt2img" | "flux2_img2img" | "flux2_inpaint" | "flux2_outpaint" | "sd3_txt2img" | "sd3_img2img" | "sd3_inpaint" | "sd3_outpaint" | "cogview4_txt2img" | "cogview4_img2img" | "cogview4_inpaint" | "cogview4_outpaint" | "z_image_txt2img" | "z_image_img2img" | "z_image_inpaint" | "z_image_outpaint" | "qwen_image_txt2img" | "qwen_image_img2img" | "qwen_image_inpaint" | "qwen_image_outpaint" | "anima_txt2img" | "anima_img2img" | "anima_inpaint" | "anima_outpaint") | null;
/**
* Positive Prompt
* @description The positive prompt parameter
@@ -11850,7 +11934,7 @@ export type components = {
* @description The nodes in this graph
*/
nodes?: {
- [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
+ [key: string]: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["QwenImageDenoiseInvocation"] | components["schemas"]["QwenImageImageToLatentsInvocation"] | components["schemas"]["QwenImageLatentsToImageInvocation"] | components["schemas"]["QwenImageLoRACollectionLoader"] | components["schemas"]["QwenImageLoRALoaderInvocation"] | components["schemas"]["QwenImageModelLoaderInvocation"] | components["schemas"]["QwenImageTextEncoderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
};
/**
* Edges
@@ -11887,7 +11971,7 @@ export type components = {
* @description The results of node executions
*/
results: {
- [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["IfInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | 
components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] 
| components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"];
+ [key: string]: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["IfInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | 
components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["QwenImageConditioningOutput"] | components["schemas"]["QwenImageLoRALoaderOutput"] | components["schemas"]["QwenImageModelLoaderOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"];
};
/**
* Errors
@@ -15134,7 +15218,7 @@ export type components = {
* Invocation
* @description The ID of the invocation
*/
- invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
+ invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["QwenImageDenoiseInvocation"] | components["schemas"]["QwenImageImageToLatentsInvocation"] | components["schemas"]["QwenImageLatentsToImageInvocation"] | components["schemas"]["QwenImageLoRACollectionLoader"] | components["schemas"]["QwenImageLoRALoaderInvocation"] | components["schemas"]["QwenImageModelLoaderInvocation"] | components["schemas"]["QwenImageTextEncoderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
/**
* Invocation Source Id
* @description The ID of the prepared invocation's source node
@@ -15144,7 +15228,7 @@ export type components = {
* Result
* @description The result of the invocation
*/
- result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["IfInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | 
components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] 
| components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"];
+ result: components["schemas"]["AnimaConditioningOutput"] | components["schemas"]["AnimaLoRALoaderOutput"] | components["schemas"]["AnimaModelLoaderOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CogView4ConditioningOutput"] | components["schemas"]["CogView4ModelLoaderOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatGeneratorOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["Flux2KleinLoRALoaderOutput"] | components["schemas"]["Flux2KleinModelLoaderOutput"] | components["schemas"]["FluxConditioningCollectionOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxControlLoRALoaderOutput"] | components["schemas"]["FluxControlNetOutput"] | components["schemas"]["FluxFillOutput"] | components["schemas"]["FluxKontextOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["FluxReduxOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["IfInvocationOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageGeneratorOutput"] | 
components["schemas"]["ImageOutput"] | components["schemas"]["ImagePanelCoordinateOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerGeneratorOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsMetaOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MDControlListOutput"] | components["schemas"]["MDIPAdapterListOutput"] | components["schemas"]["MDT2IAdapterListOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["MetadataToLorasCollectionOutput"] | components["schemas"]["MetadataToModelOutput"] | components["schemas"]["MetadataToSDXLModelOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PBRMapsOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["PromptTemplateOutput"] | components["schemas"]["QwenImageConditioningOutput"] | components["schemas"]["QwenImageLoRALoaderOutput"] | components["schemas"]["QwenImageModelLoaderOutput"] | components["schemas"]["SD3ConditioningOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["Sd3ModelLoaderOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringGeneratorOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | 
components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["ZImageConditioningOutput"] | components["schemas"]["ZImageControlOutput"] | components["schemas"]["ZImageLoRALoaderOutput"] | components["schemas"]["ZImageModelLoaderOutput"];
};
/**
* InvocationErrorEvent
@@ -15198,7 +15282,7 @@ export type components = {
* Invocation
* @description The ID of the invocation
*/
- invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
+ invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["QwenImageDenoiseInvocation"] | components["schemas"]["QwenImageImageToLatentsInvocation"] | components["schemas"]["QwenImageLatentsToImageInvocation"] | components["schemas"]["QwenImageLoRACollectionLoader"] | components["schemas"]["QwenImageLoRALoaderInvocation"] | components["schemas"]["QwenImageModelLoaderInvocation"] | components["schemas"]["QwenImageTextEncoderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
/**
* Invocation Source Id
* @description The ID of the prepared invocation's source node
@@ -15412,6 +15496,13 @@ export type components = {
pidi_edge_detection: components["schemas"]["ImageOutput"];
prompt_from_file: components["schemas"]["StringCollectionOutput"];
prompt_template: components["schemas"]["PromptTemplateOutput"];
+ qwen_image_denoise: components["schemas"]["LatentsOutput"];
+ qwen_image_i2l: components["schemas"]["LatentsOutput"];
+ qwen_image_l2i: components["schemas"]["ImageOutput"];
+ qwen_image_lora_collection_loader: components["schemas"]["QwenImageLoRALoaderOutput"];
+ qwen_image_lora_loader: components["schemas"]["QwenImageLoRALoaderOutput"];
+ qwen_image_model_loader: components["schemas"]["QwenImageModelLoaderOutput"];
+ qwen_image_text_encoder: components["schemas"]["QwenImageConditioningOutput"];
rand_float: components["schemas"]["FloatOutput"];
rand_int: components["schemas"]["IntegerOutput"];
random_range: components["schemas"]["IntegerCollectionOutput"];
@@ -15517,7 +15608,7 @@ export type components = {
* Invocation
* @description The ID of the invocation
*/
- invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
+ invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["QwenImageDenoiseInvocation"] | components["schemas"]["QwenImageImageToLatentsInvocation"] | components["schemas"]["QwenImageLatentsToImageInvocation"] | components["schemas"]["QwenImageLoRACollectionLoader"] | components["schemas"]["QwenImageLoRALoaderInvocation"] | components["schemas"]["QwenImageModelLoaderInvocation"] | components["schemas"]["QwenImageTextEncoderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
/**
* Invocation Source Id
* @description The ID of the prepared invocation's source node
@@ -15592,7 +15683,7 @@ export type components = {
* Invocation
* @description The ID of the invocation
*/
- invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] 
| components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
+ invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["AnimaDenoiseInvocation"] | components["schemas"]["AnimaImageToLatentsInvocation"] | components["schemas"]["AnimaLatentsToImageInvocation"] | components["schemas"]["AnimaLoRACollectionLoader"] | components["schemas"]["AnimaLoRALoaderInvocation"] | components["schemas"]["AnimaModelLoaderInvocation"] | components["schemas"]["AnimaTextEncoderInvocation"] | components["schemas"]["ApplyMaskTensorToImageInvocation"] | components["schemas"]["ApplyMaskToImageInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CanvasOutputInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CogView4DenoiseInvocation"] | components["schemas"]["CogView4ImageToLatentsInvocation"] | components["schemas"]["CogView4LatentsToImageInvocation"] | components["schemas"]["CogView4ModelLoaderInvocation"] | components["schemas"]["CogView4TextEncoderInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropImageToBoundingBoxInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DecodeInvisibleWatermarkInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DenoiseLatentsMetaInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["ExpandMaskWithFadeInvocation"] | components["schemas"]["ExternalImageGenerationInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatBatchInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatGenerator"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["Flux2DenoiseInvocation"] | components["schemas"]["Flux2KleinLoRACollectionLoader"] | components["schemas"]["Flux2KleinLoRALoaderInvocation"] | components["schemas"]["Flux2KleinModelLoaderInvocation"] | components["schemas"]["Flux2KleinTextEncoderInvocation"] | components["schemas"]["Flux2VaeDecodeInvocation"] | components["schemas"]["Flux2VaeEncodeInvocation"] | components["schemas"]["FluxControlLoRALoaderInvocation"] 
| components["schemas"]["FluxControlNetInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxDenoiseLatentsMetaInvocation"] | components["schemas"]["FluxFillInvocation"] | components["schemas"]["FluxIPAdapterInvocation"] | components["schemas"]["FluxKontextConcatenateImagesInvocation"] | components["schemas"]["FluxKontextInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxReduxInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GeminiImageGenerationInvocation"] | components["schemas"]["GetMaskBoundingBoxInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["IfInvocation"] | components["schemas"]["ImageBatchInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageGenerator"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageNoiseInvocation"] | 
components["schemas"]["ImagePanelLayoutInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerBatchInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerGenerator"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["InvokeAdjustImageHuePlusInvocation"] | components["schemas"]["InvokeEquivalentAchromaticLightnessInvocation"] | components["schemas"]["InvokeImageBlendInvocation"] | components["schemas"]["InvokeImageCompositorInvocation"] | components["schemas"]["InvokeImageDilateOrErodeInvocation"] | components["schemas"]["InvokeImageEnhanceInvocation"] | components["schemas"]["InvokeImageValueThresholdsInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LlavaOnevisionVllmInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | 
components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataFieldExtractorInvocation"] | components["schemas"]["MetadataFromImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MetadataItemLinkedInvocation"] | components["schemas"]["MetadataToBoolCollectionInvocation"] | components["schemas"]["MetadataToBoolInvocation"] | components["schemas"]["MetadataToControlnetsInvocation"] | components["schemas"]["MetadataToFloatCollectionInvocation"] | components["schemas"]["MetadataToFloatInvocation"] | components["schemas"]["MetadataToIPAdaptersInvocation"] | components["schemas"]["MetadataToIntegerCollectionInvocation"] | components["schemas"]["MetadataToIntegerInvocation"] | components["schemas"]["MetadataToLorasCollectionInvocation"] | components["schemas"]["MetadataToLorasInvocation"] | components["schemas"]["MetadataToModelInvocation"] | components["schemas"]["MetadataToSDXLLorasInvocation"] | components["schemas"]["MetadataToSDXLModelInvocation"] | components["schemas"]["MetadataToSchedulerInvocation"] | components["schemas"]["MetadataToStringCollectionInvocation"] | components["schemas"]["MetadataToStringInvocation"] | components["schemas"]["MetadataToT2IAdaptersInvocation"] | components["schemas"]["MetadataToVAEInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["OpenAIImageGenerationInvocation"] | components["schemas"]["PBRMapsInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PasteImageIntoBoundingBoxInvocation"] | 
components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PromptTemplateInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["QwenImageDenoiseInvocation"] | components["schemas"]["QwenImageImageToLatentsInvocation"] | components["schemas"]["QwenImageLatentsToImageInvocation"] | components["schemas"]["QwenImageLoRACollectionLoader"] | components["schemas"]["QwenImageLoRALoaderInvocation"] | components["schemas"]["QwenImageModelLoaderInvocation"] | components["schemas"]["QwenImageTextEncoderInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SD3DenoiseInvocation"] | components["schemas"]["SD3ImageToLatentsInvocation"] | components["schemas"]["SD3LatentsToImageInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["Sd3ModelLoaderInvocation"] | components["schemas"]["Sd3TextEncoderInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | 
components["schemas"]["StringBatchInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringGenerator"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZImageControlInvocation"] | components["schemas"]["ZImageDenoiseInvocation"] | components["schemas"]["ZImageDenoiseMetaInvocation"] | components["schemas"]["ZImageImageToLatentsInvocation"] | components["schemas"]["ZImageLatentsToImageInvocation"] | components["schemas"]["ZImageLoRACollectionLoader"] | components["schemas"]["ZImageLoRALoaderInvocation"] | components["schemas"]["ZImageModelLoaderInvocation"] | components["schemas"]["ZImageSeedVarianceEnhancerInvocation"] | components["schemas"]["ZImageTextEncoderInvocation"];
/**
* Invocation Source Id
* @description The ID of the prepared invocation's source node
@@ -18082,6 +18173,84 @@ export type components = {
base: "flux2";
variant: components["schemas"]["Flux2VariantType"] | null;
};
+ /**
+ * LoRA_LyCORIS_QwenImage_Config
+ * @description Model config for Qwen Image Edit LoRA models in LyCORIS format.
+ */
+ LoRA_LyCORIS_QwenImage_Config: {
+ /**
+ * Key
+ * @description A unique key for this model.
+ */
+ key: string;
+ /**
+ * Hash
+ * @description The hash of the model file(s).
+ */
+ hash: string;
+ /**
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ */
+ path: string;
+ /**
+ * File Size
+ * @description The size of the model in bytes.
+ */
+ file_size: number;
+ /**
+ * Name
+ * @description Name of the model.
+ */
+ name: string;
+ /**
+ * Description
+ * @description Model description
+ */
+ description: string | null;
+ /**
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
+ */
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
+ /**
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
+ */
+ source_api_response: string | null;
+ /**
+ * Cover Image
+ * @description Url for image to preview model
+ */
+ cover_image: string | null;
+ /**
+ * Type
+ * @default lora
+ * @constant
+ */
+ type: "lora";
+ /**
+ * Trigger Phrases
+ * @description Set of trigger phrases for this model
+ */
+ trigger_phrases: string[] | null;
+ /** @description Default settings for this model */
+ default_settings: components["schemas"]["LoraModelDefaultSettings"] | null;
+ /**
+ * Format
+ * @default lycoris
+ * @constant
+ */
+ format: "lycoris";
+ /**
+ * Base
+ * @default qwen-image
+ * @constant
+ */
+ base: "qwen-image";
+ };
/** LoRA_LyCORIS_SD1_Config */
LoRA_LyCORIS_SD1_Config: {
/**
@@ -19792,6 +19961,87 @@ export type components = {
base: "flux2";
variant: components["schemas"]["Flux2VariantType"];
};
+ /**
+ * Main_Diffusers_QwenImage_Config
+ * @description Model config for Qwen Image diffusers models (both txt2img and edit).
+ */
+ Main_Diffusers_QwenImage_Config: {
+ /**
+ * Key
+ * @description A unique key for this model.
+ */
+ key: string;
+ /**
+ * Hash
+ * @description The hash of the model file(s).
+ */
+ hash: string;
+ /**
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ */
+ path: string;
+ /**
+ * File Size
+ * @description The size of the model in bytes.
+ */
+ file_size: number;
+ /**
+ * Name
+ * @description Name of the model.
+ */
+ name: string;
+ /**
+ * Description
+ * @description Model description
+ */
+ description: string | null;
+ /**
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
+ */
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
+ /**
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
+ */
+ source_api_response: string | null;
+ /**
+ * Cover Image
+ * @description Url for image to preview model
+ */
+ cover_image: string | null;
+ /**
+ * Type
+ * @default main
+ * @constant
+ */
+ type: "main";
+ /**
+ * Trigger Phrases
+ * @description Set of trigger phrases for this model
+ */
+ trigger_phrases: string[] | null;
+ /** @description Default settings for this model */
+ default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ /**
+ * Format
+ * @default diffusers
+ * @constant
+ */
+ format: "diffusers";
+ /** @default */
+ repo_variant: components["schemas"]["ModelRepoVariant"];
+ /**
+ * Base
+ * @default qwen-image
+ * @constant
+ */
+ base: "qwen-image";
+ variant: components["schemas"]["QwenImageVariantType"] | null;
+ };
/** Main_Diffusers_SD1_Config */
Main_Diffusers_SD1_Config: {
/**
@@ -20441,6 +20691,90 @@ export type components = {
format: "gguf_quantized";
variant: components["schemas"]["Flux2VariantType"];
};
+ /**
+ * Main_GGUF_QwenImage_Config
+ * @description Model config for GGUF-quantized Qwen Image transformer models.
+ */
+ Main_GGUF_QwenImage_Config: {
+ /**
+ * Key
+ * @description A unique key for this model.
+ */
+ key: string;
+ /**
+ * Hash
+ * @description The hash of the model file(s).
+ */
+ hash: string;
+ /**
+ * Path
+ * @description Path to the model on the filesystem. Relative paths are relative to the Invoke root directory.
+ */
+ path: string;
+ /**
+ * File Size
+ * @description The size of the model in bytes.
+ */
+ file_size: number;
+ /**
+ * Name
+ * @description Name of the model.
+ */
+ name: string;
+ /**
+ * Description
+ * @description Model description
+ */
+ description: string | null;
+ /**
+ * Source
+ * @description The original source of the model (path, URL or repo_id).
+ */
+ source: string;
+ /** @description The type of source */
+ source_type: components["schemas"]["ModelSourceType"];
+ /**
+ * Source Api Response
+ * @description The original API response from the source, as stringified JSON.
+ */
+ source_api_response: string | null;
+ /**
+ * Cover Image
+ * @description Url for image to preview model
+ */
+ cover_image: string | null;
+ /**
+ * Type
+ * @default main
+ * @constant
+ */
+ type: "main";
+ /**
+ * Trigger Phrases
+ * @description Set of trigger phrases for this model
+ */
+ trigger_phrases: string[] | null;
+ /** @description Default settings for this model */
+ default_settings: components["schemas"]["MainModelDefaultSettings"] | null;
+ /**
+ * Config Path
+ * @description Path to the config for this model, if any.
+ */
+ config_path: string | null;
+ /**
+ * Base
+ * @default qwen-image
+ * @constant
+ */
+ base: "qwen-image";
+ /**
+ * Format
+ * @default gguf_quantized
+ * @constant
+ */
+ format: "gguf_quantized";
+ variant: components["schemas"]["QwenImageVariantType"] | null;
+ };
/**
* Main_GGUF_ZImage_Config
* @description Model config for GGUF-quantized Z-Image transformer models.
@@ -22353,7 +22687,7 @@ export type components = {
* Config
* @description The installed model's config
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
/**
* ModelInstallDownloadProgressEvent
@@ -22519,7 +22853,7 @@ export type components = {
* Config Out
* @description After successful installation, this will hold the configuration object.
*/
- config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"]) | null;
+ config_out?: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] 
| components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"]) | null;
/**
* Inplace
* @description Leave model in its current location; otherwise install under models directory
@@ -22605,7 +22939,7 @@ export type components = {
* Config
* @description The model's config
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
/**
* @description The submodel type, if any
* @default null
@@ -22626,7 +22960,7 @@ export type components = {
* Config
* @description The model's config
*/
- config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ config: components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
/**
* @description The submodel type, if any
* @default null
@@ -22747,7 +23081,7 @@ export type components = {
* Variant
* @description The variant of the model.
*/
- variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["QwenImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
/** @description The prediction type of the model. */
prediction_type?: components["schemas"]["SchedulerPredictionType"] | null;
/**
@@ -22812,7 +23146,7 @@ export type components = {
*/
ModelsList: {
/** Models */
- models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"])[];
+ models: (components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | 
components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"])[];
};
/**
* Multiply Integers
@@ -24064,6 +24398,510 @@ export type components = {
* @enum {string}
*/
Qwen3VariantType: "qwen3_4b" | "qwen3_8b" | "qwen3_06b";
+ /**
+ * QwenImageConditioningField
+ * @description A Qwen Image Edit conditioning tensor primitive value
+ */
+ QwenImageConditioningField: {
+ /**
+ * Conditioning Name
+ * @description The name of conditioning tensor
+ */
+ conditioning_name: string;
+ };
+ /**
+ * QwenImageConditioningOutput
+ * @description Base class for nodes that output a Qwen Image Edit conditioning tensor.
+ */
+ QwenImageConditioningOutput: {
+ /** @description Conditioning tensor */
+ conditioning: components["schemas"]["QwenImageConditioningField"];
+ /**
+ * type
+ * @default qwen_image_conditioning_output
+ * @constant
+ */
+ type: "qwen_image_conditioning_output";
+ };
+ /**
+ * Denoise - Qwen Image
+ * @description Run the denoising process with a Qwen Image model.
+ */
+ QwenImageDenoiseInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description Latents tensor
+ * @default null
+ */
+ latents?: components["schemas"]["LatentsField"] | null;
+ /**
+ * @description Reference image latents to guide generation. Encoded through the VAE.
+ * @default null
+ */
+ reference_latents?: components["schemas"]["LatentsField"] | null;
+ /**
+ * @description A mask of the region to apply the denoising process to. Values of 0.0 represent the regions to be fully denoised, and 1.0 represent the regions to be preserved.
+ * @default null
+ */
+ denoise_mask?: components["schemas"]["DenoiseMaskField"] | null;
+ /**
+ * Denoising Start
+ * @description When to start denoising, expressed a percentage of total steps
+ * @default 0
+ */
+ denoising_start?: number;
+ /**
+ * Denoising End
+ * @description When to stop denoising, expressed a percentage of total steps
+ * @default 1
+ */
+ denoising_end?: number;
+ /**
+ * Transformer
+ * @description Qwen Image Edit model (Transformer) to load
+ * @default null
+ */
+ transformer?: components["schemas"]["TransformerField"] | null;
+ /**
+ * @description Positive conditioning tensor
+ * @default null
+ */
+ positive_conditioning?: components["schemas"]["QwenImageConditioningField"] | null;
+ /**
+ * @description Negative conditioning tensor
+ * @default null
+ */
+ negative_conditioning?: components["schemas"]["QwenImageConditioningField"] | null;
+ /**
+ * CFG Scale
+ * @description Classifier-Free Guidance scale
+ * @default 4
+ */
+ cfg_scale?: number | number[];
+ /**
+ * Width
+ * @description Width of the generated image.
+ * @default 1024
+ */
+ width?: number;
+ /**
+ * Height
+ * @description Height of the generated image.
+ * @default 1024
+ */
+ height?: number;
+ /**
+ * Steps
+ * @description Number of steps to run
+ * @default 40
+ */
+ steps?: number;
+ /**
+ * Seed
+ * @description Randomness seed for reproducibility.
+ * @default 0
+ */
+ seed?: number;
+ /**
+ * Shift
+ * @description Override the sigma schedule shift. When set, uses a fixed shift (e.g. 3.0 for Lightning LoRAs) instead of the default dynamic shifting. Leave unset for the base model's default schedule.
+ * @default null
+ */
+ shift?: number | null;
+ /**
+ * type
+ * @default qwen_image_denoise
+ * @constant
+ */
+ type: "qwen_image_denoise";
+ };
+ /**
+ * Image to Latents - Qwen Image
+ * @description Generates latents from an image using the Qwen Image VAE.
+ */
+ QwenImageImageToLatentsInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description The image to encode.
+ * @default null
+ */
+ image?: components["schemas"]["ImageField"] | null;
+ /**
+ * @description VAE
+ * @default null
+ */
+ vae?: components["schemas"]["VAEField"] | null;
+ /**
+ * Width
+ * @description Resize the image to this width before encoding. If not set, encodes at the image's original size.
+ * @default null
+ */
+ width?: number | null;
+ /**
+ * Height
+ * @description Resize the image to this height before encoding. If not set, encodes at the image's original size.
+ * @default null
+ */
+ height?: number | null;
+ /**
+ * type
+ * @default qwen_image_i2l
+ * @constant
+ */
+ type: "qwen_image_i2l";
+ };
+ /**
+ * Latents to Image - Qwen Image
+ * @description Generates an image from latents using the Qwen Image VAE.
+ */
+ QwenImageLatentsToImageInvocation: {
+ /**
+ * @description The board to save the image to
+ * @default null
+ */
+ board?: components["schemas"]["BoardField"] | null;
+ /**
+ * @description Optional metadata to be saved with the image
+ * @default null
+ */
+ metadata?: components["schemas"]["MetadataField"] | null;
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * @description Latents tensor
+ * @default null
+ */
+ latents?: components["schemas"]["LatentsField"] | null;
+ /**
+ * @description VAE
+ * @default null
+ */
+ vae?: components["schemas"]["VAEField"] | null;
+ /**
+ * type
+ * @default qwen_image_l2i
+ * @constant
+ */
+ type: "qwen_image_l2i";
+ };
+ /**
+ * Apply LoRA Collection - Qwen Image
+ * @description Applies a collection of LoRAs to a Qwen Image transformer.
+ */
+ QwenImageLoRACollectionLoader: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * LoRAs
+ * @description LoRA models and weights. May be a single LoRA or collection.
+ * @default null
+ */
+ loras?: components["schemas"]["LoRAField"] | components["schemas"]["LoRAField"][] | null;
+ /**
+ * Transformer
+ * @description Transformer
+ * @default null
+ */
+ transformer?: components["schemas"]["TransformerField"] | null;
+ /**
+ * type
+ * @default qwen_image_lora_collection_loader
+ * @constant
+ */
+ type: "qwen_image_lora_collection_loader";
+ };
+ /**
+ * Apply LoRA - Qwen Image
+ * @description Apply a LoRA model to a Qwen Image transformer.
+ */
+ QwenImageLoRALoaderInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * LoRA
+ * @description LoRA model to load
+ * @default null
+ */
+ lora?: components["schemas"]["ModelIdentifierField"] | null;
+ /**
+ * Weight
+ * @description The weight at which the LoRA is applied to each model
+ * @default 1
+ */
+ weight?: number;
+ /**
+ * Transformer
+ * @description Transformer
+ * @default null
+ */
+ transformer?: components["schemas"]["TransformerField"] | null;
+ /**
+ * type
+ * @default qwen_image_lora_loader
+ * @constant
+ */
+ type: "qwen_image_lora_loader";
+ };
+ /**
+ * QwenImageLoRALoaderOutput
+ * @description Qwen Image LoRA Loader Output
+ */
+ QwenImageLoRALoaderOutput: {
+ /**
+ * Transformer
+ * @description Transformer
+ * @default null
+ */
+ transformer: components["schemas"]["TransformerField"] | null;
+ /**
+ * type
+ * @default qwen_image_lora_loader_output
+ * @constant
+ */
+ type: "qwen_image_lora_loader_output";
+ };
+ /**
+ * Main Model - Qwen Image
+ * @description Loads a Qwen Image model, outputting its submodels.
+ *
+ * The transformer is always loaded from the main model (Diffusers or GGUF).
+ *
+ * For GGUF quantized models, the VAE and Qwen VL encoder must come from a
+ * separate Diffusers model specified in the "Component Source" field.
+ *
+ * For Diffusers models, all components are extracted from the main model
+ * automatically. The "Component Source" field is ignored.
+ */
+ QwenImageModelLoaderInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Transformer
+ * @description Qwen Image Edit model (Transformer) to load
+ */
+ model: components["schemas"]["ModelIdentifierField"];
+ /**
+ * Component Source (Diffusers)
+ * @description Diffusers Qwen Image model to extract the VAE and Qwen VL encoder from. Required when using a GGUF quantized transformer. Ignored when the main model is already in Diffusers format.
+ * @default null
+ */
+ component_source?: components["schemas"]["ModelIdentifierField"] | null;
+ /**
+ * type
+ * @default qwen_image_model_loader
+ * @constant
+ */
+ type: "qwen_image_model_loader";
+ };
+ /**
+ * QwenImageModelLoaderOutput
+ * @description Qwen Image model loader output.
+ */
+ QwenImageModelLoaderOutput: {
+ /**
+ * Transformer
+ * @description Transformer
+ */
+ transformer: components["schemas"]["TransformerField"];
+ /**
+ * Qwen VL Encoder
+ * @description Qwen2.5-VL tokenizer, processor and text/vision encoder
+ */
+ qwen_vl_encoder: components["schemas"]["QwenVLEncoderField"];
+ /**
+ * VAE
+ * @description VAE
+ */
+ vae: components["schemas"]["VAEField"];
+ /**
+ * type
+ * @default qwen_image_model_loader_output
+ * @constant
+ */
+ type: "qwen_image_model_loader_output";
+ };
+ /**
+ * Prompt - Qwen Image
+ * @description Encodes text and reference images for Qwen Image using Qwen2.5-VL.
+ */
+ QwenImageTextEncoderInvocation: {
+ /**
+ * Id
+ * @description The id of this instance of an invocation. Must be unique among all instances of invocations.
+ */
+ id: string;
+ /**
+ * Is Intermediate
+ * @description Whether or not this is an intermediate invocation.
+ * @default false
+ */
+ is_intermediate?: boolean;
+ /**
+ * Use Cache
+ * @description Whether or not to use the cache
+ * @default true
+ */
+ use_cache?: boolean;
+ /**
+ * Prompt
+ * @description Text prompt describing the desired edit.
+ * @default null
+ */
+ prompt?: string | null;
+ /**
+ * Reference Images
+ * @description Reference images to guide the edit. The model can use multiple reference images.
+ * @default []
+ */
+ reference_images?: components["schemas"]["ImageField"][];
+ /**
+ * Qwen VL Encoder
+ * @description Qwen2.5-VL tokenizer, processor and text/vision encoder
+ * @default null
+ */
+ qwen_vl_encoder?: components["schemas"]["QwenVLEncoderField"] | null;
+ /**
+ * Quantization
+ * @description Quantize the Qwen VL encoder to reduce VRAM usage. 'nf4' (4-bit) saves the most memory, 'int8' (8-bit) is a middle ground.
+ * @default none
+ * @enum {string}
+ */
+ quantization?: "none" | "int8" | "nf4";
+ /**
+ * type
+ * @default qwen_image_text_encoder
+ * @constant
+ */
+ type: "qwen_image_text_encoder";
+ };
+ /**
+ * QwenImageVariantType
+ * @description Qwen Image model variants.
+ * @enum {string}
+ */
+ QwenImageVariantType: "generate" | "edit";
+ /**
+ * QwenVLEncoderField
+ * @description Field for Qwen2.5-VL encoder used by Qwen Image Edit models.
+ */
+ QwenVLEncoderField: {
+ /** @description Info to load tokenizer submodel */
+ tokenizer: components["schemas"]["ModelIdentifierField"];
+ /** @description Info to load text_encoder submodel */
+ text_encoder: components["schemas"]["ModelIdentifierField"];
+ };
/**
* Random Float
* @description Outputs a single random float
@@ -24445,6 +25283,11 @@ export type components = {
* @description The ID of the queue
*/
queue_id: string;
+ /**
+ * User Id
+ * @description The ID of the user whose recall parameters were updated
+ */
+ user_id: string;
/**
* Parameters
* @description The recall parameters that were updated
@@ -26045,16 +26888,6 @@ export type components = {
* @description Total number of queue items
*/
total: number;
- /**
- * User Pending
- * @description Number of queue items with status 'pending' for the current user
- */
- user_pending?: number | null;
- /**
- * User In Progress
- * @description Number of queue items with status 'in_progress' for the current user
- */
- user_in_progress?: number | null;
};
/**
* SetupRequest
@@ -26110,6 +26943,11 @@ export type components = {
* @description Whether strict password requirements are enforced
*/
strict_password_checking: boolean;
+ /**
+ * Admin Email
+ * @description Email of the first active admin user, if any
+ */
+ admin_email?: string | null;
};
/**
* Show Image
@@ -26442,6 +27280,8 @@ export type components = {
base: components["schemas"]["BaseModelType"];
type: components["schemas"]["ModelType"];
format?: components["schemas"]["ModelFormat"] | null;
+ /** Variant */
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["QwenImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
/**
* Is Installed
* @default false
@@ -26485,6 +27325,8 @@ export type components = {
base: components["schemas"]["BaseModelType"];
type: components["schemas"]["ModelType"];
format?: components["schemas"]["ModelFormat"] | null;
+ /** Variant */
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["QwenImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
/**
* Is Installed
* @default false
@@ -27004,7 +27846,7 @@ export type components = {
path_or_prefix: string;
model_type: components["schemas"]["ModelType"];
/** Variant */
- variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
+ variant?: components["schemas"]["ModelVariantType"] | components["schemas"]["ClipVariantType"] | components["schemas"]["FluxVariantType"] | components["schemas"]["Flux2VariantType"] | components["schemas"]["ZImageVariantType"] | components["schemas"]["QwenImageVariantType"] | components["schemas"]["Qwen3VariantType"] | null;
};
/**
* Subtract Integers
@@ -29379,6 +30221,16 @@ export type components = {
* @description The opened timestamp of the workflow.
*/
opened_at?: string | null;
+ /**
+ * User Id
+ * @description The id of the user who owns this workflow.
+ */
+ user_id: string;
+ /**
+ * Is Public
+ * @description Whether this workflow is shared with all users.
+ */
+ is_public: boolean;
/** @description The workflow. */
workflow: components["schemas"]["Workflow"];
};
@@ -29409,6 +30261,16 @@ export type components = {
* @description The opened timestamp of the workflow.
*/
opened_at?: string | null;
+ /**
+ * User Id
+ * @description The id of the user who owns this workflow.
+ */
+ user_id: string;
+ /**
+ * Is Public
+ * @description Whether this workflow is shared with all users.
+ */
+ is_public: boolean;
/**
* Description
* @description The description of the workflow.
@@ -29432,7 +30294,7 @@ export type components = {
* @description The order by options for workflow records
* @enum {string}
*/
- WorkflowRecordOrderBy: "created_at" | "updated_at" | "opened_at" | "name";
+ WorkflowRecordOrderBy: "created_at" | "updated_at" | "opened_at" | "name" | "is_public";
/** WorkflowRecordWithThumbnailDTO */
WorkflowRecordWithThumbnailDTO: {
/**
@@ -29460,6 +30322,16 @@ export type components = {
* @description The opened timestamp of the workflow.
*/
opened_at?: string | null;
+ /**
+ * User Id
+ * @description The id of the user who owns this workflow.
+ */
+ user_id: string;
+ /**
+ * Is Public
+ * @description Whether this workflow is shared with all users.
+ */
+ is_public: boolean;
/** @description The workflow. */
workflow: components["schemas"]["Workflow"];
/**
@@ -30809,7 +31681,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Validation Error */
@@ -30841,7 +31713,7 @@ export interface operations {
[name: string]: unknown;
};
content: {
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Validation Error */
@@ -30891,7 +31763,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Bad request */
@@ -30996,7 +31868,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Bad request */
@@ -31067,7 +31939,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Bad request */
@@ -31800,7 +32672,7 @@ export interface operations {
* "repo_variant": "fp16",
* "upcast_attention": false
* } */
- "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | 
components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
+ "application/json": components["schemas"]["Main_Diffusers_SD1_Config"] | components["schemas"]["Main_Diffusers_SD2_Config"] | components["schemas"]["Main_Diffusers_SDXL_Config"] | components["schemas"]["Main_Diffusers_SDXLRefiner_Config"] | components["schemas"]["Main_Diffusers_SD3_Config"] | components["schemas"]["Main_Diffusers_FLUX_Config"] | components["schemas"]["Main_Diffusers_Flux2_Config"] | components["schemas"]["Main_Diffusers_CogView4_Config"] | components["schemas"]["Main_Diffusers_QwenImage_Config"] | components["schemas"]["Main_Diffusers_ZImage_Config"] | components["schemas"]["Main_Checkpoint_SD1_Config"] | components["schemas"]["Main_Checkpoint_SD2_Config"] | components["schemas"]["Main_Checkpoint_SDXL_Config"] | components["schemas"]["Main_Checkpoint_SDXLRefiner_Config"] | components["schemas"]["Main_Checkpoint_Flux2_Config"] | components["schemas"]["Main_Checkpoint_FLUX_Config"] | components["schemas"]["Main_Checkpoint_ZImage_Config"] | components["schemas"]["Main_Checkpoint_Anima_Config"] | components["schemas"]["Main_BnBNF4_FLUX_Config"] | components["schemas"]["Main_GGUF_Flux2_Config"] | components["schemas"]["Main_GGUF_FLUX_Config"] | components["schemas"]["Main_GGUF_QwenImage_Config"] | components["schemas"]["Main_GGUF_ZImage_Config"] | components["schemas"]["VAE_Checkpoint_SD1_Config"] | components["schemas"]["VAE_Checkpoint_SD2_Config"] | components["schemas"]["VAE_Checkpoint_SDXL_Config"] | components["schemas"]["VAE_Checkpoint_FLUX_Config"] | components["schemas"]["VAE_Checkpoint_Flux2_Config"] | components["schemas"]["VAE_Checkpoint_Anima_Config"] | components["schemas"]["VAE_Diffusers_SD1_Config"] | components["schemas"]["VAE_Diffusers_SDXL_Config"] | components["schemas"]["VAE_Diffusers_Flux2_Config"] | components["schemas"]["ControlNet_Checkpoint_SD1_Config"] | components["schemas"]["ControlNet_Checkpoint_SD2_Config"] | components["schemas"]["ControlNet_Checkpoint_SDXL_Config"] | 
components["schemas"]["ControlNet_Checkpoint_FLUX_Config"] | components["schemas"]["ControlNet_Checkpoint_ZImage_Config"] | components["schemas"]["ControlNet_Diffusers_SD1_Config"] | components["schemas"]["ControlNet_Diffusers_SD2_Config"] | components["schemas"]["ControlNet_Diffusers_SDXL_Config"] | components["schemas"]["ControlNet_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_SD1_Config"] | components["schemas"]["LoRA_LyCORIS_SD2_Config"] | components["schemas"]["LoRA_LyCORIS_SDXL_Config"] | components["schemas"]["LoRA_LyCORIS_Flux2_Config"] | components["schemas"]["LoRA_LyCORIS_FLUX_Config"] | components["schemas"]["LoRA_LyCORIS_ZImage_Config"] | components["schemas"]["LoRA_LyCORIS_QwenImage_Config"] | components["schemas"]["LoRA_LyCORIS_Anima_Config"] | components["schemas"]["LoRA_OMI_SDXL_Config"] | components["schemas"]["LoRA_OMI_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_SD1_Config"] | components["schemas"]["LoRA_Diffusers_SD2_Config"] | components["schemas"]["LoRA_Diffusers_SDXL_Config"] | components["schemas"]["LoRA_Diffusers_Flux2_Config"] | components["schemas"]["LoRA_Diffusers_FLUX_Config"] | components["schemas"]["LoRA_Diffusers_ZImage_Config"] | components["schemas"]["ControlLoRA_LyCORIS_FLUX_Config"] | components["schemas"]["T5Encoder_T5Encoder_Config"] | components["schemas"]["T5Encoder_BnBLLMint8_Config"] | components["schemas"]["Qwen3Encoder_Qwen3Encoder_Config"] | components["schemas"]["Qwen3Encoder_Checkpoint_Config"] | components["schemas"]["Qwen3Encoder_GGUF_Config"] | components["schemas"]["TI_File_SD1_Config"] | components["schemas"]["TI_File_SD2_Config"] | components["schemas"]["TI_File_SDXL_Config"] | components["schemas"]["TI_Folder_SD1_Config"] | components["schemas"]["TI_Folder_SD2_Config"] | components["schemas"]["TI_Folder_SDXL_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD1_Config"] | components["schemas"]["IPAdapter_InvokeAI_SD2_Config"] | components["schemas"]["IPAdapter_InvokeAI_SDXL_Config"] | 
components["schemas"]["IPAdapter_Checkpoint_SD1_Config"] | components["schemas"]["IPAdapter_Checkpoint_SD2_Config"] | components["schemas"]["IPAdapter_Checkpoint_SDXL_Config"] | components["schemas"]["IPAdapter_Checkpoint_FLUX_Config"] | components["schemas"]["T2IAdapter_Diffusers_SD1_Config"] | components["schemas"]["T2IAdapter_Diffusers_SDXL_Config"] | components["schemas"]["Spandrel_Checkpoint_Config"] | components["schemas"]["CLIPEmbed_Diffusers_G_Config"] | components["schemas"]["CLIPEmbed_Diffusers_L_Config"] | components["schemas"]["CLIPVision_Diffusers_Config"] | components["schemas"]["SigLIP_Diffusers_Config"] | components["schemas"]["FLUXRedux_Checkpoint_Config"] | components["schemas"]["LlavaOnevision_Diffusers_Config"] | components["schemas"]["ExternalApiModelConfig"] | components["schemas"]["Unknown_Config"];
};
};
/** @description Bad request */
@@ -34698,6 +35570,8 @@ export interface operations {
query?: string | null;
/** @description Whether to include/exclude recent workflows */
has_been_opened?: boolean | null;
+ /** @description Filter by public/shared status */
+ is_public?: boolean | null;
};
header?: never;
path?: never;
@@ -34872,11 +35746,49 @@ export interface operations {
};
};
};
+ update_workflow_is_public: {
+ parameters: {
+ query?: never;
+ header?: never;
+ path: {
+ /** @description The workflow to update */
+ workflow_id: string;
+ };
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["Body_update_workflow_is_public"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["WorkflowRecordDTO"];
+ };
+ };
+ /** @description Validation Error */
+ 422: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["HTTPValidationError"];
+ };
+ };
+ };
+ };
get_all_tags: {
parameters: {
query?: {
/** @description The categories to include */
categories?: components["schemas"]["WorkflowCategory"][] | null;
+ /** @description Filter by public/shared status */
+ is_public?: boolean | null;
};
header?: never;
path?: never;
@@ -34913,6 +35825,8 @@ export interface operations {
categories?: components["schemas"]["WorkflowCategory"][] | null;
/** @description Whether to include/exclude recent workflows */
has_been_opened?: boolean | null;
+ /** @description Filter by public/shared status */
+ is_public?: boolean | null;
};
header?: never;
path?: never;
@@ -34949,6 +35863,8 @@ export interface operations {
categories: components["schemas"]["WorkflowCategory"][];
/** @description Whether to include/exclude recent workflows */
has_been_opened?: boolean | null;
+ /** @description Filter by public/shared status */
+ is_public?: boolean | null;
};
header?: never;
path?: never;
diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts
index 212fc08c24..3624d7ef6a 100644
--- a/invokeai/frontend/web/src/services/api/types.ts
+++ b/invokeai/frontend/web/src/services/api/types.ts
@@ -457,6 +457,10 @@ export const isZImageDiffusersMainModelConfig = (config: AnyModelConfig): config
return config.type === 'main' && config.base === 'z-image' && config.format === 'diffusers';
};
+export const isQwenImageDiffusersMainModelConfig = (config: AnyModelConfig): config is MainModelConfig => {
+ return config.type === 'main' && config.base === 'qwen-image' && config.format === 'diffusers';
+};
+
export const isTIModelConfig = (config: AnyModelConfig): config is MainModelConfig => {
return config.type === 'embedding';
};
@@ -476,7 +480,7 @@ export type ModelInstallStatus = S['InstallStatus'];
export type Graph = S['Graph'];
export type NonNullableGraph = SetRequired;
export type Batch = S['Batch'];
-export const zWorkflowRecordOrderBy = z.enum(['name', 'created_at', 'updated_at', 'opened_at']);
+export const zWorkflowRecordOrderBy = z.enum(['name', 'created_at', 'updated_at', 'opened_at', 'is_public']);
export type WorkflowRecordOrderBy = z.infer<typeof zWorkflowRecordOrderBy>;
assert<Equals<WorkflowRecordOrderBy, S['WorkflowRecordOrderBy']>>();
diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx
index 2e0ff2251e..774acd3f93 100644
--- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx
+++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx
@@ -1,4 +1,4 @@
-import { ExternalLink, Flex, Text } from '@invoke-ai/ui-library';
+import { Flex, Text } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import { socketConnected } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
import type { AppStore } from 'app/store/store';
@@ -28,7 +28,7 @@ import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks
import { zNodeStatus } from 'features/nodes/types/invocation';
import { modelSelected } from 'features/parameters/store/actions';
import ErrorToastDescription, { getTitle } from 'features/toast/ErrorToastDescription';
-import { toast } from 'features/toast/toast';
+import { toast, toastApi } from 'features/toast/toast';
import { t } from 'i18next';
import { LRUCache } from 'lru-cache';
import { Trans } from 'react-i18next';
@@ -855,14 +855,61 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
log.debug({ data }, 'Bulk gallery download ready');
const { bulk_download_item_name } = data;
- // TODO(psyche): This URL may break in in some environments (e.g. Nvidia workbench) but we need to test it first
+ // Dismiss the "preparing" toast (which uses a prefixed id to avoid the
+ // race condition where this socket event arrives before the Redux
+ // middleware processes the POST response).
+ toastApi.close(`preparing:${bulk_download_item_name}`);
+
+ // The GET endpoint requires authentication, so we use fetch() with the
+ // Authorization header rather than a plain link (which cannot
+ // carry headers). After fetching the blob, we create a temporary object
+ // URL and trigger the browser's save dialog programmatically.
const url = `/api/v1/images/download/${bulk_download_item_name}`;
+ const token = localStorage.getItem('auth_token');
+ const headers: Record<string, string> = token ? { Authorization: `Bearer ${token}` } : {};
+
+ const handleDownload = () => {
+ fetch(url, { headers })
+ .then((res) => {
+ if (!res.ok) {
+ throw new Error(`Download failed: ${res.status}`);
+ }
+ return res.blob();
+ })
+ .then((blob) => {
+ const blobUrl = URL.createObjectURL(blob);
+ const a = document.createElement('a');
+ a.href = blobUrl;
+ a.download = bulk_download_item_name;
+ document.body.appendChild(a);
+ a.click();
+ document.body.removeChild(a);
+ // Delay revocation — the browser's save dialog is asynchronous,
+ // and revoking immediately would invalidate the URL before the
+ // download completes.
+ setTimeout(() => URL.revokeObjectURL(blobUrl), 60_000);
+ })
+ .catch((err) => {
+ log.error({ err }, 'Bulk download fetch failed');
+ toast({
+ id: `error:${bulk_download_item_name}`,
+ title: t('gallery.bulkDownloadFailed'),
+ status: 'error',
+ description: String(err),
+ });
+ });
+ };
toast({
id: bulk_download_item_name,
title: t('gallery.bulkDownloadReady'),
status: 'success',
- description: <ExternalLink label={t('gallery.clickToDownload')} href={url} />,
+ description: (
+ // eslint-disable-next-line react/jsx-no-bind -- not a component render; no re-render cost
+ <Text as="span" cursor="pointer" textDecoration="underline" onClick={handleDownload}>
+ {t('gallery.clickToDownload')}
+ </Text>
+ ),
duration: null,
});
});
@@ -872,6 +919,9 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis
const { bulk_download_item_name, error } = data;
+ // Dismiss the "preparing" toast
+ toastApi.close(`preparing:${bulk_download_item_name}`);
+
toast({
id: bulk_download_item_name,
title: t('gallery.bulkDownloadFailed'),
diff --git a/pyproject.toml b/pyproject.toml
index 018cf1970d..aa77f2d368 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,7 +36,7 @@ dependencies = [
"accelerate",
"bitsandbytes; sys_platform!='darwin'",
"compel==2.1.1",
- "diffusers[torch]==0.36.0",
+ "diffusers[torch]==0.37.0",
"gguf",
"mediapipe==0.10.14", # needed for "mediapipeface" controlnet model
"numpy<2.0.0",
diff --git a/tests/app/invocations/test_qwen_image_denoise.py b/tests/app/invocations/test_qwen_image_denoise.py
new file mode 100644
index 0000000000..e0e8a3f0b7
--- /dev/null
+++ b/tests/app/invocations/test_qwen_image_denoise.py
@@ -0,0 +1,61 @@
+"""Tests for the Qwen Image denoise invocation."""
+
+import pytest
+
+from invokeai.app.invocations.qwen_image_denoise import QwenImageDenoiseInvocation
+
+
+class TestPrepareCfgScale:
+ """Test _prepare_cfg_scale utility method."""
+
+ def test_scalar_cfg_scale(self):
+ inv = QwenImageDenoiseInvocation.model_construct(cfg_scale=4.0)
+ result = inv._prepare_cfg_scale(5)
+ assert result == [4.0, 4.0, 4.0, 4.0, 4.0]
+
+ def test_list_cfg_scale(self):
+ inv = QwenImageDenoiseInvocation.model_construct(cfg_scale=[1.0, 2.0, 3.0])
+ result = inv._prepare_cfg_scale(3)
+ assert result == [1.0, 2.0, 3.0]
+
+ def test_list_cfg_scale_length_mismatch(self):
+ inv = QwenImageDenoiseInvocation.model_construct(cfg_scale=[1.0, 2.0])
+ with pytest.raises(AssertionError):
+ inv._prepare_cfg_scale(3)
+
+ def test_invalid_cfg_scale_type(self):
+ inv = QwenImageDenoiseInvocation.model_construct(cfg_scale="invalid")
+ with pytest.raises(ValueError, match="Invalid CFG scale type"):
+ inv._prepare_cfg_scale(3)
+
+
+class TestPackUnpackLatents:
+ """Test latent packing and unpacking roundtrip."""
+
+ def test_pack_unpack_roundtrip(self):
+ """Packing then unpacking should restore the original tensor."""
+ import torch
+
+ latents = torch.randn(1, 16, 128, 128)
+ packed = QwenImageDenoiseInvocation._pack_latents(latents, 1, 16, 128, 128)
+ assert packed.shape == (1, 64 * 64, 64) # (B, H/2*W/2, C*4)
+
+ unpacked = QwenImageDenoiseInvocation._unpack_latents(packed, 128, 128)
+ assert unpacked.shape == (1, 16, 128, 128)
+ assert torch.allclose(latents, unpacked)
+
+ def test_pack_shape(self):
+ """Pack should produce the correct shape."""
+ import torch
+
+ latents = torch.randn(1, 16, 140, 118)
+ packed = QwenImageDenoiseInvocation._pack_latents(latents, 1, 16, 140, 118)
+ assert packed.shape == (1, 70 * 59, 64)
+
+ def test_unpack_shape(self):
+ """Unpack should produce the correct shape."""
+ import torch
+
+ packed = torch.randn(1, 70 * 59, 64)
+ unpacked = QwenImageDenoiseInvocation._unpack_latents(packed, 140, 118)
+ assert unpacked.shape == (1, 16, 140, 118)
diff --git a/tests/app/invocations/test_qwen_image_model_loader.py b/tests/app/invocations/test_qwen_image_model_loader.py
new file mode 100644
index 0000000000..10cab34a64
--- /dev/null
+++ b/tests/app/invocations/test_qwen_image_model_loader.py
@@ -0,0 +1,113 @@
+"""Tests for the Qwen Image model loader invocation."""
+
+from unittest.mock import MagicMock
+
+import pytest
+
+from invokeai.app.invocations.model import ModelIdentifierField
+from invokeai.app.invocations.qwen_image_model_loader import QwenImageModelLoaderInvocation
+from invokeai.backend.model_manager.taxonomy import ModelFormat, SubModelType
+
+
+def _make_model_id(**kwargs) -> ModelIdentifierField:
+ defaults = {"key": "test-key", "hash": "test-hash", "name": "test", "base": "qwen-image", "type": "main"}
+ defaults.update(kwargs)
+ return ModelIdentifierField(**defaults)
+
+
+def _make_mock_context(
+ main_format: ModelFormat = ModelFormat.Diffusers, source_format: ModelFormat = ModelFormat.Diffusers
+):
+ """Create a mock InvocationContext that returns configs with the given formats."""
+ context = MagicMock()
+
+ def get_config(model_id):
+ config = MagicMock()
+ if model_id.key == "main-key":
+ config.format = main_format
+ config.name = "Main Model"
+ elif model_id.key == "source-key":
+ config.format = source_format
+ config.name = "Source Model"
+ return config
+
+ context.models.get_config = get_config
+ context.models.exists = MagicMock(return_value=True)
+ return context
+
+
+class TestDiffusersModel:
+ """Tests for loading a Diffusers-format Qwen Image model."""
+
+ def test_diffusers_model_extracts_all_components(self):
+ """A Diffusers model should extract transformer, VAE, tokenizer, and text encoder from itself."""
+ model_id = _make_model_id(key="main-key")
+ inv = QwenImageModelLoaderInvocation.model_construct(model=model_id, component_source=None)
+ context = _make_mock_context(main_format=ModelFormat.Diffusers)
+
+ result = inv.invoke(context)
+
+ assert result.transformer.transformer.submodel_type == SubModelType.Transformer
+ assert result.vae.vae.submodel_type == SubModelType.VAE
+ assert result.qwen_vl_encoder.tokenizer.submodel_type == SubModelType.Tokenizer
+ assert result.qwen_vl_encoder.text_encoder.submodel_type == SubModelType.TextEncoder
+
+ # All should reference the main model key
+ assert result.transformer.transformer.key == "main-key"
+ assert result.vae.vae.key == "main-key"
+ assert result.qwen_vl_encoder.tokenizer.key == "main-key"
+ assert result.qwen_vl_encoder.text_encoder.key == "main-key"
+
+ def test_diffusers_model_ignores_component_source(self):
+ """A Diffusers model should ignore the component_source even if provided."""
+ model_id = _make_model_id(key="main-key")
+ source_id = _make_model_id(key="source-key")
+ inv = QwenImageModelLoaderInvocation.model_construct(model=model_id, component_source=source_id)
+ context = _make_mock_context(main_format=ModelFormat.Diffusers)
+
+ result = inv.invoke(context)
+
+ # All components should come from main, not source
+ assert result.vae.vae.key == "main-key"
+ assert result.qwen_vl_encoder.tokenizer.key == "main-key"
+
+
+class TestGGUFModel:
+ """Tests for loading a GGUF-format Qwen Image model."""
+
+ def test_gguf_with_component_source_succeeds(self):
+ """A GGUF model with a Diffusers component source should load successfully."""
+ model_id = _make_model_id(key="main-key")
+ source_id = _make_model_id(key="source-key")
+ inv = QwenImageModelLoaderInvocation.model_construct(model=model_id, component_source=source_id)
+ context = _make_mock_context(main_format=ModelFormat.GGUFQuantized, source_format=ModelFormat.Diffusers)
+
+ result = inv.invoke(context)
+
+ # Transformer from main model
+ assert result.transformer.transformer.key == "main-key"
+ assert result.transformer.transformer.submodel_type == SubModelType.Transformer
+
+ # VAE and encoder from component source
+ assert result.vae.vae.key == "source-key"
+ assert result.qwen_vl_encoder.tokenizer.key == "source-key"
+ assert result.qwen_vl_encoder.text_encoder.key == "source-key"
+
+ def test_gguf_without_component_source_raises(self):
+ """A GGUF model without a component source should raise ValueError."""
+ model_id = _make_model_id(key="main-key")
+ inv = QwenImageModelLoaderInvocation.model_construct(model=model_id, component_source=None)
+ context = _make_mock_context(main_format=ModelFormat.GGUFQuantized)
+
+ with pytest.raises(ValueError, match="No source for VAE"):
+ inv.invoke(context)
+
+ def test_gguf_with_non_diffusers_source_raises(self):
+ """A GGUF model with a non-Diffusers component source should raise ValueError."""
+ model_id = _make_model_id(key="main-key")
+ source_id = _make_model_id(key="source-key")
+ inv = QwenImageModelLoaderInvocation.model_construct(model=model_id, component_source=source_id)
+ context = _make_mock_context(main_format=ModelFormat.GGUFQuantized, source_format=ModelFormat.GGUFQuantized)
+
+ with pytest.raises(ValueError, match="Component Source model must be in Diffusers format"):
+ inv.invoke(context)
diff --git a/tests/app/invocations/test_qwen_image_text_encoder.py b/tests/app/invocations/test_qwen_image_text_encoder.py
new file mode 100644
index 0000000000..ab3beabae7
--- /dev/null
+++ b/tests/app/invocations/test_qwen_image_text_encoder.py
@@ -0,0 +1,124 @@
+"""Tests for the Qwen Image text encoder prompt building and image resizing."""
+
+from PIL import Image
+
+from invokeai.app.invocations.qwen_image_text_encoder import (
+ QwenImageTextEncoderInvocation,
+ _build_prompt,
+)
+
+
+class TestBuildPrompt:
+ """Test the _build_prompt function for edit vs generate modes."""
+
+ def test_no_images_uses_generate_template(self):
+ """With 0 images, should use the generate (txt2img) template with no vision placeholder."""
+ prompt = _build_prompt("a beautiful sunset", 0)
+ assert "a beautiful sunset" in prompt
+ assert "<|im_start|>assistant" in prompt
+ # Generate mode: no vision placeholders, uses the "describe the image" system prompt
+ assert "<|vision_start|>" not in prompt
+ assert "Describe the image by detailing" in prompt
+
+ def test_no_images_does_not_use_edit_template(self):
+ """With 0 images, should NOT use the edit system prompt."""
+ prompt = _build_prompt("a beautiful sunset", 0)
+ assert "Describe the key features of the input image" not in prompt
+
+ def test_edit_mode_one_image(self):
+ """With 1 image, should use the edit template with one vision placeholder."""
+ prompt = _build_prompt("change hair to red", 1)
+ assert "Describe the key features of the input image" in prompt
+ assert prompt.count("<|vision_start|><|image_pad|><|vision_end|>") == 1
+ assert "change hair to red" in prompt
+ # Should NOT use the generate system prompt
+ assert "Describe the image by detailing" not in prompt
+
+ def test_edit_mode_multiple_images(self):
+ """With multiple images, should include one placeholder per image."""
+ prompt = _build_prompt("combine these images", 3)
+ assert prompt.count("<|vision_start|><|image_pad|><|vision_end|>") == 3
+ assert "combine these images" in prompt
+
+ def test_generate_template_has_correct_structure(self):
+ """Generate template should have system + user + assistant roles."""
+ prompt = _build_prompt("test prompt", 0)
+ assert prompt.startswith("<|im_start|>system\n")
+ assert "<|im_end|>\n<|im_start|>user\n" in prompt
+ assert prompt.endswith("<|im_start|>assistant\n")
+
+ def test_edit_template_has_correct_structure(self):
+ """Edit template should have system + user (with image) + assistant roles."""
+ prompt = _build_prompt("test prompt", 1)
+ assert prompt.startswith("<|im_start|>system\n")
+ assert "<|im_end|>\n<|im_start|>user\n" in prompt
+ assert "<|vision_start|>" in prompt
+ assert prompt.endswith("<|im_start|>assistant\n")
+
+ def test_prompt_special_characters(self):
+ """Prompt with special characters should be included verbatim."""
+ prompt = _build_prompt("add {curly} braces & brackets", 0)
+ assert "add {curly} braces & brackets" in prompt
+
+
+class TestResizeForVLEncoder:
+ """Test the image resizing logic for the VL encoder."""
+
+ def test_large_image_is_resized(self):
+ """A large image should be resized to ~target_pixels."""
+ img = Image.new("RGB", (2048, 2048))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ # Should be much smaller than original
+ assert w < 2048
+ assert h < 2048
+ # Total pixels should be approximately target
+ assert abs(w * h - 512 * 512) < 10000 # within ~10k pixels
+
+ def test_small_image_is_resized(self):
+ """A small image should also be resized to ~target_pixels."""
+ img = Image.new("RGB", (64, 64))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ # Should be larger than original
+ assert w > 64
+ assert h > 64
+
+ def test_aspect_ratio_preserved(self):
+ """Aspect ratio should be approximately preserved."""
+ img = Image.new("RGB", (800, 400)) # 2:1 aspect ratio
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ original_ratio = 800 / 400 # 2.0
+ new_ratio = w / h
+ # Allow some deviation due to rounding to multiples of 32
+ assert abs(new_ratio - original_ratio) < 0.3
+
+ def test_dimensions_are_multiples_of_32(self):
+ """Output dimensions should be multiples of 32."""
+ img = Image.new("RGB", (1000, 750))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ assert w % 32 == 0
+ assert h % 32 == 0
+
+ def test_square_image(self):
+ """A square image should produce approximately square output."""
+ img = Image.new("RGB", (1024, 1024))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ assert abs(w - h) <= 32 # within one grid step
+
+ def test_portrait_image(self):
+ """A portrait image should produce portrait output."""
+ img = Image.new("RGB", (600, 1200))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ assert h > w # should remain portrait
+
+ def test_landscape_image(self):
+ """A landscape image should produce landscape output."""
+ img = Image.new("RGB", (1200, 600))
+ resized = QwenImageTextEncoderInvocation._resize_for_vl_encoder(img, target_pixels=512 * 512)
+ w, h = resized.size
+ assert w > h # should remain landscape
diff --git a/tests/app/routers/test_boards_multiuser.py b/tests/app/routers/test_boards_multiuser.py
index d5c4848156..ab64ac8a9b 100644
--- a/tests/app/routers/test_boards_multiuser.py
+++ b/tests/app/routers/test_boards_multiuser.py
@@ -457,3 +457,221 @@ def test_enqueue_batch_requires_auth(enable_multiuser_for_tests: Any, client: Te
},
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+# ---------------------------------------------------------------------------
+# Board visibility tests
+# ---------------------------------------------------------------------------
+
+
+def test_board_created_with_private_visibility(client: TestClient, user1_token: str):
+ """Test that newly created boards default to private visibility."""
+ create = client.post(
+ "/api/v1/boards/?board_name=Visibility+Default+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ data = create.json()
+ assert data["board_visibility"] == "private"
+
+
+def test_set_board_visibility_shared(client: TestClient, user1_token: str):
+ """Test that the board owner can set their board to shared."""
+ create = client.post(
+ "/api/v1/boards/?board_name=Shared+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ response = client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "shared"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json()["board_visibility"] == "shared"
+
+
+def test_set_board_visibility_public(client: TestClient, user1_token: str):
+ """Test that the board owner can set their board to public."""
+ create = client.post(
+ "/api/v1/boards/?board_name=Public+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ response = client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "public"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json()["board_visibility"] == "public"
+
+
+def test_shared_board_visible_to_other_users(client: TestClient, user1_token: str, user2_token: str):
+ """Test that a shared board is accessible to other authenticated users."""
+ # user1 creates a board and sets it to shared
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Shared+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "shared"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 should be able to access the shared board
+ response = client.get(
+ f"/api/v1/boards/{board_id}",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["board_id"] == board_id
+
+
+def test_public_board_visible_to_other_users(client: TestClient, user1_token: str, user2_token: str):
+ """Test that a public board is accessible to other authenticated users."""
+ # user1 creates a board and sets it to public
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Public+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "public"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 should be able to access the public board
+ response = client.get(
+ f"/api/v1/boards/{board_id}",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_200_OK
+ assert response.json()["board_id"] == board_id
+
+
+def test_shared_board_appears_in_other_user_list(client: TestClient, user1_token: str, user2_token: str):
+ """Test that shared boards appear in other users' board listings."""
+ # user1 creates and shares a board
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Listed+Shared+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "shared"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 should see the shared board in their listing
+ response = client.get(
+ "/api/v1/boards/?all=true",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_200_OK
+ board_ids = [b["board_id"] for b in response.json()]
+ assert board_id in board_ids
+
+
+def test_private_board_not_visible_after_privacy_change(client: TestClient, user1_token: str, user2_token: str):
+ """Test that reverting a board from shared to private hides it from other users."""
+ # user1 creates a board, makes it shared, then reverts to private
+ create = client.post(
+ "/api/v1/boards/?board_name=Reverted+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "shared"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "private"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 should not be able to access the now-private board
+ response = client.get(
+ f"/api/v1/boards/{board_id}",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+def test_non_owner_cannot_change_board_visibility(client: TestClient, user1_token: str, user2_token: str):
+ """Test that a non-owner cannot change a board's visibility."""
+ # user1 creates a board
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Private+Locked+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ # user2 tries to make it public - should be forbidden
+ response = client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "public"},
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+def test_shared_board_image_names_visible_to_other_users(client: TestClient, user1_token: str, user2_token: str):
+ """Test that image names for shared boards are accessible to other users."""
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Shared+Images+Board",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "shared"},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 can access image names for a shared board
+ response = client.get(
+ f"/api/v1/boards/{board_id}/image_names",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_200_OK
+
+
+def test_admin_can_change_any_board_visibility(client: TestClient, admin_token: str, user1_token: str):
+ """Test that an admin can change the visibility of any user's board."""
+ create = client.post(
+ "/api/v1/boards/?board_name=User1+Board+For+Admin+Visibility",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert create.status_code == status.HTTP_201_CREATED
+ board_id = create.json()["board_id"]
+
+ # Admin sets it to public
+ response = client.patch(
+ f"/api/v1/boards/{board_id}",
+ json={"board_visibility": "public"},
+ headers={"Authorization": f"Bearer {admin_token}"},
+ )
+ assert response.status_code == status.HTTP_201_CREATED
+ assert response.json()["board_visibility"] == "public"
diff --git a/tests/app/routers/test_images.py b/tests/app/routers/test_images.py
index c0da3ec51c..619ecb78c4 100644
--- a/tests/app/routers/test_images.py
+++ b/tests/app/routers/test_images.py
@@ -52,7 +52,9 @@ def test_download_images_from_board_id_empty_image_name_list(
def prepare_download_images_test(monkeypatch: Any, mock_invoker: Invoker) -> None:
- monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", MockApiDependencies(mock_invoker))
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
monkeypatch.setattr(
"invokeai.app.api.routers.images.ApiDependencies.invoker.services.bulk_download.generate_item_id",
lambda arg: "test",
@@ -79,7 +81,9 @@ def test_get_bulk_download_image(tmp_path: Path, monkeypatch: Any, mock_invoker:
mock_file.write_text("contents")
monkeypatch.setattr(mock_invoker.services.bulk_download, "get_path", lambda x: str(mock_file))
- monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", MockApiDependencies(mock_invoker))
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
def mock_add_task(*args, **kwargs):
return None
@@ -93,7 +97,9 @@ def test_get_bulk_download_image(tmp_path: Path, monkeypatch: Any, mock_invoker:
def test_get_bulk_download_image_not_found(monkeypatch: Any, mock_invoker: Invoker, client: TestClient) -> None:
- monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", MockApiDependencies(mock_invoker))
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
def mock_add_task(*args, **kwargs):
return None
@@ -112,7 +118,9 @@ def test_get_bulk_download_image_image_deleted_after_response(
mock_file.write_text("contents")
monkeypatch.setattr(mock_invoker.services.bulk_download, "get_path", lambda x: str(mock_file))
- monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", MockApiDependencies(mock_invoker))
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
client.get("/api/v1/images/download/test.zip")
diff --git a/tests/app/routers/test_multiuser_authorization.py b/tests/app/routers/test_multiuser_authorization.py
new file mode 100644
index 0000000000..e9efae7034
--- /dev/null
+++ b/tests/app/routers/test_multiuser_authorization.py
@@ -0,0 +1,1819 @@
+"""Tests for API-level authorization on board-image mutations, image mutations,
+workflow thumbnail access, and admin email leak prevention.
+
+These tests verify the security fixes for:
+1. Shared-board write protection bypass via direct API calls
+2. Image mutation endpoints lacking ownership checks
+3. Private workflow thumbnail exposure
+4. Admin email leak on unauthenticated status endpoint
+"""
+
+import logging
+from typing import Any
+from unittest.mock import MagicMock
+
+import pytest
+from fastapi import status
+from fastapi.testclient import TestClient
+
+from invokeai.app.api.dependencies import ApiDependencies
+from invokeai.app.api_app import app
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
+from invokeai.app.services.users.users_common import UserCreateRequest
+from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
+from invokeai.backend.util.logging import InvokeAILogger
+from tests.fixtures.sqlite_database import create_mock_sqlite_database
+
+
+class MockApiDependencies(ApiDependencies):
+ invoker: Invoker
+
+ def __init__(self, invoker: Invoker) -> None:
+ self.invoker = invoker
+
+
+WORKFLOW_BODY = {
+ "name": "Test Workflow",
+ "author": "",
+ "description": "",
+ "version": "1.0.0",
+ "contact": "",
+ "tags": "",
+ "notes": "",
+ "nodes": [],
+ "edges": [],
+ "exposedFields": [],
+ "meta": {"version": "3.0.0", "category": "user"},
+ "id": None,
+ "form_fields": [],
+}
+
+
+@pytest.fixture
+def setup_jwt_secret():
+ from invokeai.app.services.auth.token_service import set_jwt_secret
+
+ set_jwt_secret("test-secret-key-for-unit-tests-only-do-not-use-in-production")
+
+
+@pytest.fixture
+def client():
+ return TestClient(app)
+
+
+@pytest.fixture
+def mock_services() -> InvocationServices:
+ from invokeai.app.services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage
+ from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
+ from invokeai.app.services.boards.boards_default import BoardService
+ from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
+ from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import (
+ ClientStatePersistenceSqlite,
+ )
+ from invokeai.app.services.image_records.image_records_sqlite import SqliteImageRecordStorage
+ from invokeai.app.services.images.images_default import ImageService
+ from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
+ from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService
+ from invokeai.app.services.users.users_default import UserService
+ from tests.test_nodes import TestEventService
+
+ configuration = InvokeAIAppConfig(use_memory_db=True, node_cache_size=0)
+ logger = InvokeAILogger.get_logger()
+ db = create_mock_sqlite_database(configuration, logger)
+
+ return InvocationServices(
+ board_image_records=SqliteBoardImageRecordStorage(db=db),
+ board_images=None, # type: ignore
+ board_records=SqliteBoardRecordStorage(db=db),
+ boards=BoardService(),
+ bulk_download=BulkDownloadService(),
+ configuration=configuration,
+ events=TestEventService(),
+ image_files=None, # type: ignore
+ image_records=SqliteImageRecordStorage(db=db),
+ images=ImageService(),
+ invocation_cache=MemoryInvocationCache(max_cache_size=0),
+ logger=logging, # type: ignore
+ model_images=None, # type: ignore
+ model_manager=None, # type: ignore
+ download_queue=None, # type: ignore
+ names=None, # type: ignore
+ performance_statistics=InvocationStatsService(),
+ session_processor=None, # type: ignore
+ session_queue=None, # type: ignore
+ urls=None, # type: ignore
+ workflow_records=SqliteWorkflowRecordsStorage(db=db),
+ tensors=None, # type: ignore
+ conditioning=None, # type: ignore
+ style_preset_records=None, # type: ignore
+ style_preset_image_files=None, # type: ignore
+ workflow_thumbnails=None, # type: ignore
+ model_relationship_records=None, # type: ignore
+ model_relationships=None, # type: ignore
+ client_state_persistence=ClientStatePersistenceSqlite(db=db),
+ users=UserService(db),
+ )
+
+
+@pytest.fixture()
+def mock_invoker(mock_services: InvocationServices) -> Invoker:
+ return Invoker(services=mock_services)
+
+
+def _save_image(mock_invoker: Invoker, image_name: str, user_id: str) -> None:
+ """Helper to insert an image record owned by a specific user."""
+ from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+
+ mock_invoker.services.image_records.save(
+ image_name=image_name,
+ image_origin=ResourceOrigin.INTERNAL,
+ image_category=ImageCategory.GENERAL,
+ width=100,
+ height=100,
+ has_workflow=False,
+ user_id=user_id,
+ )
+
+
+def _create_user(mock_invoker: Invoker, email: str, display_name: str, is_admin: bool = False) -> str:
+ user = mock_invoker.services.users.create(
+ UserCreateRequest(email=email, display_name=display_name, password="TestPass123", is_admin=is_admin)
+ )
+ return user.user_id
+
+
+def _login(client: TestClient, email: str) -> str:
+ r = client.post("/api/v1/auth/login", json={"email": email, "password": "TestPass123", "remember_me": False})
+ assert r.status_code == 200
+ return r.json()["token"]
+
+
+def _auth(token: str) -> dict[str, str]:
+ return {"Authorization": f"Bearer {token}"}
+
+
+@pytest.fixture
+def enable_multiuser(monkeypatch: Any, mock_invoker: Invoker):
+ mock_invoker.services.configuration.multiuser = True
+
+ mock_board_images = MagicMock()
+ mock_board_images.get_all_board_image_names_for_board.return_value = []
+ mock_invoker.services.board_images = mock_board_images
+
+ mock_workflow_thumbnails = MagicMock()
+ mock_workflow_thumbnails.get_url.return_value = None
+ mock_invoker.services.workflow_thumbnails = mock_workflow_thumbnails
+
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.auth.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.boards.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.board_images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.images.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.workflows.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.session_queue.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.recall_parameters.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.model_manager.ApiDependencies", mock_deps)
+ yield
+
+
+@pytest.fixture
+def admin_token(setup_jwt_secret: None, enable_multiuser: Any, mock_invoker: Invoker, client: TestClient):
+ _create_user(mock_invoker, "admin@test.com", "Admin", is_admin=True)
+ return _login(client, "admin@test.com")
+
+
+@pytest.fixture
+def user1_token(enable_multiuser: Any, mock_invoker: Invoker, client: TestClient, admin_token: str):
+ _create_user(mock_invoker, "user1@test.com", "User One")
+ return _login(client, "user1@test.com")
+
+
+@pytest.fixture
+def user2_token(enable_multiuser: Any, mock_invoker: Invoker, client: TestClient, admin_token: str):
+ _create_user(mock_invoker, "user2@test.com", "User Two")
+ return _login(client, "user2@test.com")
+
+
+def _create_board(client: TestClient, token: str, name: str = "Test Board") -> str:
+ r = client.post(f"/api/v1/boards/?board_name={name.replace(' ', '+')}", headers=_auth(token))
+ assert r.status_code == status.HTTP_201_CREATED
+ return r.json()["board_id"]
+
+
+def _share_board(client: TestClient, token: str, board_id: str) -> None:
+ r = client.patch(f"/api/v1/boards/{board_id}", json={"board_visibility": "shared"}, headers=_auth(token))
+ assert r.status_code == status.HTTP_201_CREATED
+
+
+def _set_board_visibility(client: TestClient, token: str, board_id: str, visibility: str) -> None:
+ r = client.patch(f"/api/v1/boards/{board_id}", json={"board_visibility": visibility}, headers=_auth(token))
+ assert r.status_code == status.HTTP_201_CREATED
+
+
+def _create_workflow(client: TestClient, token: str) -> str:
+ r = client.post("/api/v1/workflows/", json={"workflow": WORKFLOW_BODY}, headers=_auth(token))
+ assert r.status_code == 200
+ return r.json()["workflow_id"]
+
+
+# ===========================================================================
+# 1. Board-image mutation authorization
+# ===========================================================================
+
+
+class TestBoardImageMutationAuth:
+ """Tests that board_images mutation endpoints enforce ownership."""
+
+ def test_add_image_to_board_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/board_images/", json={"board_id": "x", "image_name": "y"})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_add_image_to_board_batch_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/board_images/batch", json={"board_id": "x", "image_names": ["y"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_remove_image_from_board_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.request("DELETE", "/api/v1/board_images/", json={"image_name": "y"})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_remove_images_from_board_batch_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/board_images/batch/delete", json={"image_names": ["y"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_non_owner_cannot_add_image_to_shared_board(self, client: TestClient, user1_token: str, user2_token: str):
+ board_id = _create_board(client, user1_token, "User1 Shared Board")
+ _share_board(client, user1_token, board_id)
+
+ r = client.post(
+ "/api/v1/board_images/",
+ json={"board_id": board_id, "image_name": "some-image"},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_add_images_batch_to_shared_board(
+ self, client: TestClient, user1_token: str, user2_token: str
+ ):
+ board_id = _create_board(client, user1_token, "User1 Shared Board Batch")
+ _share_board(client, user1_token, board_id)
+
+ r = client.post(
+ "/api/v1/board_images/batch",
+ json={"board_id": board_id, "image_names": ["img1", "img2"]},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_admin_can_add_image_to_any_board(
+ self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-admin-board-img", user1.user_id)
+ board_id = _create_board(client, user1_token, "User1 Board For Admin")
+
+ # Admin can add any image to any board — should not be 403
+ r = client.post(
+ "/api/v1/board_images/",
+ json={"board_id": board_id, "image_name": "user1-admin-board-img"},
+ headers=_auth(admin_token),
+ )
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_can_add_own_image_to_public_board(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """Public boards are documented as writable by other authenticated users."""
+ public_board_id = _create_board(client, user1_token, "User1 Public Board")
+ _set_board_visibility(client, user1_token, public_board_id, "public")
+
+ user2 = mock_invoker.services.users.get_by_email("user2@test.com")
+ assert user2 is not None
+ _save_image(mock_invoker, "user2-public-board-img", user2.user_id)
+
+ r = client.post(
+ "/api/v1/board_images/",
+ json={"board_id": public_board_id, "image_name": "user2-public-board-img"},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_201_CREATED
+
+ def test_owner_can_add_image_to_own_board(self, client: TestClient, mock_invoker: Invoker, user1_token: str):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-own-board-img", user1.user_id)
+ board_id = _create_board(client, user1_token, "User1 Own Board")
+
+ r = client.post(
+ "/api/v1/board_images/",
+ json={"board_id": board_id, "image_name": "user1-own-board-img"},
+ headers=_auth(user1_token),
+ )
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_add_other_users_image_to_own_board(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """Attacker creates their own board, then tries to add victim's image to it.
+ This must be rejected — otherwise the attacker gains mutation rights via
+ the board-ownership fallback in _assert_image_owner."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "victim-image", user1.user_id)
+
+ attacker_board = _create_board(client, user2_token, "Attacker Board")
+
+ r = client.post(
+ "/api/v1/board_images/",
+ json={"board_id": attacker_board, "image_name": "victim-image"},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_batch_add_other_users_images_to_own_board(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """Same attack via the batch endpoint."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "victim-batch-img", user1.user_id)
+
+ attacker_board = _create_board(client, user2_token, "Attacker Batch Board")
+
+ r = client.post(
+ "/api/v1/board_images/batch",
+ json={"board_id": attacker_board, "image_names": ["victim-batch-img"]},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+
+# ===========================================================================
+# 2a. Image read-access authorization
+# ===========================================================================
+
+
+class TestImageReadAuth:
+ """Tests that image GET endpoints enforce visibility."""
+
+ def test_get_image_dto_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.get("/api/v1/images/i/some-image")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_get_image_metadata_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.get("/api/v1/images/i/some-image/metadata")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_get_image_full_is_unauthenticated(self, enable_multiuser: Any, client: TestClient):
+ # Binary image endpoints are intentionally unauthenticated because
+ # browsers load them via <img> tags,
+ # which cannot send Bearer tokens.
+ r = client.get("/api/v1/images/i/some-image/full")
+ assert r.status_code != status.HTTP_401_UNAUTHORIZED
+
+ def test_get_image_thumbnail_is_unauthenticated(self, enable_multiuser: Any, client: TestClient):
+ r = client.get("/api/v1/images/i/some-image/thumbnail")
+ assert r.status_code != status.HTTP_401_UNAUTHORIZED
+
+ def test_get_image_urls_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.get("/api/v1/images/i/some-image/urls")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_non_owner_cannot_read_private_image(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 should not be able to read user1's image that is not on a shared board."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-private-img", user1.user_id)
+
+ r = client.get("/api/v1/images/i/user1-private-img", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_owner_can_read_own_image(self, client: TestClient, mock_invoker: Invoker, user1_token: str):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-readable", user1.user_id)
+
+ r = client.get("/api/v1/images/i/user1-readable", headers=_auth(user1_token))
+ # Should not be 403 (may be 404/500 due to missing board_image_records mock)
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_admin_can_read_any_image(
+ self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-admin-read", user1.user_id)
+
+ r = client.get("/api/v1/images/i/user1-admin-read", headers=_auth(admin_token))
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_shared_board_image_readable_by_other_user(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """An image on a shared board should be readable by any authenticated user."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "shared-board-img", user1.user_id)
+
+ # Create a shared board and add the image to it
+ board_id = _create_board(client, user1_token, "Shared Read Board")
+ _share_board(client, user1_token, board_id)
+ mock_invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name="shared-board-img")
+
+ r = client.get("/api/v1/images/i/shared-board-img", headers=_auth(user2_token))
+ # Should not be 403 — image is on a shared board
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_read_image_metadata(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-meta-blocked", user1.user_id)
+
+ r = client.get("/api/v1/images/i/user1-meta-blocked/metadata", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_list_images_private_board_rejected_for_non_owner(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 must not be able to enumerate images on user1's private board
+ via GET /api/v1/images?board_id=..."""
+ board_id = _create_board(client, user1_token, "Private Enum Board")
+
+ r = client.get(f"/api/v1/images/?board_id={board_id}", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_list_images_shared_board_allowed_for_non_owner(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 should be able to list images on user1's shared board."""
+ board_id = _create_board(client, user1_token, "Shared Enum Board")
+ _share_board(client, user1_token, board_id)
+
+ r = client.get(f"/api/v1/images/?board_id={board_id}", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_200_OK
+
+ def test_get_image_names_private_board_rejected_for_non_owner(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 must not be able to enumerate image names on user1's private board
+ via GET /api/v1/images/names?board_id=..."""
+ board_id = _create_board(client, user1_token, "Private Names Board")
+
+ r = client.get(f"/api/v1/images/names?board_id={board_id}", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_get_image_names_shared_board_allowed_for_non_owner(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 should be able to list image names on user1's shared board."""
+ board_id = _create_board(client, user1_token, "Shared Names Board")
+ _share_board(client, user1_token, board_id)
+
+ r = client.get(f"/api/v1/images/names?board_id={board_id}", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_200_OK
+
+ def test_list_images_own_private_board_allowed(self, client: TestClient, mock_invoker: Invoker, user1_token: str):
+ """Owner should be able to list images on their own private board."""
+ board_id = _create_board(client, user1_token, "Own Private Board")
+
+ r = client.get(f"/api/v1/images/?board_id={board_id}", headers=_auth(user1_token))
+ assert r.status_code == status.HTTP_200_OK
+
+ def test_admin_can_list_images_on_any_board(
+ self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+ ):
+ """Admin should be able to list images on any board."""
+ board_id = _create_board(client, user1_token, "Admin Enum Board")
+
+ r = client.get(f"/api/v1/images/?board_id={board_id}", headers=_auth(admin_token))
+ assert r.status_code == status.HTTP_200_OK
+
+
+# ===========================================================================
+# 2b. Image mutation authorization
+# ===========================================================================
+
+
+class TestImageUploadAuth:
+ """Tests that image upload enforces board ownership."""
+
+ def test_upload_to_other_users_shared_board_forbidden(self, client: TestClient, user1_token: str, user2_token: str):
+ """A user should not be able to upload an image into another user's shared board."""
+ board_id = _create_board(client, user1_token, "User1 Shared Upload Board")
+ _share_board(client, user1_token, board_id)
+
+ # user2 tries to upload into user1's shared board
+ import io
+
+ fake_image = io.BytesIO(b"\x89PNG\r\n\x1a\n" + b"\x00" * 100)
+ r = client.post(
+ f"/api/v1/images/upload?image_category=general&is_intermediate=false&board_id={board_id}",
+ files={"file": ("test.png", fake_image, "image/png")},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_owner_can_upload_to_own_shared_board(self, client: TestClient, user1_token: str):
+ board_id = _create_board(client, user1_token, "User1 Own Upload Board")
+ _share_board(client, user1_token, board_id)
+
+ import io
+
+ fake_image = io.BytesIO(b"\x89PNG\r\n\x1a\n" + b"\x00" * 100)
+ r = client.post(
+ f"/api/v1/images/upload?image_category=general&is_intermediate=false&board_id={board_id}",
+ files={"file": ("test.png", fake_image, "image/png")},
+ headers=_auth(user1_token),
+ )
+ # Should not be 403 (may fail for other reasons in test env)
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_can_upload_to_public_board(self, client: TestClient, user1_token: str, user2_token: str):
+ """Public boards allow any authenticated user to upload images."""
+ board_id = _create_board(client, user1_token, "User1 Public Upload Board")
+ _set_board_visibility(client, user1_token, board_id, "public")
+
+ import io
+
+ fake_image = io.BytesIO(b"\x89PNG\r\n\x1a\n" + b"\x00" * 100)
+ r = client.post(
+ f"/api/v1/images/upload?image_category=general&is_intermediate=false&board_id={board_id}",
+ files={"file": ("test.png", fake_image, "image/png")},
+ headers=_auth(user2_token),
+ )
+ # Should not be 403 (may fail downstream for other reasons in test env)
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+
+class TestImageMutationAuth:
+ """Tests that image mutation endpoints enforce ownership."""
+
+ def test_delete_image_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.delete("/api/v1/images/i/some-image")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_update_image_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.patch("/api/v1/images/i/some-image", json={"starred": True})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_batch_delete_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/images/delete", json={"image_names": ["x"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_star_images_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/images/star", json={"image_names": ["x"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_unstar_images_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/images/unstar", json={"image_names": ["x"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_clear_intermediates_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.delete("/api/v1/images/intermediates")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_delete_uncategorized_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.delete("/api/v1/images/uncategorized")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_non_owner_cannot_delete_image(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """User2 should not be able to delete user1's image."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-image", user1.user_id)
+
+ r = client.delete("/api/v1/images/i/user1-image", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_owner_can_delete_own_image(self, client: TestClient, mock_invoker: Invoker, user1_token: str):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-delete-me", user1.user_id)
+
+ r = client.delete("/api/v1/images/i/user1-delete-me", headers=_auth(user1_token))
+ # Should not be 403 (may be 200 or 500 depending on file system)
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_admin_can_delete_any_image(
+ self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-admin-delete", user1.user_id)
+
+ r = client.delete("/api/v1/images/i/user1-admin-delete", headers=_auth(admin_token))
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_board_owner_can_delete_image_on_own_board(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str
+ ):
+ """Board owner should be able to delete images on their board even if
+ the image's user_id is 'system' (e.g. generated images)."""
+ # Create image owned by "system" (simulates queue-generated image)
+ _save_image(mock_invoker, "system-img-on-board", "system")
+
+ # Create a board owned by user1 and add the image to it
+ board_id = _create_board(client, user1_token, "User1 Board With System Img")
+ mock_invoker.services.board_image_records.add_image_to_board(
+ board_id=board_id, image_name="system-img-on-board"
+ )
+
+ r = client.delete("/api/v1/images/i/system-img-on-board", headers=_auth(user1_token))
+ assert r.status_code != status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_update_image(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-no-star", user1.user_id)
+
+ r = client.patch(
+ "/api/v1/images/i/user1-no-star",
+ json={"starred": True},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_star_image(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-star-blocked", user1.user_id)
+
+ r = client.post(
+ "/api/v1/images/star",
+ json={"image_names": ["user1-star-blocked"]},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_cannot_batch_delete_image(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-batch-del", user1.user_id)
+
+ r = client.post(
+ "/api/v1/images/delete",
+ json={"image_names": ["user1-batch-del"]},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_non_owner_can_delete_image_from_public_board(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """Public-board semantics promise delete access to images contained in the board."""
+ public_board_id = _create_board(client, user1_token, "User1 Public Delete Board")
+ _set_board_visibility(client, user1_token, public_board_id, "public")
+
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-public-delete", user1.user_id)
+ mock_invoker.services.board_image_records.add_image_to_board(public_board_id, "user1-public-delete")
+
+ r = client.delete(
+ "/api/v1/images/i/user1-public-delete",
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == status.HTTP_200_OK
+
+ def test_clear_intermediates_non_admin_forbidden(self, client: TestClient, user1_token: str):
+ r = client.delete("/api/v1/images/intermediates", headers=_auth(user1_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_get_intermediates_count_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.get("/api/v1/images/intermediates")
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_download_images_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/images/download", json={"image_names": ["x"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_non_owner_cannot_fetch_existing_bulk_download_item(
+ self,
+ client: TestClient,
+ mock_invoker: Invoker,
+ monkeypatch: Any,
+ tmp_path: Any,
+ user1_token: str,
+ user2_token: str,
+ ):
+ """A bulk download zip should be fetchable only by its owner."""
+ from fastapi import BackgroundTasks
+
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+
+ mock_file = tmp_path / "owned-download.zip"
+ mock_file.write_text("contents")
+
+ monkeypatch.setattr(mock_invoker.services.bulk_download, "get_path", lambda _: str(mock_file))
+ monkeypatch.setattr(mock_invoker.services.bulk_download, "get_owner", lambda _: user1.user_id)
+ monkeypatch.setattr(BackgroundTasks, "add_task", lambda *args, **kwargs: None)
+
+ r = client.get("/api/v1/images/download/owned-download.zip", headers=_auth(user2_token))
+ assert r.status_code == status.HTTP_403_FORBIDDEN
+
+ def test_images_by_names_requires_auth(self, enable_multiuser: Any, client: TestClient):
+ r = client.post("/api/v1/images/images_by_names", json={"image_names": ["x"]})
+ assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+ def test_images_by_names_filters_unauthorized(
+ self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+ ):
+ """images_by_names should silently skip images the caller cannot access."""
+ user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+ assert user1 is not None
+ _save_image(mock_invoker, "user1-by-name", user1.user_id)
+
+ r = client.post(
+ "/api/v1/images/images_by_names",
+ json={"image_names": ["user1-by-name"]},
+ headers=_auth(user2_token),
+ )
+ assert r.status_code == 200
+ # user2 should get an empty list — the image belongs to user1
+ assert r.json() == []
+
+    def test_none_board_image_names_only_return_callers_uncategorized_images(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """The uncategorized-images sentinel must not expose other users' image names."""
+        # Route the mocked board_images service through the real record store so
+        # the "none" (uncategorized) sentinel query runs against actual per-user data.
+        mock_invoker.services.board_images.get_all_board_image_names_for_board.side_effect = (
+            mock_invoker.services.board_image_records.get_all_board_image_names_for_board
+        )
+
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        user2 = mock_invoker.services.users.get_by_email("user2@test.com")
+        assert user1 is not None
+        assert user2 is not None
+
+        _save_image(mock_invoker, "user1-uncategorized-private", user1.user_id)
+        _save_image(mock_invoker, "user2-uncategorized-private", user2.user_id)
+
+        r = client.get("/api/v1/boards/none/image_names", headers=_auth(user2_token))
+        assert r.status_code == status.HTTP_200_OK
+        assert "user2-uncategorized-private" in r.json()
+        assert "user1-uncategorized-private" not in r.json()
+
+
+# ===========================================================================
+# 3. Workflow list scoping and mutation authorization (additional)
+# ===========================================================================
+
+
+class TestWorkflowListScoping:
+    """Tests that listing workflows in multiuser mode does not filter out default workflows."""
+
+    def test_default_workflows_visible_when_listing_user_and_default(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str
+    ):
+        """When categories=['user','default'], default workflows must still appear even
+        though user_id_filter is set to the current user (default workflows belong to 'system')."""
+        from invokeai.app.services.workflow_records.workflow_records_common import (
+            Workflow,
+            WorkflowCategory,
+            WorkflowMeta,
+            WorkflowWithoutID,
+        )
+        from invokeai.app.util.misc import uuid_string
+
+        default_wf = WorkflowWithoutID(
+            name="Test Default Workflow",
+            description="A built-in workflow",
+            meta=WorkflowMeta(version="3.0.0", category=WorkflowCategory.Default),
+            nodes=[],
+            edges=[],
+            tags="",
+            author="",
+            contact="",
+            version="1.0.0",
+            notes="",
+            exposedFields=[],
+            form_fields=[],
+        )
+        wf_with_id = Workflow(**default_wf.model_dump(), id=uuid_string())
+        # Insert directly via DB since the create API rejects default workflows
+        with mock_invoker.services.workflow_records._db.transaction() as cursor:
+            cursor.execute(
+                "INSERT INTO workflow_library (workflow_id, workflow, user_id) VALUES (?, ?, ?)",
+                (wf_with_id.id, wf_with_id.model_dump_json(), "system"),
+            )
+
+        # Also create a user workflow via the API
+        _create_workflow(client, user1_token)
+
+        # List with categories=user&categories=default
+        r = client.get(
+            "/api/v1/workflows/?categories=user&categories=default",
+            headers=_auth(user1_token),
+        )
+        assert r.status_code == 200
+        data = r.json()
+        # Both categories must survive the user_id scoping applied by the endpoint.
+        categories_found = {item["category"] for item in data["items"]}
+        assert "default" in categories_found, (
+            f"Default workflows were filtered out. Categories found: {categories_found}"
+        )
+        assert "user" in categories_found
+
+    def test_default_workflows_visible_when_no_category_filter(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str
+    ):
+        """When no categories filter is given, default workflows should still appear."""
+        from invokeai.app.services.workflow_records.workflow_records_common import (
+            Workflow,
+            WorkflowCategory,
+            WorkflowMeta,
+            WorkflowWithoutID,
+        )
+        from invokeai.app.util.misc import uuid_string
+
+        default_wf = WorkflowWithoutID(
+            name="Another Default Workflow",
+            description="Built-in",
+            meta=WorkflowMeta(version="3.0.0", category=WorkflowCategory.Default),
+            nodes=[],
+            edges=[],
+            tags="",
+            author="",
+            contact="",
+            version="1.0.0",
+            notes="",
+            exposedFields=[],
+            form_fields=[],
+        )
+        wf_with_id = Workflow(**default_wf.model_dump(), id=uuid_string())
+        # Insert directly via DB since the create API rejects default workflows
+        with mock_invoker.services.workflow_records._db.transaction() as cursor:
+            cursor.execute(
+                "INSERT INTO workflow_library (workflow_id, workflow, user_id) VALUES (?, ?, ?)",
+                (wf_with_id.id, wf_with_id.model_dump_json(), "system"),
+            )
+
+        _create_workflow(client, user1_token)
+
+        r = client.get("/api/v1/workflows/", headers=_auth(user1_token))
+        assert r.status_code == 200
+        data = r.json()
+        categories_found = {item["category"] for item in data["items"]}
+        assert "default" in categories_found, (
+            f"Default workflows were filtered out. Categories found: {categories_found}"
+        )
+
+
+class TestWorkflowMutationAuth:
+    """Tests for additional workflow mutation endpoints."""
+
+    def test_update_opened_at_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        """The opened_at touch endpoint must 401 without a token."""
+        r = client.put("/api/v1/workflows/i/some-id/opened_at")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_non_owner_cannot_update_opened_at(self, client: TestClient, user1_token: str, user2_token: str):
+        """A different user must not be able to touch another user's workflow."""
+        workflow_id = _create_workflow(client, user1_token)
+        r = client.put(
+            f"/api/v1/workflows/i/{workflow_id}/opened_at",
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_owner_can_update_opened_at(self, client: TestClient, user1_token: str):
+        """The workflow's owner can touch opened_at."""
+        workflow_id = _create_workflow(client, user1_token)
+        r = client.put(
+            f"/api/v1/workflows/i/{workflow_id}/opened_at",
+            headers=_auth(user1_token),
+        )
+        assert r.status_code == 200
+
+
+# ===========================================================================
+# 4. Workflow thumbnail authorization
+# ===========================================================================
+
+
+class TestWorkflowThumbnailAuth:
+    """Tests for the workflow thumbnail GET endpoint.
+
+    Workflow and image thumbnail endpoints are intentionally unauthenticated
+    because browsers load them via <img> tags which cannot send Bearer
+    tokens. IDs are UUIDs, providing security through unguessability.
+    """
+
+    def test_thumbnail_is_unauthenticated(self, enable_multiuser: Any, client: TestClient):
+        # Binary image endpoints don't require auth — loaded via <img> tags.
+        # Any status other than 401 (e.g. 404) is acceptable here.
+        r = client.get("/api/v1/workflows/i/some-workflow/thumbnail")
+        assert r.status_code != status.HTTP_401_UNAUTHORIZED
+
+
+# ===========================================================================
+# 5. Admin email leak prevention
+# ===========================================================================
+
+
+class TestAdminEmailLeak:
+    """Tests that the auth status endpoint does not leak admin email."""
+
+    def test_status_does_not_leak_admin_email_when_setup_complete(self, client: TestClient, admin_token: str):
+        """After setup is complete, admin_email must be null."""
+        r = client.get("/api/v1/auth/status")
+        assert r.status_code == 200
+        data = r.json()
+        assert data["multiuser_enabled"] is True
+        assert data["setup_required"] is False
+        assert data["admin_email"] is None
+
+    def test_status_returns_admin_email_during_setup(
+        self, setup_jwt_secret: None, enable_multiuser: Any, mock_invoker: Invoker, client: TestClient
+    ):
+        """Before any admin exists, setup_required=True and admin_email may be returned."""
+        # Don't create any users -- setup_required should be True
+        r = client.get("/api/v1/auth/status")
+        assert r.status_code == 200
+        data = r.json()
+        assert data["setup_required"] is True
+        # admin_email is null here because no admin exists yet, which is correct
+
+    def test_status_no_leak_in_single_user_mode(
+        self, setup_jwt_secret: None, monkeypatch: Any, mock_invoker: Invoker, client: TestClient
+    ):
+        """In single-user mode, admin_email should always be null."""
+        # Point the auth router's ApiDependencies at the mock invoker so the
+        # endpoint reads the patched multiuser=False configuration.
+        mock_invoker.services.configuration.multiuser = False
+        mock_deps = MockApiDependencies(mock_invoker)
+        monkeypatch.setattr("invokeai.app.api.routers.auth.ApiDependencies", mock_deps)
+
+        r = client.get("/api/v1/auth/status")
+        assert r.status_code == 200
+        data = r.json()
+        assert data["admin_email"] is None
+        assert data["multiuser_enabled"] is False
+
+
+# ===========================================================================
+# 6. Session queue authorization
+# ===========================================================================
+
+
+class TestSessionQueueAuth:
+    """Tests that session queue endpoints enforce authentication."""
+
+    # Each read-only queue endpoint below must return 401 for anonymous
+    # callers when multiuser mode is enabled.
+    def test_get_queue_item_ids_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v1/queue/default/item_ids")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_get_current_queue_item_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v1/queue/default/current")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_get_next_queue_item_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v1/queue/default/next")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_get_batch_status_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v1/queue/default/b/some-batch/status")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_counts_by_destination_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v1/queue/default/counts_by_destination?destination=canvas")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+# ===========================================================================
+# 6b. Session queue sanitization (cross-user isolation)
+# ===========================================================================
+
+
+class TestSessionQueueSanitization:
+    """Tests that sanitize_queue_item_for_user strips all sensitive fields
+    from queue items viewed by non-owner, non-admin users."""
+
+    @pytest.fixture
+    def _sample_queue_item(self):
+        # A fully populated queue item owned by "owner-user" — every field the
+        # sanitizer might strip is set to a distinctive, checkable value.
+        from invokeai.app.services.shared.graph import Graph, GraphExecutionState
+
+        return SessionQueueItem(
+            item_id=42,
+            status="pending",
+            priority=10,
+            batch_id="batch-abc",
+            origin="workflows",
+            destination="canvas",
+            session_id="sess-123",
+            session=GraphExecutionState(id="sess-123", graph=Graph()),
+            error_type="RuntimeError",
+            error_message="something broke",
+            error_traceback="Traceback ...",
+            created_at="2026-01-01T00:00:00",
+            updated_at="2026-01-01T01:00:00",
+            started_at="2026-01-01T00:30:00",
+            completed_at=None,
+            queue_id="default",
+            user_id="owner-user",
+            user_display_name="Owner Display",
+            user_email="owner@test.com",
+            field_values=None,
+            workflow=None,
+        )
+
+    def test_owner_sees_all_fields(self, _sample_queue_item: SessionQueueItem):
+        """The owning user gets the item back unredacted."""
+        from invokeai.app.api.routers.session_queue import sanitize_queue_item_for_user
+
+        result = sanitize_queue_item_for_user(_sample_queue_item, "owner-user", is_admin=False)
+        assert result.user_id == "owner-user"
+        assert result.user_display_name == "Owner Display"
+        assert result.user_email == "owner@test.com"
+        assert result.batch_id == "batch-abc"
+        assert result.origin == "workflows"
+        assert result.destination == "canvas"
+        assert result.session_id == "sess-123"
+        assert result.priority == 10
+
+    def test_admin_sees_all_fields(self, _sample_queue_item: SessionQueueItem):
+        """Admins bypass redaction even for items they don't own."""
+        from invokeai.app.api.routers.session_queue import sanitize_queue_item_for_user
+
+        result = sanitize_queue_item_for_user(_sample_queue_item, "admin-user", is_admin=True)
+        assert result.user_id == "owner-user"
+        assert result.user_display_name == "Owner Display"
+        assert result.user_email == "owner@test.com"
+        assert result.batch_id == "batch-abc"
+
+    def test_non_owner_sees_only_status_timestamps_errors(self, _sample_queue_item: SessionQueueItem):
+        """Non-owner, non-admin callers see only queue position data."""
+        from invokeai.app.api.routers.session_queue import sanitize_queue_item_for_user
+
+        result = sanitize_queue_item_for_user(_sample_queue_item, "other-user", is_admin=False)
+
+        # Preserved: item_id, queue_id, status, timestamps
+        assert result.item_id == 42
+        assert result.queue_id == "default"
+        assert result.status == "pending"
+        assert result.created_at == "2026-01-01T00:00:00"
+        assert result.updated_at == "2026-01-01T01:00:00"
+        assert result.started_at == "2026-01-01T00:30:00"
+        assert result.completed_at is None
+
+        # Stripped: errors (may leak file paths, prompts, model names)
+        assert result.error_type is None
+        assert result.error_message is None
+        assert result.error_traceback is None
+
+        # Stripped: user identity
+        assert result.user_id == "redacted"
+        assert result.user_display_name is None
+        assert result.user_email is None
+
+        # Stripped: generation metadata
+        assert result.batch_id == "redacted"
+        assert result.session_id == "redacted"
+        assert result.origin is None
+        assert result.destination is None
+        assert result.priority == 0
+        assert result.field_values is None
+        assert result.retried_from_item_id is None
+        assert result.workflow is None
+        assert result.session.id == "redacted"
+        assert len(result.session.graph.nodes) == 0
+
+    def test_sanitization_does_not_mutate_original(self, _sample_queue_item: SessionQueueItem):
+        """Sanitization must return a copy, not modify the stored item."""
+        from invokeai.app.api.routers.session_queue import sanitize_queue_item_for_user
+
+        sanitize_queue_item_for_user(_sample_queue_item, "other-user", is_admin=False)
+        # Original should be unchanged
+        assert _sample_queue_item.user_id == "owner-user"
+        assert _sample_queue_item.user_email == "owner@test.com"
+        assert _sample_queue_item.batch_id == "batch-abc"
+
+
+# ===========================================================================
+# 7. Recall parameters authorization
+# ===========================================================================
+
+
+class TestRecallParametersAuth:
+    """Tests that recall parameter endpoints enforce authentication."""
+
+    def test_get_recall_parameters_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        """Reading recall parameters must 401 without a token."""
+        r = client.get("/api/v1/recall/default")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_update_recall_parameters_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        """Writing recall parameters must 401 without a token."""
+        r = client.post("/api/v1/recall/default", json={"positive_prompt": "test"})
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+# ===========================================================================
+# 7a2. Recall parameters image access control
+# ===========================================================================
+
+
+class TestRecallImageAccess:
+    """Tests that recall parameter image references are validated for read access."""
+
+    def test_recall_controlnet_with_other_users_image_rejected(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User2 must not be able to reference user1's private image in a control layer."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "victim-ctrl-img", user1.user_id)
+
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"control_layers": [{"model_name": "some-controlnet", "image_name": "victim-ctrl-img"}]},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_recall_ip_adapter_with_other_users_image_rejected(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User2 must not be able to reference user1's private image in an IP adapter."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "victim-ip-img", user1.user_id)
+
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"ip_adapters": [{"model_name": "some-ip-adapter", "image_name": "victim-ip-img"}]},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_recall_own_image_allowed(self, client: TestClient, mock_invoker: Invoker, user1_token: str):
+        """Owner should be able to reference their own image in recall parameters."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "own-ctrl-img", user1.user_id)
+
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"control_layers": [{"model_name": "some-controlnet", "image_name": "own-ctrl-img"}]},
+            headers=_auth(user1_token),
+        )
+        # Should not be 403 (may fail downstream for other reasons, e.g. model not found)
+        assert r.status_code != status.HTTP_403_FORBIDDEN
+
+    def test_recall_shared_board_image_allowed(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """An image on a shared board should be usable in recall by any user."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "shared-recall-img", user1.user_id)
+
+        # Put user1's image on a board that has been shared, making it readable by user2.
+        board_id = _create_board(client, user1_token, "Shared Recall Board")
+        _share_board(client, user1_token, board_id)
+        mock_invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name="shared-recall-img")
+
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"ip_adapters": [{"model_name": "some-ip-adapter", "image_name": "shared-recall-img"}]},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code != status.HTTP_403_FORBIDDEN
+
+    def test_recall_admin_can_reference_any_image(
+        self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+    ):
+        """Admin should be able to reference any user's image."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "admin-recall-img", user1.user_id)
+
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"control_layers": [{"model_name": "some-controlnet", "image_name": "admin-recall-img"}]},
+            headers=_auth(admin_token),
+        )
+        assert r.status_code != status.HTTP_403_FORBIDDEN
+
+
+# ===========================================================================
+# 7b. Recall parameters cross-user isolation
+# ===========================================================================
+
+
+class TestRecallParametersIsolation:
+    """Tests that recall parameters are scoped per-user, not globally by queue_id."""
+
+    def test_user1_write_does_not_leak_to_user2(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User1 sets a recall parameter; user2 should not see it in client state."""
+        # user1 writes a recall parameter
+        r = client.post(
+            "/api/v1/recall/default",
+            json={"positive_prompt": "user1 secret prompt"},
+            headers=_auth(user1_token),
+        )
+        assert r.status_code == 200
+
+        # Verify that user1's data is stored under user1's user_id, not the queue_id
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        user2 = mock_invoker.services.users.get_by_email("user2@test.com")
+        assert user1 is not None
+        assert user2 is not None
+
+        # user1 should have the value
+        val = mock_invoker.services.client_state_persistence.get_by_key(user1.user_id, "recall_positive_prompt")
+        assert val is not None
+        assert "user1 secret prompt" in val
+
+        # user2 should NOT have the value
+        val2 = mock_invoker.services.client_state_persistence.get_by_key(user2.user_id, "recall_positive_prompt")
+        assert val2 is None
+
+    def test_two_users_independent_state(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """Both users can write recall params independently without overwriting each other."""
+        # Both writes target the same queue_id; isolation must come from the user_id key.
+        r1 = client.post(
+            "/api/v1/recall/default",
+            json={"positive_prompt": "prompt from user1"},
+            headers=_auth(user1_token),
+        )
+        assert r1.status_code == 200
+
+        r2 = client.post(
+            "/api/v1/recall/default",
+            json={"positive_prompt": "prompt from user2"},
+            headers=_auth(user2_token),
+        )
+        assert r2.status_code == 200
+
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        user2 = mock_invoker.services.users.get_by_email("user2@test.com")
+        assert user1 is not None
+        assert user2 is not None
+
+        val1 = mock_invoker.services.client_state_persistence.get_by_key(user1.user_id, "recall_positive_prompt")
+        val2 = mock_invoker.services.client_state_persistence.get_by_key(user2.user_id, "recall_positive_prompt")
+        assert val1 is not None and "prompt from user1" in val1
+        assert val2 is not None and "prompt from user2" in val2
+
+
+# ===========================================================================
+# 9. Recall parameters event user scoping
+# ===========================================================================
+
+
+class TestRecallParametersEventScoping:
+    """Tests that RecallParametersUpdatedEvent carries user_id for targeted delivery."""
+
+    def test_event_includes_user_id(self):
+        """RecallParametersUpdatedEvent.build() must set user_id so the socket handler
+        can route the event to the correct user room instead of broadcasting."""
+        from invokeai.app.services.events.events_common import RecallParametersUpdatedEvent
+
+        # All payload fields must round-trip through build() unchanged.
+        event = RecallParametersUpdatedEvent.build(
+            queue_id="default",
+            user_id="user-abc",
+            parameters={"positive_prompt": "test"},
+        )
+        assert event.queue_id == "default"
+        assert event.user_id == "user-abc"
+        assert event.parameters == {"positive_prompt": "test"}
+
+    def test_event_not_broadcast_to_all_queue_subscribers(self):
+        """RecallParametersUpdatedEvent must have a user_id field so _handle_queue_event
+        in sockets.py can route it to the owner room + admin room, not the queue room."""
+        from invokeai.app.services.events.events_common import RecallParametersUpdatedEvent
+
+        event = RecallParametersUpdatedEvent.build(
+            queue_id="default",
+            user_id="owner-123",
+            parameters={"seed": 42},
+        )
+        # The event must carry user_id; without it the socket handler would
+        # fall through to the generic else branch and broadcast to all subscribers
+        assert hasattr(event, "user_id")
+        assert event.user_id == "owner-123"
+
+
+# ===========================================================================
+# 10. Queue status endpoint scoping
+# ===========================================================================
+
+
+class TestQueueStatusScoping:
+    """Tests that queue status, batch status, and counts_by_destination
+    endpoints scope data to the current user for non-admin callers."""
+
+    def test_get_queue_status_hides_current_item_for_non_owner(self):
+        """get_queue_status() must not expose current item details to non-owner, non-admin users."""
+        from invokeai.app.services.session_queue.session_queue_common import SessionQueueStatus
+
+        # NOTE(review): this exercises only the model contract (None is accepted
+        # for item details), not the endpoint's redaction logic itself.
+        # Simulate a status where the current item belongs to another user
+        # When user_id is provided and doesn't match, item details should be None
+        status_obj = SessionQueueStatus(
+            queue_id="default",
+            item_id=None,  # hidden because user doesn't own current item
+            session_id=None,
+            batch_id=None,
+            pending=2,
+            in_progress=0,
+            completed=1,
+            failed=0,
+            canceled=0,
+            total=3,
+        )
+        # Verify the model accepts None for item details
+        assert status_obj.item_id is None
+        assert status_obj.session_id is None
+        assert status_obj.batch_id is None
+
+    def test_session_queue_status_no_user_fields(self):
+        """SessionQueueStatus should not have user_pending/user_in_progress fields anymore.
+        Non-admin users now get their own counts in the main pending/in_progress fields."""
+        from invokeai.app.services.session_queue.session_queue_common import SessionQueueStatus
+
+        fields = set(SessionQueueStatus.model_fields.keys())
+        assert "user_pending" not in fields
+        assert "user_in_progress" not in fields
+
+
+# ===========================================================================
+# 10b. Model install job authorization
+# ===========================================================================
+
+
+class TestModelInstallAuth:
+    """Tests that model install job endpoints require admin authentication."""
+
+    def test_list_model_installs_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v2/models/install")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_get_model_install_job_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.get("/api/v2/models/install/1")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_pause_model_install_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.post("/api/v2/models/install/1/pause")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_resume_model_install_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.post("/api/v2/models/install/1/resume")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_restart_failed_model_install_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.post("/api/v2/models/install/1/restart_failed")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    def test_restart_model_install_file_requires_auth(self, enable_multiuser: Any, client: TestClient):
+        r = client.post("/api/v2/models/install/1/restart_file", json="https://example.com/model.safetensors")
+        assert r.status_code == status.HTTP_401_UNAUTHORIZED
+
+    # Authenticated but non-admin callers must get 403, not 401.
+    def test_non_admin_cannot_list_model_installs(self, enable_multiuser: Any, client: TestClient, user1_token: str):
+        r = client.get("/api/v2/models/install", headers=_auth(user1_token))
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_non_admin_cannot_pause_model_install(self, enable_multiuser: Any, client: TestClient, user1_token: str):
+        r = client.post("/api/v2/models/install/1/pause", headers=_auth(user1_token))
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+
+# ===========================================================================
+# 11. Bulk download access control
+# ===========================================================================
+
+
+class TestBulkDownloadAccessControl:
+    """Tests that bulk download endpoints enforce image/board read access and
+    that the fetch endpoint verifies ownership of the zip file."""
+
+    @pytest.fixture(autouse=True)
+    def _mock_background_tasks(self, monkeypatch: Any):
+        """Prevent BackgroundTasks.add_task from actually running the handler,
+        which would fail because image_files is None in the test fixture."""
+        from fastapi import BackgroundTasks
+
+        monkeypatch.setattr(BackgroundTasks, "add_task", lambda *args, **kwargs: None)
+
+    def test_bulk_download_by_image_names_rejected_for_non_owner(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User2 must not be able to bulk-download images owned by user1."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "user1-private-dl", user1.user_id)
+
+        r = client.post(
+            "/api/v1/images/download",
+            json={"image_names": ["user1-private-dl"]},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_bulk_download_by_image_names_allowed_for_owner(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str
+    ):
+        """Owner should be able to bulk-download their own images."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "user1-own-dl", user1.user_id)
+
+        r = client.post(
+            "/api/v1/images/download",
+            json={"image_names": ["user1-own-dl"]},
+            headers=_auth(user1_token),
+        )
+        # 202: the zip is produced asynchronously.
+        assert r.status_code == status.HTTP_202_ACCEPTED
+
+    def test_bulk_download_by_board_rejected_for_private_board(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User2 must not be able to bulk-download from user1's private board."""
+        board_id = _create_board(client, user1_token, "Private DL Board")
+
+        r = client.post(
+            "/api/v1/images/download",
+            json={"board_id": board_id},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_403_FORBIDDEN
+
+    def test_bulk_download_by_shared_board_allowed(
+        self, client: TestClient, mock_invoker: Invoker, user1_token: str, user2_token: str
+    ):
+        """User2 should be able to bulk-download from user1's shared board."""
+        board_id = _create_board(client, user1_token, "Shared DL Board")
+        _share_board(client, user1_token, board_id)
+
+        r = client.post(
+            "/api/v1/images/download",
+            json={"board_id": board_id},
+            headers=_auth(user2_token),
+        )
+        assert r.status_code == status.HTTP_202_ACCEPTED
+
+    def test_admin_can_bulk_download_any_images(
+        self, client: TestClient, mock_invoker: Invoker, admin_token: str, user1_token: str
+    ):
+        """Admin should be able to bulk-download any user's images."""
+        user1 = mock_invoker.services.users.get_by_email("user1@test.com")
+        assert user1 is not None
+        _save_image(mock_invoker, "user1-admin-dl", user1.user_id)
+
+        r = client.post(
+            "/api/v1/images/download",
+            json={"image_names": ["user1-admin-dl"]},
+            headers=_auth(admin_token),
+        )
+        assert r.status_code == status.HTTP_202_ACCEPTED
+
+    def test_bulk_download_events_carry_user_id(self):
+        """BulkDownloadEventBase must carry user_id so events can be routed privately."""
+        from invokeai.app.services.events.events_common import (
+            BulkDownloadCompleteEvent,
+            BulkDownloadErrorEvent,
+            BulkDownloadEventBase,
+            BulkDownloadStartedEvent,
+        )
+
+        assert "user_id" in BulkDownloadEventBase.model_fields
+
+        started = BulkDownloadStartedEvent.build("default", "item-1", "item-1.zip", user_id="owner-abc")
+        assert started.user_id == "owner-abc"
+
+        complete = BulkDownloadCompleteEvent.build("default", "item-2", "item-2.zip", user_id="owner-abc")
+        assert complete.user_id == "owner-abc"
+
+        error = BulkDownloadErrorEvent.build("default", "item-3", "item-3.zip", "oops", user_id="owner-abc")
+        assert error.user_id == "owner-abc"
+
+    def test_bulk_download_event_not_emitted_to_shared_default_room(self, mock_invoker: Invoker, monkeypatch: Any):
+        """Bulk download capability tokens must not be broadcast to the shared default room."""
+        import asyncio
+        from unittest.mock import AsyncMock
+
+        from fastapi import FastAPI
+
+        from invokeai.app.api.sockets import SocketIO
+        from invokeai.app.services.events.events_common import BulkDownloadCompleteEvent
+
+        mock_deps = MockApiDependencies(mock_invoker)
+        monkeypatch.setattr("invokeai.app.api.dependencies.ApiDependencies", mock_deps)
+
+        fastapi_app = FastAPI()
+        socketio = SocketIO(fastapi_app)
+
+        event = BulkDownloadCompleteEvent.build("default", "item-x", "item-x.zip", user_id="owner-xyz")
+
+        # Capture every emit call so we can inspect which rooms received the event.
+        mock_emit = AsyncMock()
+        socketio._sio.emit = mock_emit
+
+        asyncio.run(socketio._handle_bulk_image_download_event(("bulk_download_complete", event)))
+
+        rooms_emitted_to = [call.kwargs.get("room") for call in mock_emit.call_args_list]
+        assert "default" not in rooms_emitted_to
+        assert "user:owner-xyz" in rooms_emitted_to
+
+
+# ===========================================================================
+# 12. WebSocket authentication and event scoping
+# ===========================================================================
+
+
+class TestWebSocketAuth:
+ """Tests that anonymous WebSocket clients cannot subscribe to queue rooms
+ in multiuser mode, and that queue item events are scoped to the owner +
+ admin rooms instead of being broadcast to the full queue room."""
+
+    @pytest.fixture
+    def socketio(self, mock_invoker: Invoker, monkeypatch: Any):
+        """Create a SocketIO instance wired to the mock invoker's configuration."""
+        from fastapi import FastAPI
+
+        from invokeai.app.api.sockets import SocketIO
+
+        # The SocketIO connect/sub handlers look up ApiDependencies.invoker.services.configuration.multiuser
+        # at request time. Patch it to point at the mock invoker.
+        mock_deps = MockApiDependencies(mock_invoker)
+        monkeypatch.setattr("invokeai.app.api.dependencies.ApiDependencies", mock_deps)
+
+        # A fresh FastAPI app + SocketIO instance per test keeps socket state isolated.
+        fastapi_app = FastAPI()
+        return SocketIO(fastapi_app)
+
+    def test_connect_rejected_without_token_in_multiuser_mode(self, socketio: Any, mock_invoker: Invoker) -> None:
+        """In multiuser mode, _handle_connect must return False when no valid token is provided."""
+        import asyncio
+
+        mock_invoker.services.configuration.multiuser = True
+
+        # Returning False from a socket.io connect handler refuses the connection.
+        result = asyncio.run(socketio._handle_connect("sid-anon-1", environ={}, auth=None))
+        assert result is False
+        # The socket must not be recorded in the users dict
+        assert "sid-anon-1" not in socketio._socket_users
+
+    def test_connect_rejected_with_invalid_token_in_multiuser_mode(
+        self, socketio: Any, mock_invoker: Invoker, setup_jwt_secret: None
+    ) -> None:
+        """An invalid/garbage token in multiuser mode must still be rejected."""
+        import asyncio
+
+        mock_invoker.services.configuration.multiuser = True
+
+        # A garbage token must fail verification, not fall back to anonymous access.
+        result = asyncio.run(socketio._handle_connect("sid-bad-1", environ={}, auth={"token": "not-a-real-token"}))
+        assert result is False
+        assert "sid-bad-1" not in socketio._socket_users
+
+ def test_connect_accepted_without_token_in_single_user_mode(self, socketio: Any, mock_invoker: Invoker) -> None:
+ """In single-user mode, the socket handler should accept unauthenticated connections
+ as the system admin user (matching how the REST API's get_current_user_or_default behaves)."""
+ import asyncio
+
+ mock_invoker.services.configuration.multiuser = False
+
+ result = asyncio.run(socketio._handle_connect("sid-single-1", environ={}, auth=None))
+ assert result is True
+ assert socketio._socket_users["sid-single-1"]["user_id"] == "system"
+ assert socketio._socket_users["sid-single-1"]["is_admin"] is True
+
+ def test_connect_accepted_with_valid_token_in_multiuser_mode(
+ self,
+ socketio: Any,
+ mock_invoker: Invoker,
+ setup_jwt_secret: None,
+ ) -> None:
+ """A valid token in multiuser mode should be accepted with the correct user identity."""
+ import asyncio
+
+ from invokeai.app.services.auth.token_service import TokenData, create_access_token
+ from invokeai.app.services.users.users_common import UserCreateRequest
+
+ mock_invoker.services.configuration.multiuser = True
+
+ # Create the user in the database so the active-user check passes
+ user = mock_invoker.services.users.create(
+ UserCreateRequest(email="real@test.com", display_name="Real User", password="Test1234!@#$")
+ )
+ token = create_access_token(TokenData(user_id=user.user_id, email=user.email, is_admin=False))
+
+ result = asyncio.run(socketio._handle_connect("sid-good-1", environ={}, auth={"token": token}))
+ assert result is True
+ assert socketio._socket_users["sid-good-1"]["user_id"] == user.user_id
+ assert socketio._socket_users["sid-good-1"]["is_admin"] is False
+
+ def test_connect_rejected_for_deleted_user_in_multiuser_mode(
+ self, socketio: Any, mock_invoker: Invoker, setup_jwt_secret: None
+ ) -> None:
+ """A structurally valid JWT for a user that no longer exists in the database
+ must be rejected. This mirrors the REST auth check in auth_dependencies.py:53-58."""
+ import asyncio
+
+ from invokeai.app.services.auth.token_service import TokenData, create_access_token
+
+ mock_invoker.services.configuration.multiuser = True
+ # Create a token for a user_id that was never created in the user service
+ token = create_access_token(TokenData(user_id="deleted-user-999", email="gone@test.com", is_admin=False))
+
+ result = asyncio.run(socketio._handle_connect("sid-deleted-1", environ={}, auth={"token": token}))
+ assert result is False
+ assert "sid-deleted-1" not in socketio._socket_users
+
+ def test_connect_rejected_for_inactive_user_in_multiuser_mode(
+ self, socketio: Any, mock_invoker: Invoker, setup_jwt_secret: None
+ ) -> None:
+ """A structurally valid JWT for a deactivated user must be rejected even though
+ the token itself has not expired."""
+ import asyncio
+
+ from invokeai.app.services.auth.token_service import TokenData, create_access_token
+ from invokeai.app.services.users.users_common import UserCreateRequest
+
+ mock_invoker.services.configuration.multiuser = True
+
+ # Create a real user, then deactivate them
+ user = mock_invoker.services.users.create(
+ UserCreateRequest(email="inactive@test.com", display_name="Inactive", password="Test1234!@#$")
+ )
+ token = create_access_token(TokenData(user_id=user.user_id, email=user.email, is_admin=False))
+
+ # Deactivate the user
+ from invokeai.app.services.users.users_common import UserUpdateRequest
+
+ mock_invoker.services.users.update(user.user_id, UserUpdateRequest(is_active=False))
+
+ result = asyncio.run(socketio._handle_connect("sid-inactive-1", environ={}, auth={"token": token}))
+ assert result is False
+ assert "sid-inactive-1" not in socketio._socket_users
+
+ def test_sub_queue_refuses_unknown_socket_in_multiuser_mode(self, socketio: Any, mock_invoker: Invoker) -> None:
+ """If a socket somehow reaches _handle_sub_queue without a recorded identity
+ in multiuser mode (e.g. bug, race), it must be refused rather than falling back
+ to an anonymous system user who could then observe queue item events."""
+ import asyncio
+
+ mock_invoker.services.configuration.multiuser = True
+
+ # Call sub_queue without a corresponding connect — the sid is unknown.
+ asyncio.run(socketio._handle_sub_queue("sid-ghost-1", {"queue_id": "default"}))
+
+ # The ghost socket must not have been added to the internal users dict
+ assert "sid-ghost-1" not in socketio._socket_users
+
+ def test_queue_item_status_changed_has_user_id(self) -> None:
+ """QueueItemStatusChangedEvent must carry user_id so _handle_queue_event can
+ route it to the owner + admin rooms instead of the public queue room. Without
+ this field the event falls through to the generic broadcast branch and any
+ subscriber to the queue can observe cross-user queue activity."""
+ from invokeai.app.services.events.events_common import (
+ InvocationEventBase,
+ QueueItemEventBase,
+ QueueItemStatusChangedEvent,
+ )
+
+ # The event base carries a user_id field
+ assert "user_id" in QueueItemEventBase.model_fields
+ # QueueItemStatusChangedEvent inherits it
+ assert "user_id" in QueueItemStatusChangedEvent.model_fields
+ # It is NOT an InvocationEventBase (so the generic QueueItemEventBase branch
+ # in _handle_queue_event must also handle it privately)
+ assert not issubclass(QueueItemStatusChangedEvent, InvocationEventBase)
+
+ def test_batch_enqueued_event_carries_user_id(self) -> None:
+ """BatchEnqueuedEvent must carry user_id so it can be routed privately to the
+ owner and admin rooms. Otherwise a subscriber on the same queue_id would see
+ every other user's batch_id, origin and enqueued counts."""
+ from invokeai.app.services.events.events_common import BatchEnqueuedEvent
+ from invokeai.app.services.session_queue.session_queue_common import (
+ Batch,
+ EnqueueBatchResult,
+ )
+ from invokeai.app.services.shared.graph import Graph
+
+ enqueue_result = EnqueueBatchResult(
+ queue_id="default",
+ enqueued=3,
+ requested=3,
+ batch=Batch(batch_id="batch-xyz", origin="workflows", graph=Graph()),
+ priority=0,
+ item_ids=[1, 2, 3],
+ )
+ event = BatchEnqueuedEvent.build(enqueue_result, user_id="owner-123")
+ assert event.user_id == "owner-123"
+ assert event.batch_id == "batch-xyz"
+ assert event.queue_id == "default"
+
+ def test_queue_item_status_changed_routed_privately(self, socketio: Any) -> None:
+ """Verify that _handle_queue_event emits QueueItemStatusChangedEvent ONLY to
+ user:{user_id} and admin rooms, never to the queue_id room."""
+ import asyncio
+ from unittest.mock import AsyncMock
+
+ from invokeai.app.services.events.events_common import QueueItemStatusChangedEvent
+ from invokeai.app.services.session_queue.session_queue_common import (
+ BatchStatus,
+ SessionQueueStatus,
+ )
+
+ event = QueueItemStatusChangedEvent(
+ queue_id="default",
+ item_id=1,
+ batch_id="batch-private",
+ origin="workflows",
+ destination="canvas",
+ user_id="owner-xyz",
+ session_id="sess-private",
+ status="in_progress",
+ created_at="2026-01-01T00:00:00",
+ updated_at="2026-01-01T00:01:00",
+ started_at="2026-01-01T00:00:30",
+ completed_at=None,
+ batch_status=BatchStatus(
+ queue_id="default",
+ batch_id="batch-private",
+ origin="workflows",
+ destination="canvas",
+ pending=0,
+ in_progress=1,
+ completed=0,
+ failed=0,
+ canceled=0,
+ total=1,
+ ),
+ queue_status=SessionQueueStatus(
+ queue_id="default",
+ item_id=1,
+ session_id="sess-private",
+ batch_id="batch-private",
+ pending=0,
+ in_progress=1,
+ completed=0,
+ failed=0,
+ canceled=0,
+ total=1,
+ ),
+ )
+
+ mock_emit = AsyncMock()
+ socketio._sio.emit = mock_emit
+
+ asyncio.run(socketio._handle_queue_event(("queue_item_status_changed", event)))
+
+ rooms_emitted_to = [call.kwargs.get("room") for call in mock_emit.call_args_list]
+ assert "user:owner-xyz" in rooms_emitted_to
+ assert "admin" in rooms_emitted_to
+ # CRITICAL: must NOT emit to the queue_id room — that would leak to other users
+ assert "default" not in rooms_emitted_to
+
+ def test_batch_enqueued_routed_privately(self, socketio: Any) -> None:
+ """Verify that _handle_queue_event emits BatchEnqueuedEvent ONLY to
+ user:{user_id} and admin rooms, never to the queue_id room."""
+ import asyncio
+ from unittest.mock import AsyncMock
+
+ from invokeai.app.services.events.events_common import BatchEnqueuedEvent
+ from invokeai.app.services.session_queue.session_queue_common import (
+ Batch,
+ EnqueueBatchResult,
+ )
+ from invokeai.app.services.shared.graph import Graph
+
+ enqueue_result = EnqueueBatchResult(
+ queue_id="default",
+ enqueued=5,
+ requested=5,
+ batch=Batch(batch_id="batch-pvt", origin="workflows", graph=Graph()),
+ priority=0,
+ item_ids=[10, 11, 12, 13, 14],
+ )
+ event = BatchEnqueuedEvent.build(enqueue_result, user_id="owner-zzz")
+
+ mock_emit = AsyncMock()
+ socketio._sio.emit = mock_emit
+
+ asyncio.run(socketio._handle_queue_event(("batch_enqueued", event)))
+
+ rooms_emitted_to = [call.kwargs.get("room") for call in mock_emit.call_args_list]
+ assert "user:owner-zzz" in rooms_emitted_to
+ assert "admin" in rooms_emitted_to
+ assert "default" not in rooms_emitted_to
+
+ def test_queue_cleared_still_broadcast(self, socketio: Any) -> None:
+ """QueueClearedEvent does not carry user identity and should still be broadcast
+ to all queue subscribers — this is a sanity check that we haven't over-scoped."""
+ import asyncio
+ from unittest.mock import AsyncMock
+
+ from invokeai.app.services.events.events_common import QueueClearedEvent
+
+ event = QueueClearedEvent.build(queue_id="default")
+
+ mock_emit = AsyncMock()
+ socketio._sio.emit = mock_emit
+
+ asyncio.run(socketio._handle_queue_event(("queue_cleared", event)))
+
+ rooms_emitted_to = [call.kwargs.get("room") for call in mock_emit.call_args_list]
+ assert "default" in rooms_emitted_to
diff --git a/tests/app/routers/test_session_queue_sanitization.py b/tests/app/routers/test_session_queue_sanitization.py
index 1b2262d02e..1cd7a4953d 100644
--- a/tests/app/routers/test_session_queue_sanitization.py
+++ b/tests/app/routers/test_session_queue_sanitization.py
@@ -100,11 +100,21 @@ def test_sanitize_queue_item_for_different_user(sample_session_queue_item):
# Non-admin viewing another user's item should have sanitized data
assert result.field_values is None
assert result.workflow is None
- # Session should be replaced with empty graph
+ # Session should be replaced with empty/redacted graph
assert result.session.graph.nodes is not None
assert len(result.session.graph.nodes) == 0
- # Session ID should be preserved
- assert result.session.id == "test_session"
+ assert result.session.id == "redacted"
+ # Identity and batch fields should be redacted
+ assert result.user_id == "redacted"
+ assert result.batch_id == "redacted"
+ assert result.session_id == "redacted"
+ assert result.user_display_name is None
+ assert result.user_email is None
+ assert result.origin is None
+ assert result.destination is None
+ assert result.error_type is None
+ assert result.error_message is None
+ assert result.error_traceback is None
def test_sanitize_preserves_non_sensitive_fields(sample_session_queue_item):
@@ -115,15 +125,18 @@ def test_sanitize_preserves_non_sensitive_fields(sample_session_queue_item):
is_admin=False,
)
- # These fields should be preserved
+ # Non-sensitive fields should be preserved
assert result.item_id == 1
assert result.status == "pending"
- assert result.batch_id == "batch_123"
- assert result.session_id == "session_123"
assert result.queue_id == "default"
- assert result.user_id == "user_123"
- assert result.user_display_name == "Test User"
- assert result.user_email == "test@example.com"
+ assert result.created_at is not None
+ assert result.updated_at is not None
+ # Sensitive fields should be redacted for non-owner non-admin
+ assert result.batch_id == "redacted"
+ assert result.session_id == "redacted"
+ assert result.user_id == "redacted"
+ assert result.user_display_name is None
+ assert result.user_email is None
def test_sanitize_system_user_item_for_non_admin(sample_session_queue_item):
diff --git a/tests/app/routers/test_workflows_multiuser.py b/tests/app/routers/test_workflows_multiuser.py
new file mode 100644
index 0000000000..28b301e18e
--- /dev/null
+++ b/tests/app/routers/test_workflows_multiuser.py
@@ -0,0 +1,334 @@
+"""Tests for multiuser workflow library functionality."""
+
+import logging
+from typing import Any
+from unittest.mock import MagicMock
+
+import pytest
+from fastapi import status
+from fastapi.testclient import TestClient
+
+from invokeai.app.api.dependencies import ApiDependencies
+from invokeai.app.api_app import app
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.users.users_common import UserCreateRequest
+from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
+from invokeai.backend.util.logging import InvokeAILogger
+from tests.fixtures.sqlite_database import create_mock_sqlite_database
+
+
+class MockApiDependencies(ApiDependencies):
+ invoker: Invoker
+
+ def __init__(self, invoker: Invoker) -> None:
+ self.invoker = invoker
+
+
+WORKFLOW_BODY = {
+ "name": "Test Workflow",
+ "author": "",
+ "description": "A test workflow",
+ "version": "1.0.0",
+ "contact": "",
+ "tags": "",
+ "notes": "",
+ "nodes": [],
+ "edges": [],
+ "exposedFields": [],
+ "meta": {"version": "3.0.0", "category": "user"},
+ "id": None,
+ "form_fields": [],
+}
+
+
+@pytest.fixture
+def setup_jwt_secret():
+ from invokeai.app.services.auth.token_service import set_jwt_secret
+
+ set_jwt_secret("test-secret-key-for-unit-tests-only-do-not-use-in-production")
+
+
+@pytest.fixture
+def client():
+ return TestClient(app)
+
+
+@pytest.fixture
+def mock_services() -> InvocationServices:
+ from invokeai.app.services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage
+ from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
+ from invokeai.app.services.boards.boards_default import BoardService
+ from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
+ from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import (
+ ClientStatePersistenceSqlite,
+ )
+ from invokeai.app.services.image_records.image_records_sqlite import SqliteImageRecordStorage
+ from invokeai.app.services.images.images_default import ImageService
+ from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
+ from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService
+ from invokeai.app.services.users.users_default import UserService
+ from tests.test_nodes import TestEventService
+
+ configuration = InvokeAIAppConfig(use_memory_db=True, node_cache_size=0)
+ logger = InvokeAILogger.get_logger()
+ db = create_mock_sqlite_database(configuration, logger)
+
+ return InvocationServices(
+ board_image_records=SqliteBoardImageRecordStorage(db=db),
+ board_images=None, # type: ignore
+ board_records=SqliteBoardRecordStorage(db=db),
+ boards=BoardService(),
+ bulk_download=BulkDownloadService(),
+ configuration=configuration,
+ events=TestEventService(),
+ image_files=None, # type: ignore
+ image_records=SqliteImageRecordStorage(db=db),
+ images=ImageService(),
+ invocation_cache=MemoryInvocationCache(max_cache_size=0),
+ logger=logging, # type: ignore
+ model_images=None, # type: ignore
+ model_manager=None, # type: ignore
+ download_queue=None, # type: ignore
+ names=None, # type: ignore
+ performance_statistics=InvocationStatsService(),
+ session_processor=None, # type: ignore
+ session_queue=None, # type: ignore
+ urls=None, # type: ignore
+ workflow_records=SqliteWorkflowRecordsStorage(db=db),
+ tensors=None, # type: ignore
+ conditioning=None, # type: ignore
+ style_preset_records=None, # type: ignore
+ style_preset_image_files=None, # type: ignore
+ workflow_thumbnails=None, # type: ignore
+ model_relationship_records=None, # type: ignore
+ model_relationships=None, # type: ignore
+ client_state_persistence=ClientStatePersistenceSqlite(db=db),
+ users=UserService(db),
+ )
+
+
+def create_test_user(mock_invoker: Invoker, email: str, display_name: str, is_admin: bool = False) -> str:
+ user_service = mock_invoker.services.users
+ user_data = UserCreateRequest(email=email, display_name=display_name, password="TestPass123", is_admin=is_admin)
+ user = user_service.create(user_data)
+ return user.user_id
+
+
+def get_user_token(client: TestClient, email: str) -> str:
+ response = client.post(
+ "/api/v1/auth/login",
+ json={"email": email, "password": "TestPass123", "remember_me": False},
+ )
+ assert response.status_code == 200
+ return response.json()["token"]
+
+
+@pytest.fixture
+def enable_multiuser(monkeypatch: Any, mock_invoker: Invoker):
+ mock_invoker.services.configuration.multiuser = True
+ mock_workflow_thumbnails = MagicMock()
+ mock_workflow_thumbnails.get_url.return_value = None
+ mock_invoker.services.workflow_thumbnails = mock_workflow_thumbnails
+
+ mock_deps = MockApiDependencies(mock_invoker)
+ monkeypatch.setattr("invokeai.app.api.routers.auth.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.auth_dependencies.ApiDependencies", mock_deps)
+ monkeypatch.setattr("invokeai.app.api.routers.workflows.ApiDependencies", mock_deps)
+ yield
+
+
+@pytest.fixture
+def admin_token(setup_jwt_secret: None, enable_multiuser: Any, mock_invoker: Invoker, client: TestClient):
+ create_test_user(mock_invoker, "admin@test.com", "Admin", is_admin=True)
+ return get_user_token(client, "admin@test.com")
+
+
+@pytest.fixture
+def user1_token(enable_multiuser: Any, mock_invoker: Invoker, client: TestClient, admin_token: str):
+ create_test_user(mock_invoker, "user1@test.com", "User One", is_admin=False)
+ return get_user_token(client, "user1@test.com")
+
+
+@pytest.fixture
+def user2_token(enable_multiuser: Any, mock_invoker: Invoker, client: TestClient, admin_token: str):
+ create_test_user(mock_invoker, "user2@test.com", "User Two", is_admin=False)
+ return get_user_token(client, "user2@test.com")
+
+
+def create_workflow(client: TestClient, token: str) -> str:
+ response = client.post(
+ "/api/v1/workflows/",
+ json={"workflow": WORKFLOW_BODY},
+ headers={"Authorization": f"Bearer {token}"},
+ )
+ assert response.status_code == 200, response.text
+ return response.json()["workflow_id"]
+
+
+# ---------------------------------------------------------------------------
+# Auth tests
+# ---------------------------------------------------------------------------
+
+
+def test_list_workflows_requires_auth(enable_multiuser: Any, client: TestClient):
+ response = client.get("/api/v1/workflows/")
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+def test_create_workflow_requires_auth(enable_multiuser: Any, client: TestClient):
+ response = client.post("/api/v1/workflows/", json={"workflow": WORKFLOW_BODY})
+ assert response.status_code == status.HTTP_401_UNAUTHORIZED
+
+
+# ---------------------------------------------------------------------------
+# Ownership isolation
+# ---------------------------------------------------------------------------
+
+
+def test_workflows_are_isolated_between_users(client: TestClient, user1_token: str, user2_token: str):
+ """Users should only see their own workflows in list."""
+ # user1 creates a workflow
+ create_workflow(client, user1_token)
+
+ # user1 can see it
+ r1 = client.get("/api/v1/workflows/?categories=user", headers={"Authorization": f"Bearer {user1_token}"})
+ assert r1.status_code == 200
+ assert r1.json()["total"] == 1
+
+ # user2 cannot see user1's workflow
+ r2 = client.get("/api/v1/workflows/?categories=user", headers={"Authorization": f"Bearer {user2_token}"})
+ assert r2.status_code == 200
+ assert r2.json()["total"] == 0
+
+
+def test_user_cannot_delete_another_users_workflow(client: TestClient, user1_token: str, user2_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ response = client.delete(
+ f"/api/v1/workflows/i/{workflow_id}",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+def test_user_cannot_update_another_users_workflow(client: TestClient, user1_token: str, user2_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ updated = {**WORKFLOW_BODY, "id": workflow_id, "name": "Hijacked"}
+ response = client.patch(
+ f"/api/v1/workflows/i/{workflow_id}",
+ json={"workflow": updated},
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+def test_owner_can_delete_own_workflow(client: TestClient, user1_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ response = client.delete(
+ f"/api/v1/workflows/i/{workflow_id}",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == 200
+
+
+def test_admin_can_delete_any_workflow(client: TestClient, admin_token: str, user1_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ response = client.delete(
+ f"/api/v1/workflows/i/{workflow_id}",
+ headers={"Authorization": f"Bearer {admin_token}"},
+ )
+ assert response.status_code == 200
+
+
+# ---------------------------------------------------------------------------
+# Shared workflow (is_public)
+# ---------------------------------------------------------------------------
+
+
+def test_update_is_public_owner_succeeds(client: TestClient, user1_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ response = client.patch(
+ f"/api/v1/workflows/i/{workflow_id}/is_public",
+ json={"is_public": True},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == 200
+ assert response.json()["is_public"] is True
+
+
+def test_update_is_public_other_user_forbidden(client: TestClient, user1_token: str, user2_token: str):
+ workflow_id = create_workflow(client, user1_token)
+ response = client.patch(
+ f"/api/v1/workflows/i/{workflow_id}/is_public",
+ json={"is_public": True},
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == status.HTTP_403_FORBIDDEN
+
+
+def test_public_workflow_visible_to_other_users(client: TestClient, user1_token: str, user2_token: str):
+ """A shared (is_public=True) workflow should appear when filtering with is_public=true."""
+ workflow_id = create_workflow(client, user1_token)
+ # Make it public
+ client.patch(
+ f"/api/v1/workflows/i/{workflow_id}/is_public",
+ json={"is_public": True},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # user2 can see it through is_public=true filter
+ response = client.get(
+ "/api/v1/workflows/?categories=user&is_public=true",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == 200
+ ids = [w["workflow_id"] for w in response.json()["items"]]
+ assert workflow_id in ids
+
+
+def test_private_workflow_not_visible_to_other_users(client: TestClient, user1_token: str, user2_token: str):
+ """A private (is_public=False) user workflow should NOT appear for another user."""
+ workflow_id = create_workflow(client, user1_token)
+
+ # user2 lists 'yours' style (their own workflows)
+ response = client.get(
+ "/api/v1/workflows/?categories=user",
+ headers={"Authorization": f"Bearer {user2_token}"},
+ )
+ assert response.status_code == 200
+ ids = [w["workflow_id"] for w in response.json()["items"]]
+ assert workflow_id not in ids
+
+
+def test_public_workflow_still_in_owners_list(client: TestClient, user1_token: str):
+ """A shared workflow should still appear in the owner's own workflow list."""
+ workflow_id = create_workflow(client, user1_token)
+ client.patch(
+ f"/api/v1/workflows/i/{workflow_id}/is_public",
+ json={"is_public": True},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+
+ # owner's 'yours' list (no is_public filter)
+ response = client.get(
+ "/api/v1/workflows/?categories=user",
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == 200
+ ids = [w["workflow_id"] for w in response.json()["items"]]
+ assert workflow_id in ids
+
+
+def test_workflow_has_user_id_and_is_public_fields(client: TestClient, user1_token: str):
+ """Created workflow should return user_id and is_public fields."""
+ response = client.post(
+ "/api/v1/workflows/",
+ json={"workflow": WORKFLOW_BODY},
+ headers={"Authorization": f"Bearer {user1_token}"},
+ )
+ assert response.status_code == 200
+ data = response.json()
+ assert "user_id" in data
+ assert "is_public" in data
+ assert data["is_public"] is False
diff --git a/tests/app/util/test_step_callback.py b/tests/app/util/test_step_callback.py
new file mode 100644
index 0000000000..bada75f7ec
--- /dev/null
+++ b/tests/app/util/test_step_callback.py
@@ -0,0 +1,119 @@
+"""Tests for diffusion step callback preview image generation."""
+
+import torch
+from PIL import Image
+
+from invokeai.app.util.step_callback import (
+ QWEN_IMAGE_LATENT_RGB_BIAS,
+ QWEN_IMAGE_LATENT_RGB_FACTORS,
+ sample_to_lowres_estimated_image,
+)
+
+
+class TestSampleToLowresEstimatedImage:
+ """Test the latent-to-preview-image conversion used during denoising."""
+
+ def test_qwen_image_preview_produces_valid_image(self):
+ """A synthetic Qwen latent tensor produces a valid RGB preview image."""
+ # Create a small 1x16x4x4 latent tensor (batch=1, channels=16, 4x4 spatial)
+ torch.manual_seed(42)
+ sample = torch.randn(1, 16, 4, 4)
+
+ factors = torch.tensor(QWEN_IMAGE_LATENT_RGB_FACTORS, dtype=sample.dtype)
+ bias = torch.tensor(QWEN_IMAGE_LATENT_RGB_BIAS, dtype=sample.dtype)
+
+ image = sample_to_lowres_estimated_image(
+ samples=sample,
+ latent_rgb_factors=factors,
+ latent_rgb_bias=bias,
+ )
+
+ assert isinstance(image, Image.Image)
+ assert image.size == (4, 4)
+ assert image.mode == "RGB"
+
+ def test_qwen_image_preview_deterministic(self):
+ """The same input tensor always produces the same preview image."""
+ sample = torch.ones(1, 16, 2, 2)
+
+ factors = torch.tensor(QWEN_IMAGE_LATENT_RGB_FACTORS, dtype=sample.dtype)
+ bias = torch.tensor(QWEN_IMAGE_LATENT_RGB_BIAS, dtype=sample.dtype)
+
+ image1 = sample_to_lowres_estimated_image(samples=sample, latent_rgb_factors=factors, latent_rgb_bias=bias)
+ image2 = sample_to_lowres_estimated_image(samples=sample, latent_rgb_factors=factors, latent_rgb_bias=bias)
+
+ assert list(image1.getdata()) == list(image2.getdata())
+
+ def test_qwen_image_preview_known_value(self):
+ """Verify the preview computation against a hand-calculated expected value.
+
+ With a 1x16x1x1 tensor of all ones:
+ - latent_image = [1,1,...,1] @ factors = sum of each column of factors
+ - R = sum(col 0) = 0.3677, G = sum(col 1) = 0.4577, B = sum(col 2) = 0.9101
+ - After bias: R = 0.1842, G = 0.3709, B = 0.5741
+ - After scale ((x+1)/2): R = 0.5921, G = 0.6855, B = 0.7871
+ - After quantize (*255): R = 151, G = 175, B = 201
+ """
+ sample = torch.ones(1, 16, 1, 1)
+
+ factors = torch.tensor(QWEN_IMAGE_LATENT_RGB_FACTORS, dtype=sample.dtype)
+ bias = torch.tensor(QWEN_IMAGE_LATENT_RGB_BIAS, dtype=sample.dtype)
+
+ image = sample_to_lowres_estimated_image(samples=sample, latent_rgb_factors=factors, latent_rgb_bias=bias)
+
+ assert image.size == (1, 1)
+ pixel = image.getpixel((0, 0))
+
+ # Compute expected values
+ col_sums = [sum(row[c] for row in QWEN_IMAGE_LATENT_RGB_FACTORS) for c in range(3)]
+ expected = []
+ for c in range(3):
+ val = col_sums[c] + QWEN_IMAGE_LATENT_RGB_BIAS[c]
+ val = (val + 1) / 2 # scale from [-1,1] to [0,1]
+ val = max(0.0, min(1.0, val)) # clamp
+ expected.append(int(val * 255))
+
+ assert pixel == tuple(expected), f"Expected {tuple(expected)}, got {pixel}"
+
+ def test_qwen_image_preview_zeros_tensor(self):
+ """A zero tensor with bias produces a valid image reflecting just the bias."""
+ sample = torch.zeros(1, 16, 2, 2)
+
+ factors = torch.tensor(QWEN_IMAGE_LATENT_RGB_FACTORS, dtype=sample.dtype)
+ bias = torch.tensor(QWEN_IMAGE_LATENT_RGB_BIAS, dtype=sample.dtype)
+
+ image = sample_to_lowres_estimated_image(samples=sample, latent_rgb_factors=factors, latent_rgb_bias=bias)
+
+ assert isinstance(image, Image.Image)
+ assert image.size == (2, 2)
+
+ # All pixels should be identical (uniform zero input)
+        pixels = list(image.getdata())
+ assert all(p == pixels[0] for p in pixels)
+
+ # With zero input, result = bias, scaled: ((bias + 1) / 2) * 255
+ expected = []
+ for c in range(3):
+ val = (QWEN_IMAGE_LATENT_RGB_BIAS[c] + 1) / 2
+ val = max(0.0, min(1.0, val))
+ expected.append(int(val * 255))
+ assert pixels[0] == tuple(expected)
+
+ def test_qwen_image_factors_have_correct_shape(self):
+ """Qwen Image uses 16 latent channels, so factors should be 16x3."""
+ assert len(QWEN_IMAGE_LATENT_RGB_FACTORS) == 16
+ for row in QWEN_IMAGE_LATENT_RGB_FACTORS:
+ assert len(row) == 3
+ assert len(QWEN_IMAGE_LATENT_RGB_BIAS) == 3
+
+ def test_3d_input_accepted(self):
+ """sample_to_lowres_estimated_image accepts 3D input (no batch dim)."""
+ sample = torch.randn(16, 4, 4) # no batch dimension
+
+ factors = torch.tensor(QWEN_IMAGE_LATENT_RGB_FACTORS, dtype=sample.dtype)
+ bias = torch.tensor(QWEN_IMAGE_LATENT_RGB_BIAS, dtype=sample.dtype)
+
+ image = sample_to_lowres_estimated_image(samples=sample, latent_rgb_factors=factors, latent_rgb_bias=bias)
+
+ assert isinstance(image, Image.Image)
+ assert image.size == (4, 4)
diff --git a/tests/backend/model_manager/configs/test_double_variant_regression.py b/tests/backend/model_manager/configs/test_double_variant_regression.py
new file mode 100644
index 0000000000..d2b0c3416d
--- /dev/null
+++ b/tests/backend/model_manager/configs/test_double_variant_regression.py
@@ -0,0 +1,115 @@
+"""Regression tests for the double-variant kwarg bug.
+
+When override_fields contains a field (variant, repo_variant, prediction_type, etc.)
+that is also computed and passed as an explicit kwarg to cls(), using .get() instead
+of .pop() causes TypeError("got multiple values for keyword argument ...").
+
+These tests verify that .pop() is used consistently, so override values don't conflict
+with explicitly computed values.
+"""
+
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+from invokeai.backend.model_manager.taxonomy import QwenImageVariantType
+
+# Required fields for the Pydantic config model
+_REQUIRED_FIELDS = {
+ "hash": "blake3:fakehash",
+ "path": "/fake/models/test-model",
+ "file_size": 1000,
+ "name": "test-model",
+ "description": "test",
+ "source": "test",
+ "source_type": "path",
+ "key": "test-key",
+}
+
+
+def _make_mock_dir(dirname: str = "test-model") -> MagicMock:
+ """Create a mock ModelOnDisk for a Diffusers directory."""
+ mod = MagicMock()
+ mod.path = Path(f"/fake/models/{dirname}")
+ return mod
+
+
+class TestDoubleVariantRegression:
+ """Verify that override_fields with variant/repo_variant don't cause double-kwarg errors."""
+
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_class_name")
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_dir")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_qwen_image_diffusers_with_variant_in_overrides(self, _rfo, _rid, _rfc):
+ """Installing a Qwen Image Edit Diffusers model with variant in override_fields should not crash."""
+ from invokeai.backend.model_manager.configs.main import Main_Diffusers_QwenImage_Config
+
+ mod = _make_mock_dir("Qwen-Image-Edit-2511")
+
+ # Simulate what happens when a starter model provides variant
+ overrides = {
+ **_REQUIRED_FIELDS,
+ "variant": QwenImageVariantType.Edit,
+ }
+
+ from invokeai.backend.model_manager.configs.base import ModelRepoVariant
+
+ with patch.object(
+ Main_Diffusers_QwenImage_Config, "_get_repo_variant_or_raise", return_value=ModelRepoVariant("")
+ ):
+ with patch.object(
+ Main_Diffusers_QwenImage_Config,
+ "_get_qwen_image_variant",
+ return_value=QwenImageVariantType.Edit,
+ ):
+ # This would previously raise: TypeError("got multiple values for keyword argument 'variant'")
+ config = Main_Diffusers_QwenImage_Config.from_model_on_disk(mod, overrides)
+
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_class_name")
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_dir")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_qwen_image_diffusers_override_variant_takes_precedence(self, _rfo, _rid, _rfc):
+ """An explicit variant override should take precedence over auto-detection."""
+ from invokeai.backend.model_manager.configs.base import ModelRepoVariant
+ from invokeai.backend.model_manager.configs.main import Main_Diffusers_QwenImage_Config
+
+ mod = _make_mock_dir("Qwen-Image-2512")
+
+ overrides = {
+ **_REQUIRED_FIELDS,
+ "variant": QwenImageVariantType.Edit, # explicitly override to Edit
+ }
+
+ with patch.object(
+ Main_Diffusers_QwenImage_Config, "_get_repo_variant_or_raise", return_value=ModelRepoVariant("")
+ ):
+ with patch.object(
+ Main_Diffusers_QwenImage_Config,
+ "_get_qwen_image_variant",
+ return_value=QwenImageVariantType.Generate, # auto-detect says Generate
+ ):
+ config = Main_Diffusers_QwenImage_Config.from_model_on_disk(mod, overrides)
+
+ # Override should win over auto-detection
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_qwen_image_gguf_with_variant_in_overrides(self, _rfo, _rif, _hgt, _hqk):
+ """Installing a Qwen Image Edit GGUF with variant in override_fields should not crash."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = MagicMock()
+ mod.path = Path("/fake/models/qwen-image-edit-2511-Q4_K_M.gguf")
+ mod.load_state_dict.return_value = {}
+
+ overrides = {
+ **_REQUIRED_FIELDS,
+ "variant": QwenImageVariantType.Edit,
+ }
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, overrides)
+ assert config.variant == QwenImageVariantType.Edit
diff --git a/tests/backend/model_manager/configs/test_qwen_image_gguf_variant_detection.py b/tests/backend/model_manager/configs/test_qwen_image_gguf_variant_detection.py
new file mode 100644
index 0000000000..3a62b3031e
--- /dev/null
+++ b/tests/backend/model_manager/configs/test_qwen_image_gguf_variant_detection.py
@@ -0,0 +1,122 @@
+"""Tests for GGUF Qwen Image variant detection.
+
+Detection precedence:
+1. Explicit `variant` in override_fields wins.
+2. Presence of the `__index_timestep_zero__` tensor in the state dict marks an Edit model.
+3. Otherwise fall back to a filename heuristic ("edit" in the stem → Edit).
+4. Otherwise default to Generate.
+"""
+
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+from invokeai.backend.model_manager.taxonomy import QwenImageVariantType
+
+# Required fields for the Pydantic config model
+_REQUIRED_FIELDS = {
+ "hash": "blake3:fakehash",
+ "path": "/fake/models/test.gguf",
+ "file_size": 1000,
+ "name": "test-model",
+ "description": "test",
+ "source": "test",
+ "source_type": "path",
+ "key": "test-key",
+}
+
+
+class TestGGUFQwenImageVariantDetection:
+ """Test that GGUF Qwen Image models infer the edit variant from filename."""
+
+ def _make_mock_mod(self, filename: str) -> MagicMock:
+ """Create a mock ModelOnDisk with the given filename."""
+ mod = MagicMock()
+        mod.path = Path(f"/fake/models/{filename}")
+ return mod
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_edit_in_filename_sets_edit_variant(self, _rfo, _rif, _hgt, _hqk):
+ """A GGUF file with 'edit' in the name should be tagged as edit variant."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = self._make_mock_mod("qwen-image-edit-2511-Q4_K_M.gguf")
+ mod.load_state_dict.return_value = {}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, {**_REQUIRED_FIELDS})
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_edit_case_insensitive(self, _rfo, _rif, _hgt, _hqk):
+ """The 'edit' check should be case-insensitive."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = self._make_mock_mod("Qwen-Image-EDIT-2511-Q8_0.gguf")
+ mod.load_state_dict.return_value = {}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, {**_REQUIRED_FIELDS})
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_no_marker_no_edit_in_filename_defaults_to_generate(self, _rfo, _rif, _hgt, _hqk):
+ """A GGUF file without the marker tensor or 'edit' in the name should default to Generate."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = self._make_mock_mod("qwen-image-2512-Q4_K_M.gguf")
+ mod.load_state_dict.return_value = {}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, {**_REQUIRED_FIELDS})
+ assert config.variant == QwenImageVariantType.Generate
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_marker_tensor_sets_edit_variant(self, _rfo, _rif, _hgt, _hqk):
+ """Presence of `__index_timestep_zero__` in the state dict should set the Edit variant."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ # Filename has no "edit" marker, but the tensor is present
+ mod = self._make_mock_mod("some-arbitrary-name.gguf")
+ mod.load_state_dict.return_value = {"__index_timestep_zero__": object()}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, {**_REQUIRED_FIELDS})
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_marker_tensor_takes_precedence_over_filename(self, _rfo, _rif, _hgt, _hqk):
+ """The marker tensor wins even when the filename has no 'edit' substring."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = self._make_mock_mod("qwen-image-2512-Q4_K_M.gguf")
+ mod.load_state_dict.return_value = {"__index_timestep_zero__": object()}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(mod, {**_REQUIRED_FIELDS})
+ assert config.variant == QwenImageVariantType.Edit
+
+ @patch("invokeai.backend.model_manager.configs.main._has_qwen_image_keys", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main._has_ggml_tensors", return_value=True)
+ @patch("invokeai.backend.model_manager.configs.main.raise_if_not_file")
+ @patch("invokeai.backend.model_manager.configs.main.raise_for_override_fields")
+ def test_explicit_variant_override_not_overwritten(self, _rfo, _rif, _hgt, _hqk):
+ """An explicit variant in override_fields should not be overwritten by filename heuristic."""
+ from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+
+ mod = self._make_mock_mod("qwen-image-edit-2511-Q4_K_M.gguf")
+ mod.load_state_dict.return_value = {}
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(
+ mod, {**_REQUIRED_FIELDS, "variant": QwenImageVariantType.Generate}
+ )
+ assert config.variant == QwenImageVariantType.Generate
diff --git a/tests/backend/model_manager/configs/test_qwen_image_main_config.py b/tests/backend/model_manager/configs/test_qwen_image_main_config.py
new file mode 100644
index 0000000000..932689eb86
--- /dev/null
+++ b/tests/backend/model_manager/configs/test_qwen_image_main_config.py
@@ -0,0 +1,52 @@
+from pathlib import Path
+from tempfile import TemporaryDirectory
+from unittest.mock import MagicMock
+
+import gguf
+import pytest
+import torch
+
+from invokeai.backend.model_manager.configs.main import Main_GGUF_QwenImage_Config
+from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
+
+
+def _build_ggml_tensor() -> GGMLTensor:
+ return GGMLTensor(
+ data=torch.zeros((1,), dtype=torch.uint8),
+ ggml_quantization_type=gguf.GGMLQuantizationType.Q4_0,
+ tensor_shape=torch.Size([1, 1]),
+ compute_dtype=torch.float32,
+ )
+
+
+@pytest.mark.parametrize("is_edit_model", [True, False])
+def test_qwen_gguf_config_sets_a_variant_for_imported_models(is_edit_model: bool) -> None:
+ with TemporaryDirectory() as tmpdir:
+ model_path = Path(tmpdir) / ("qwen-image-edit.gguf" if is_edit_model else "qwen-image.gguf")
+ model_name = "Qwen Image Edit GGUF" if is_edit_model else "Qwen Image GGUF"
+ model_path.touch()
+
+ mod = MagicMock()
+ mod.path = model_path
+ mod.load_state_dict.return_value = {
+ "txt_in.weight": _build_ggml_tensor(),
+ "txt_norm.weight": _build_ggml_tensor(),
+ "img_in.weight": _build_ggml_tensor(),
+ }
+
+ config = Main_GGUF_QwenImage_Config.from_model_on_disk(
+ mod,
+ {
+ "hash": "test-hash",
+ "path": str(model_path),
+ "file_size": model_path.stat().st_size,
+ "name": model_name,
+ "source": str(model_path),
+ "source_type": "path",
+ },
+ )
+
+ if is_edit_model:
+ assert config.variant == "edit"
+ else:
+ assert config.variant == "generate"
diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_diffusers_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_diffusers_format.py
new file mode 100644
index 0000000000..4f8aad839f
--- /dev/null
+++ b/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_diffusers_format.py
@@ -0,0 +1,16 @@
+# Diffusers/PEFT-format Qwen Image LoRA state dict keys.
+# Keys use the pattern: transformer_blocks.{N}.{sub_module}.{param}
+
+state_dict_keys: dict[str, list[int]] = {
+ # Block 0 - standard LoRA (lora_down/lora_up)
+ "transformer_blocks.0.attn.to_k.lora_down.weight": [64, 3072],
+ "transformer_blocks.0.attn.to_k.lora_up.weight": [3072, 64],
+ "transformer_blocks.0.attn.to_k.alpha": [],
+ "transformer_blocks.0.attn.to_q.lora_down.weight": [64, 3072],
+ "transformer_blocks.0.attn.to_q.lora_up.weight": [3072, 64],
+ "transformer_blocks.0.attn.to_q.alpha": [],
+ # Block 1
+ "transformer_blocks.1.attn.to_k.lora_down.weight": [64, 3072],
+ "transformer_blocks.1.attn.to_k.lora_up.weight": [3072, 64],
+ "transformer_blocks.1.attn.to_k.alpha": [],
+}
diff --git a/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_kohya_format.py b/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_kohya_format.py
new file mode 100644
index 0000000000..1094b9b278
--- /dev/null
+++ b/tests/backend/patches/lora_conversions/lora_state_dicts/qwen_image_lora_kohya_format.py
@@ -0,0 +1,34 @@
+# Kohya-format Qwen Image LoRA state dict keys.
+# Keys use the pattern: lora_unet_transformer_blocks_{N}_{sub_module}.{param}
+# where sub_module uses underscores instead of dots.
+
+state_dict_keys: dict[str, list[int]] = {
+ # Block 0 - attention projections (LoKR format)
+ "lora_unet_transformer_blocks_0_attn_to_k.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_attn_to_k.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_attn_to_k.alpha": [],
+ "lora_unet_transformer_blocks_0_attn_to_q.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_attn_to_q.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_attn_to_q.alpha": [],
+ "lora_unet_transformer_blocks_0_attn_to_v.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_attn_to_v.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_attn_to_v.alpha": [],
+ "lora_unet_transformer_blocks_0_attn_to_out_0.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_attn_to_out_0.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_attn_to_out_0.alpha": [],
+ # Block 0 - add projections (text stream)
+ "lora_unet_transformer_blocks_0_attn_add_k_proj.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_attn_add_k_proj.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_attn_add_k_proj.alpha": [],
+ # Block 0 - MLP
+ "lora_unet_transformer_blocks_0_img_mlp_net_0_proj.lokr_w1": [12288, 16],
+ "lora_unet_transformer_blocks_0_img_mlp_net_0_proj.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_0_img_mlp_net_0_proj.alpha": [],
+ "lora_unet_transformer_blocks_0_txt_mlp_net_2.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_0_txt_mlp_net_2.lokr_w2": [16, 12288],
+ "lora_unet_transformer_blocks_0_txt_mlp_net_2.alpha": [],
+ # Block 1 - subset to keep test small
+ "lora_unet_transformer_blocks_1_attn_to_k.lokr_w1": [3072, 16],
+ "lora_unet_transformer_blocks_1_attn_to_k.lokr_w2": [16, 3072],
+ "lora_unet_transformer_blocks_1_attn_to_k.alpha": [],
+}
diff --git a/tests/backend/patches/lora_conversions/test_qwen_image_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_qwen_image_lora_conversion_utils.py
new file mode 100644
index 0000000000..642a20b716
--- /dev/null
+++ b/tests/backend/patches/lora_conversions/test_qwen_image_lora_conversion_utils.py
@@ -0,0 +1,149 @@
+"""Tests for Qwen Image LoRA conversion utilities."""
+
+import torch
+
+from invokeai.backend.patches.lora_conversions.qwen_image_lora_constants import (
+ QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX,
+)
+from invokeai.backend.patches.lora_conversions.qwen_image_lora_conversion_utils import (
+ _convert_kohya_key,
+ is_state_dict_likely_kohya_qwen_image,
+ lora_model_from_qwen_image_state_dict,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.qwen_image_lora_diffusers_format import (
+ state_dict_keys as diffusers_state_dict_keys,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.qwen_image_lora_kohya_format import (
+ state_dict_keys as kohya_state_dict_keys,
+)
+from tests.backend.patches.lora_conversions.lora_state_dicts.utils import keys_to_mock_state_dict
+
+# ---- Format detection tests ----
+
+
+def test_is_kohya_format_true():
+ """Kohya-format state dict is correctly identified."""
+ state_dict = keys_to_mock_state_dict(kohya_state_dict_keys)
+ assert is_state_dict_likely_kohya_qwen_image(state_dict)
+
+
+def test_is_kohya_format_false_diffusers():
+ """Diffusers-format state dict is not identified as Kohya."""
+ state_dict = keys_to_mock_state_dict(diffusers_state_dict_keys)
+ assert not is_state_dict_likely_kohya_qwen_image(state_dict)
+
+
+def test_is_kohya_format_false_empty():
+ """Empty state dict is not identified as Kohya."""
+ assert not is_state_dict_likely_kohya_qwen_image({})
+
+
+# ---- Kohya key conversion tests ----
+
+
+def test_convert_kohya_key_attention():
+ """Kohya attention projection keys convert correctly."""
+ assert _convert_kohya_key("lora_unet_transformer_blocks_0_attn_to_k") == "transformer_blocks.0.attn.to_k"
+ assert _convert_kohya_key("lora_unet_transformer_blocks_5_attn_to_q") == "transformer_blocks.5.attn.to_q"
+ assert _convert_kohya_key("lora_unet_transformer_blocks_0_attn_to_v") == "transformer_blocks.0.attn.to_v"
+ assert _convert_kohya_key("lora_unet_transformer_blocks_0_attn_to_out_0") == "transformer_blocks.0.attn.to_out.0"
+ assert (
+ _convert_kohya_key("lora_unet_transformer_blocks_0_attn_add_k_proj") == "transformer_blocks.0.attn.add_k_proj"
+ )
+
+
+def test_convert_kohya_key_mlp():
+ """Kohya MLP keys convert correctly."""
+ assert (
+ _convert_kohya_key("lora_unet_transformer_blocks_0_img_mlp_net_0_proj")
+ == "transformer_blocks.0.img_mlp.net.0.proj"
+ )
+ assert _convert_kohya_key("lora_unet_transformer_blocks_0_txt_mlp_net_2") == "transformer_blocks.0.txt_mlp.net.2"
+
+
+def test_convert_kohya_key_unknown_returns_none():
+ """Unknown Kohya sub-module returns None."""
+ assert _convert_kohya_key("lora_unet_transformer_blocks_0_unknown_projection") is None
+
+
+def test_convert_kohya_key_non_matching_returns_none():
+ """Key that doesn't match the regex returns None."""
+ assert _convert_kohya_key("some_random_key") is None
+
+
+# ---- Full model conversion tests ----
+
+
+def test_kohya_conversion_produces_correct_layer_keys():
+ """Kohya state dict converts to ModelPatchRaw with correct prefixed layer keys."""
+ state_dict = keys_to_mock_state_dict(kohya_state_dict_keys)
+ model = lora_model_from_qwen_image_state_dict(state_dict, alpha=None)
+
+ # Build expected keys: convert each Kohya layer name to model path, add prefix
+ expected_keys: set[str] = set()
+ for k in kohya_state_dict_keys:
+ layer_name = k.split(".", 1)[0] # e.g. lora_unet_transformer_blocks_0_attn_to_k
+ model_path = _convert_kohya_key(layer_name)
+ if model_path is not None:
+ expected_keys.add(f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}{model_path}")
+
+ assert set(model.layers.keys()) == expected_keys
+ assert len(model.layers) > 0
+
+
+def test_diffusers_conversion_produces_correct_layer_keys():
+ """Diffusers state dict converts to ModelPatchRaw with correct prefixed layer keys."""
+ state_dict = keys_to_mock_state_dict(diffusers_state_dict_keys)
+ model = lora_model_from_qwen_image_state_dict(state_dict, alpha=None)
+
+ expected_keys = {
+ f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}transformer_blocks.0.attn.to_k",
+ f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}transformer_blocks.0.attn.to_q",
+ f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}transformer_blocks.1.attn.to_k",
+ }
+
+ assert set(model.layers.keys()) == expected_keys
+
+
+def test_diffusers_with_transformer_prefix_strips_it():
+ """Diffusers keys with 'transformer.' prefix get it stripped."""
+ state_dict = {
+ "transformer.transformer_blocks.0.attn.to_k.lora_down.weight": torch.empty(64, 3072),
+ "transformer.transformer_blocks.0.attn.to_k.lora_up.weight": torch.empty(3072, 64),
+ }
+ model = lora_model_from_qwen_image_state_dict(state_dict, alpha=None)
+
+ expected_key = f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}transformer_blocks.0.attn.to_k"
+ assert expected_key in model.layers
+
+
+# ---- Unknown key handling tests ----
+
+
+def test_kohya_unknown_submodule_is_silently_skipped():
+ """Unknown Kohya sub-modules are skipped, producing no layers for them."""
+ state_dict = {
+ # Known key — should produce a layer
+ "lora_unet_transformer_blocks_0_attn_to_k.lokr_w1": torch.empty(3072, 16),
+ "lora_unet_transformer_blocks_0_attn_to_k.lokr_w2": torch.empty(16, 3072),
+ # Unknown key — should be skipped
+ "lora_unet_transformer_blocks_0_unknown_projection.lokr_w1": torch.empty(3072, 16),
+ "lora_unet_transformer_blocks_0_unknown_projection.lokr_w2": torch.empty(16, 3072),
+ }
+ model = lora_model_from_qwen_image_state_dict(state_dict, alpha=None)
+
+ # Only the known key should produce a layer
+ assert len(model.layers) == 1
+ expected_key = f"{QWEN_IMAGE_EDIT_LORA_TRANSFORMER_PREFIX}transformer_blocks.0.attn.to_k"
+ assert expected_key in model.layers
+
+
+def test_kohya_all_unknown_submodules_produces_empty_model():
+ """State dict with only unknown Kohya sub-modules produces an empty ModelPatchRaw."""
+ state_dict = {
+ "lora_unet_transformer_blocks_0_totally_unknown.lokr_w1": torch.empty(3072, 16),
+ "lora_unet_transformer_blocks_0_totally_unknown.lokr_w2": torch.empty(16, 3072),
+ }
+ model = lora_model_from_qwen_image_state_dict(state_dict, alpha=None)
+
+ assert len(model.layers) == 0
diff --git a/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/__test_metadata__.json b/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/__test_metadata__.json
new file mode 100644
index 0000000000..f3841f40a9
--- /dev/null
+++ b/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/__test_metadata__.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f2acce0645d1450869c1e941cced0544dfe2ae64d56ef3642c4975dac79f4a3
+size 242
diff --git a/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/qwen_image_edit_lightning_test.safetensors b/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/qwen_image_edit_lightning_test.safetensors
new file mode 100644
index 0000000000..a79d5d6769
--- /dev/null
+++ b/tests/model_identification/stripped_models/165644cc-b656-4680-88ac-947901272c4f/qwen_image_edit_lightning_test.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b79928d2b392f4b530bd4afd1ba32f029c57ad57543bd3efbaf20709f4e0c7a0
+size 2812
diff --git a/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/__test_metadata__.json b/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/__test_metadata__.json
new file mode 100644
index 0000000000..17b55a3635
--- /dev/null
+++ b/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/__test_metadata__.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9804b670940d0923d209724b4cadd30cc305fc307f55878e608b3f6d18d1c892
+size 225
diff --git a/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/qwen_image_kohya_lokr_test.safetensors b/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/qwen_image_kohya_lokr_test.safetensors
new file mode 100644
index 0000000000..904e54419a
--- /dev/null
+++ b/tests/model_identification/stripped_models/4fa625c8-8b55-4170-9a5d-2e5b77a96429/qwen_image_kohya_lokr_test.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ea2ee286dd9a1b1275fdb02222808590d0084b470aea9861a263c7e6a30aeb5
+size 2934
diff --git a/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/__test_metadata__.json b/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/__test_metadata__.json
new file mode 100644
index 0000000000..37a1542f21
--- /dev/null
+++ b/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/__test_metadata__.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe2e33080025617407bc6ffa647b55b3691f4a73515fd1eb0413054af5ee312a
+size 228
diff --git a/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/qwen_image_community_lora_test.safetensors b/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/qwen_image_community_lora_test.safetensors
new file mode 100644
index 0000000000..97fff00bc7
--- /dev/null
+++ b/tests/model_identification/stripped_models/5e72cdbb-da06-42c0-891e-f63d54ba93e3/qwen_image_community_lora_test.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:399bb52df3e39a59ddf6f1a5445aa7464fff14ffd700158ec98aef3f988a9cbc
+size 1560
diff --git a/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/__test_metadata__.json b/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/__test_metadata__.json
new file mode 100644
index 0000000000..5a41ffed04
--- /dev/null
+++ b/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/__test_metadata__.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32053abc6257adf4771405fddfdaed2b91497c7cd7b0ea6af0aa29f9e008ca2f
+size 233
diff --git a/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/qwen_image_kohya_lokr_test.safetensors b/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/qwen_image_kohya_lokr_test.safetensors
new file mode 100644
index 0000000000..6e34832a71
--- /dev/null
+++ b/tests/model_identification/stripped_models/f9f3c9fa-9449-4f90-996e-ea6be6b7d233/qwen_image_kohya_lokr_test.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b3d666baf329c922be86eacd12517cf734514da91377787d2f3cbd2b1a017c0
+size 2910
diff --git a/uv.lock b/uv.lock
index 226aecacc3..ef0ad02217 100644
--- a/uv.lock
+++ b/uv.lock
@@ -579,7 +579,7 @@ wheels = [
[[package]]
name = "diffusers"
-version = "0.36.0"
+version = "0.37.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock" },
@@ -592,9 +592,9 @@ dependencies = [
{ name = "requests" },
{ name = "safetensors" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/88/45/ccb2e2180ddf475a0f931dac6a50346310e4c464ce3cccb8a65d1fc1e16d/diffusers-0.36.0.tar.gz", hash = "sha256:a9cde8721b415bde6a678f2d02abb85396487e1b0e0d2b4abb462d14a9825ab0", size = 3795088, upload-time = "2025-12-08T10:14:34.255Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/58/3b/01d0ff800b811c5ad8bba682f4c6abf1d7071cd81464c01724333fefb7ba/diffusers-0.37.0.tar.gz", hash = "sha256:408789af73898585f525afd07ca72b3955affea4216a669558e9f59b5b1fe704", size = 4141136, upload-time = "2026-03-05T14:58:39.704Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/35/50/281f92cb1f83854dbd79b6e958b3bc5018607e2542971d41604ba7a14b2f/diffusers-0.36.0-py3-none-any.whl", hash = "sha256:525d42abc74bfc3b2db594999961295c054b48ef40a11724dacf50e6abd1af98", size = 4597884, upload-time = "2025-12-08T10:14:31.979Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/55/586a3a2b9c95f371c9c3cb048c3cac15aedcce8d6d53ebd6bbc46860722d/diffusers-0.37.0-py3-none-any.whl", hash = "sha256:7eab74bf896974250b5e1027cae813aba1004f02d97c9b44891b83713386aa08", size = 5000449, upload-time = "2026-03-05T14:58:37.361Z" },
]
[package.optional-dependencies]
@@ -1122,7 +1122,7 @@ requires-dist = [
{ name = "blake3" },
{ name = "compel", specifier = "==2.1.1" },
{ name = "deprecated" },
- { name = "diffusers", extras = ["torch"], specifier = "==0.36.0" },
+ { name = "diffusers", extras = ["torch"], specifier = "==0.37.0" },
{ name = "dnspython" },
{ name = "dynamicprompts" },
{ name = "einops" },
@@ -3639,10 +3639,10 @@ dependencies = [
{ name = "torch", version = "2.7.1+cpu", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "extra == 'extra-8-invokeai-cpu' or (extra == 'extra-8-invokeai-cuda' and extra == 'extra-8-invokeai-rocm')" },
]
wheels = [
- { url = "https://download.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4e0cbc165a472605d0c13da68ae22e84b17a6b815d5e600834777823e1bcb658" },
- { url = "https://download.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:9482adee074f60a45fd69892f7488281aadfda7836948c94b0a9b0caf55d1d67" },
- { url = "https://download.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b5fa7044bd82c6358e8229351c98070cf3a7bf4a6e89ea46352ae6c65745ef94" },
- { url = "https://download.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:433cb4dbced7291f17064cea08ac1e5aebd02ec190e1c207d117ad62a8961f2b" },
+ { url = "https://download-r2.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4e0cbc165a472605d0c13da68ae22e84b17a6b815d5e600834777823e1bcb658" },
+ { url = "https://download-r2.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:9482adee074f60a45fd69892f7488281aadfda7836948c94b0a9b0caf55d1d67" },
+ { url = "https://download-r2.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b5fa7044bd82c6358e8229351c98070cf3a7bf4a6e89ea46352ae6c65745ef94" },
+ { url = "https://download-r2.pytorch.org/whl/cpu/torchvision-0.22.1%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:433cb4dbced7291f17064cea08ac1e5aebd02ec190e1c207d117ad62a8961f2b" },
]
[[package]]
@@ -3663,10 +3663,10 @@ dependencies = [
{ name = "torch", version = "2.7.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "extra == 'extra-8-invokeai-cuda' or (extra == 'extra-8-invokeai-cpu' and extra == 'extra-8-invokeai-rocm')" },
]
wheels = [
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:92568ac46b13a8c88b61589800b1b9c4629be091ea7ce080fc6fc622e11e0915" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp311-cp311-win_amd64.whl", hash = "sha256:85ecd729c947151eccea502853be6efc2c0029dc26e6e5148e04684aed008390" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f64ef9bb91d71ab35d8384912a19f7419e35928685bc67544d58f45148334373" },
- { url = "https://download.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:650561ba326d21021243f5e064133dc62dc64d52f79623db5cd76637a9665f96" },
+ { url = "https://download-r2.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:92568ac46b13a8c88b61589800b1b9c4629be091ea7ce080fc6fc622e11e0915" },
+ { url = "https://download-r2.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp311-cp311-win_amd64.whl", hash = "sha256:85ecd729c947151eccea502853be6efc2c0029dc26e6e5148e04684aed008390" },
+ { url = "https://download-r2.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f64ef9bb91d71ab35d8384912a19f7419e35928685bc67544d58f45148334373" },
+ { url = "https://download-r2.pytorch.org/whl/cu128/torchvision-0.22.1%2Bcu128-cp312-cp312-win_amd64.whl", hash = "sha256:650561ba326d21021243f5e064133dc62dc64d52f79623db5cd76637a9665f96" },
]
[[package]]
@@ -3689,8 +3689,8 @@ dependencies = [
{ name = "torch", version = "2.7.1+rocm6.3", source = { registry = "https://download.pytorch.org/whl/rocm6.3" }, marker = "(extra == 'extra-8-invokeai-cpu' and extra == 'extra-8-invokeai-cuda') or (extra != 'extra-8-invokeai-cuda' and extra == 'extra-8-invokeai-rocm') or (extra != 'extra-8-invokeai-cpu' and extra == 'extra-8-invokeai-rocm')" },
]
wheels = [
- { url = "https://download.pytorch.org/whl/rocm6.3/torchvision-0.22.1%2Brocm6.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c150162c2e1de371e5a52c0eb4a98541f307e01716cfe5c850f25c7caa3d3fc4" },
- { url = "https://download.pytorch.org/whl/rocm6.3/torchvision-0.22.1%2Brocm6.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:0dce205fb04d9eb2f6feb74faf17cba9180aff70a8c8ac084912ce41b2dc0ab7" },
+ { url = "https://download-r2.pytorch.org/whl/rocm6.3/torchvision-0.22.1%2Brocm6.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c150162c2e1de371e5a52c0eb4a98541f307e01716cfe5c850f25c7caa3d3fc4" },
+ { url = "https://download-r2.pytorch.org/whl/rocm6.3/torchvision-0.22.1%2Brocm6.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:0dce205fb04d9eb2f6feb74faf17cba9180aff70a8c8ac084912ce41b2dc0ab7" },
]
[[package]]