Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 02:48:01 -05:00)

Compare commits: 193 commits
SHA1:
7adac4581a, 962db86cac, d65ec0e250, 7fdde5e84a, 895956bcfe, f27d26cfa2, 965bcba6c2, c9f2460ff2,
5abbbf4b5b, e66688edbf, a519483f95, 75c91604bb, 53bdaba7b6, f3f405ca77, dda69950a7, b2198b9fa7,
02b91e8e7b, 09bf7c35eb, deb9a65b3d, 5be9a7227c, bb9f886bd4, 46520946f8, 830880a6fc, 63b94a8ff3,
f12924a1e1, f8e51c86f5, c84a646735, b52f8121af, 05bed3fddd, 87ea20192f, 2f9c95c462, 47cadbb48e,
23518b9830, 94dcf391a6, e7a60c01ed, 4b54ccc29c, c4183ec98c, 5a9cbe35e0, df18fe0298, e5591d145f,
371c187fc3, e982c95687, 0eeb0dd67b, 28c74cbe38, 7414f68acc, a984462b80, c6c2567203, f05c8b909f,
73330a1308, 6f568d48ed, 81a97f3796, 3f9535d2f9, 83bfbdcad4, 729428084c, 523a932ecc, 21be7d7157,
a29fb18c0b, aed446f013, e81c9b0d6e, 89f457c486, 30ed09a36e, 3334652acc, e83536f396, 97593f95f6,
7f14cee17e, 0a836d6fc1, 54e781d5bb, aa71d0c817, 07313e429d, bad5023238, 73a0d2c06c, 918e9c8ccc,
1e388e9ca4, 5b84d45932, dc3f1184b2, 87438bcad7, afd894fd04, df305c0b99, deecb7f3c3, dd5f353465,
a8759ea0a6, 3ff529c718, 3b0fecafb0, 099011000f, 155daa3137, c493e223cf, 124ca23f8b, a8023cbcb6,
b733d3897e, ef95b37ace, 4feff5a185, 6c8dc32d5c, e5da808b2f, 7d3434da62, 4cc70d9f16, 7988bc1a59,
1756d885f6, 9ec4d968aa, 76c09301f9, 1cf8749754, 5d6c468833, 80b3f44ae8, c77c12aa1d, 731992c5ec,
c259899bf4, f62b9ad919, 57533657f9, e35537e60a, a89d68b93a, 59a8c0d441, d5d08f6569, 8a4282365e,
b9c7bc8b0e, 0f45ee04a2, 839a791509, f03a2bf03f, 4136817d30, 7f0452173b, 8e46b03f09, 9045237bfb,
58959a18cb, e51588197f, c5319ac48c, 50657650c2, f657c95e45, 2d3a2f9842, 008837642e, 1a84a2fb7e,
b87febcf4c, 95a9bb6c7b, 93ec9a048f, ec6cea6705, bfbcaad8c2, 3694158434, 814fb939c0, 4cb73e6c19,
e8aed67cf1, f56dd01419, ed9cd6a7a2, c44c28ec4c, e1f7359171, 3e97d49a69, c12585e52d, b39774a57c,
8988539cd5, 88c68e8016, 5073c7d0a3, 84e86819b8, 440e3e01ac, c2302f7ab1, 2594eed1af, e8db1c1d5a,
d5c5e8e8ed, 518a7c941f, bdafe53f2e, cf0cbaf0ae, ac6fc6eccb, 07d65b8fd1, 3c2e6378ca, 445f122f37,
8c0ee9c48f, 0eb237ac64, 9aa04f0bea, 76e2f41ec7, 1353c3301a, bf209663ac, 04b96dd7b4, 79b2c68853,
aac456527e, c88b835373, 9da116fd3d, 201d7f1fdb, 17a5b1bd28, a409aec00f, b0593eda92, 9acb24914f,
ab4433da2f, d4423aa16f, 1f6430c1b0, 8e28888bc4, b6b21dbcbf, 7b48ef2264, 9c542ed655, 4c02ba908a,
82293ae3b2, f1fde792ee, e82393f7ed, d5211a8088, 3b095b5945, 34959ef573, 7f10f8f96a, f2689598c0,
551c78d9f3
.github/CODEOWNERS (vendored, 6 changes)

@@ -1,12 +1,12 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku

 # documentation
 /docs/ @lstein @blessedcoolant @hipsterusername @Millu
 /mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu

 # nodes
-/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername
+/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername @jazzhaiku

 # installation and configuration
 /pyproject.toml @lstein @blessedcoolant @hipsterusername
@@ -22,7 +22,7 @@
 /invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername

 # generation, model management, postprocessing
-/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername
+/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername @jazzhaiku

 # front ends
 /invokeai/frontend/CLI @lstein @hipsterusername
.github/workflows/frontend-checks.yml (vendored, 7 changes)

@@ -44,7 +44,12 @@ jobs:
       - name: check for changed frontend files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             frontend:
.github/workflows/frontend-tests.yml (vendored, 7 changes)

@@ -44,7 +44,12 @@ jobs:
       - name: check for changed frontend files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             frontend:
.github/workflows/python-checks.yml (vendored, 9 changes)

@@ -43,7 +43,12 @@ jobs:
       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:
@@ -62,7 +67,7 @@ jobs:

       - name: install ruff
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: pip install ruff==0.6.0
+        run: pip install ruff==0.9.9
         shell: bash

       - name: ruff check
.github/workflows/python-tests.yml (vendored, 7 changes)

@@ -77,7 +77,12 @@ jobs:
       - name: check for changed python files
         if: ${{ inputs.always_run != true }}
         id: changed-files
-        uses: tj-actions/changed-files@v42
+        # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+        # See:
+        # - CVE-2025-30066
+        # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+        # - https://github.com/tj-actions/changed-files/issues/2463
+        uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
         with:
           files_yaml: |
             python:
.github/workflows/typegen-checks.yml (vendored, 7 changes)

@@ -42,7 +42,12 @@ jobs:
      - name: check for changed files
        if: ${{ inputs.always_run != true }}
        id: changed-files
-       uses: tj-actions/changed-files@v42
+       # Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
+       # See:
+       # - CVE-2025-30066
+       # - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
+       # - https://github.com/tj-actions/changed-files/issues/2463
+       uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
        with:
          files_yaml: |
            src:
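The same hash-pin is applied in all five workflows above. A small illustrative script for auditing a checkout for `uses:` references that are not pinned to a full 40-character commit SHA; this sketch is not part of the diff, and its paths and regexes are assumptions:

# audit_action_pins.py: flag GitHub Actions "uses:" refs not pinned to a 40-char commit SHA.
# Illustrative sketch only; tags like @v42 are mutable and can be retargeted, a full SHA cannot.
import re
from pathlib import Path

USES = re.compile(r"^\s*(?:-\s*)?uses:\s*(\S+)")
PINNED = re.compile(r"@[0-9a-f]{40}$")

for workflow in Path(".github/workflows").glob("*.yml"):
    for lineno, line in enumerate(workflow.read_text().splitlines(), start=1):
        match = USES.match(line)
        # Local actions (./path) have no "@" and are skipped.
        if match and "@" in match.group(1) and not PINNED.search(match.group(1)):
            print(f"{workflow}:{lineno}: unpinned action {match.group(1)}")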
@@ -36,6 +36,7 @@ from invokeai.app.services.style_preset_images.style_preset_images_disk import S
 from invokeai.app.services.style_preset_records.style_preset_records_sqlite import SqliteStylePresetRecordsStorage
 from invokeai.app.services.urls.urls_default import LocalUrlService
 from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
+from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_disk import WorkflowThumbnailFileStorageDisk
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.version.invokeai_version import __version__
@@ -83,6 +84,7 @@ class ApiDependencies:

         model_images_folder = config.models_path
         style_presets_folder = config.style_presets_path
+        workflow_thumbnails_folder = config.workflow_thumbnails_path

         db = init_db(config=config, logger=logger, image_files=image_files)

@@ -120,6 +122,7 @@ class ApiDependencies:
         workflow_records = SqliteWorkflowRecordsStorage(db=db)
         style_preset_records = SqliteStylePresetRecordsStorage(db=db)
         style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
+        workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)

         services = InvocationServices(
             board_image_records=board_image_records,
@@ -147,6 +150,7 @@ class ApiDependencies:
             conditioning=conditioning,
             style_preset_records=style_preset_records,
             style_preset_image_files=style_preset_image_files,
+            workflow_thumbnails=workflow_thumbnails,
         )

         ApiDependencies.invoker = Invoker(services)
invokeai/app/api/extract_metadata_from_image.py (new file, 124 lines)

@@ -0,0 +1,124 @@
import json
import logging
from dataclasses import dataclass

from PIL import Image

from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutIDValidator


@dataclass
class ExtractedMetadata:
    invokeai_metadata: str | None
    invokeai_workflow: str | None
    invokeai_graph: str | None


def extract_metadata_from_image(
    pil_image: Image.Image,
    invokeai_metadata_override: str | None,
    invokeai_workflow_override: str | None,
    invokeai_graph_override: str | None,
    logger: logging.Logger,
) -> ExtractedMetadata:
    """
    Extracts the "invokeai_metadata", "invokeai_workflow", and "invokeai_graph" data embedded in the PIL Image.

    These items are stored as stringified JSON in the image file's metadata, so we need to do some parsing to validate
    them. Once parsed, the values are returned as they came (as strings), or None if they are not present or invalid.

    In some situations, we may prefer to override the values extracted from the image file with some other values.

    For example, when uploading an image via API, the client can optionally provide the metadata directly in the request,
    as opposed to embedding it in the image file. In this case, the client-provided metadata will be used instead of the
    metadata embedded in the image file.

    Args:
        pil_image: The PIL Image object.
        invokeai_metadata_override: The metadata override provided by the client.
        invokeai_workflow_override: The workflow override provided by the client.
        invokeai_graph_override: The graph override provided by the client.
        logger: The logger to use for debug logging.

    Returns:
        ExtractedMetadata: The extracted metadata, workflow, and graph.
    """

    # The fallback value for metadata is None.
    stringified_metadata: str | None = None

    # Use the metadata override if provided, else attempt to extract it from the image file.
    metadata_raw = invokeai_metadata_override or pil_image.info.get("invokeai_metadata", None)

    # If the metadata is present in the image file, we will attempt to parse it as JSON. When we create images,
    # we always store metadata as a stringified JSON dict. So, we expect it to be a string here.
    if isinstance(metadata_raw, str):
        try:
            # Must be a JSON string
            metadata_parsed = json.loads(metadata_raw)
            # Must be a dict
            if isinstance(metadata_parsed, dict):
                # Looks good, overwrite the fallback value
                stringified_metadata = metadata_raw
        except Exception as e:
            logger.debug(f"Failed to parse metadata for uploaded image, {e}")
            pass

    # We expect the workflow, if embedded in the image, to be a JSON-stringified WorkflowWithoutID. We will store it
    # as a string.
    workflow_raw: str | None = invokeai_workflow_override or pil_image.info.get("invokeai_workflow", None)

    # The fallback value for workflow is None.
    stringified_workflow: str | None = None

    # If the workflow is present in the image file, we will attempt to parse it as JSON. When we create images, we
    # always store workflows as a stringified JSON WorkflowWithoutID. So, we expect it to be a string here.
    if isinstance(workflow_raw, str):
        try:
            # Validate the workflow JSON before storing it
            WorkflowWithoutIDValidator.validate_json(workflow_raw)
            # Looks good, overwrite the fallback value
            stringified_workflow = workflow_raw
        except Exception:
            logger.debug("Failed to parse workflow for uploaded image")
            pass

    # We expect the graph, if embedded in the image, to be a JSON-stringified Graph. We will store it as a
    # string.
    graph_raw: str | None = invokeai_graph_override or pil_image.info.get("invokeai_graph", None)

    # The fallback value for graph is None.
    stringified_graph: str | None = None

    # If the graph is present in the image file, we will attempt to parse it as JSON. When we create images, we
    # always store graphs as a stringified JSON Graph. So, we expect it to be a string here.
    if isinstance(graph_raw, str):
        try:
            # TODO(psyche): Due to pydantic's handling of None values, it is possible for the graph to fail validation,
            # even if it is a direct dump of a valid graph. Node fields in the graph are allowed to be unset if
            # they have incoming connections, but something about the ser/de process cannot adequately handle this.
            #
            # In lieu of fixing the graph validation, we will just do a simple check here to see if the graph is a dict
            # with the correct keys. This is not a perfect solution, but it should be good enough for now.

            # FIX ME: Validate the graph JSON before storing it
            # Graph.model_validate_json(graph_raw)

            # Crappy workaround to validate JSON
            graph_parsed = json.loads(graph_raw)
            if not isinstance(graph_parsed, dict):
                raise ValueError("Not a dict")
            if not isinstance(graph_parsed.get("nodes", None), dict):
                raise ValueError("'nodes' is not a dict")
            if not isinstance(graph_parsed.get("edges", None), list):
                raise ValueError("'edges' is not a list")

            # Looks good, overwrite the fallback value
            stringified_graph = graph_raw
        except Exception as e:
            logger.debug(f"Failed to parse graph for uploaded image, {e}")
            pass

    return ExtractedMetadata(
        invokeai_metadata=stringified_metadata, invokeai_workflow=stringified_workflow, invokeai_graph=stringified_graph
    )
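A minimal sketch of calling the new helper outside the upload route; the image path and logger name below are placeholders, not part of the diff:

# Hypothetical standalone usage of extract_metadata_from_image; "my_image.png" is a placeholder.
import logging

from PIL import Image

from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image

logger = logging.getLogger("extract_demo")
pil_image = Image.open("my_image.png")

# With no client-provided overrides, the helper falls back to whatever is embedded in the PNG.
extracted = extract_metadata_from_image(
    pil_image=pil_image,
    invokeai_metadata_override=None,
    invokeai_workflow_override=None,
    invokeai_graph_override=None,
    logger=logger,
)
# Each field is either a validated JSON string or None.
print(extracted.invokeai_metadata is not None, extracted.invokeai_workflow is not None)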
@@ -6,9 +6,10 @@ from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request,
 from fastapi.responses import FileResponse
 from fastapi.routing import APIRouter
 from PIL import Image
-from pydantic import BaseModel, Field, JsonValue
+from pydantic import BaseModel, Field

 from invokeai.app.api.dependencies import ApiDependencies
+from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
 from invokeai.app.invocations.fields import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
@@ -45,18 +46,16 @@ async def upload_image(
     board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
     session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
     crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
-    metadata: Optional[JsonValue] = Body(
-        default=None, description="The metadata to associate with the image", embed=True
+    metadata: Optional[str] = Body(
+        default=None,
+        description="The metadata to associate with the image, must be a stringified JSON dict",
+        embed=True,
     ),
 ) -> ImageDTO:
     """Uploads an image"""
     if not file.content_type or not file.content_type.startswith("image"):
         raise HTTPException(status_code=415, detail="Not an image")

-    _metadata = None
-    _workflow = None
-    _graph = None
-
     contents = await file.read()
     try:
         pil_image = Image.open(io.BytesIO(contents))
@@ -67,30 +66,13 @@ async def upload_image(
         ApiDependencies.invoker.services.logger.error(traceback.format_exc())
         raise HTTPException(status_code=415, detail="Failed to read image")

-    # TODO: retain non-invokeai metadata on upload?
-
-    # attempt to parse metadata from image
-    metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
-    if isinstance(metadata_raw, str):
-        _metadata = metadata_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse metadata for uploaded image")
-        pass
-
-    # attempt to parse workflow from image
-    workflow_raw = pil_image.info.get("invokeai_workflow", None)
-    if isinstance(workflow_raw, str):
-        _workflow = workflow_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse workflow for uploaded image")
-        pass
-
-    # attempt to extract graph from image
-    graph_raw = pil_image.info.get("invokeai_graph", None)
-    if isinstance(graph_raw, str):
-        _graph = graph_raw
-    else:
-        ApiDependencies.invoker.services.logger.debug("Failed to parse graph for uploaded image")
-        pass
+    extracted_metadata = extract_metadata_from_image(
+        pil_image=pil_image,
+        invokeai_metadata_override=metadata,
+        invokeai_workflow_override=None,
+        invokeai_graph_override=None,
+        logger=ApiDependencies.invoker.services.logger,
+    )

     try:
         image_dto = ApiDependencies.invoker.services.images.create(
@@ -99,9 +81,9 @@ async def upload_image(
             image_category=image_category,
             session_id=session_id,
             board_id=board_id,
-            metadata=_metadata,
-            workflow=_workflow,
-            graph=_graph,
+            metadata=extracted_metadata.invokeai_metadata,
+            workflow=extracted_metadata.invokeai_workflow,
+            graph=extracted_metadata.invokeai_graph,
             is_intermediate=is_intermediate,
         )
@@ -1,6 +1,10 @@
+import io
+import traceback
 from typing import Optional

-from fastapi import APIRouter, Body, HTTPException, Path, Query
+from fastapi import APIRouter, Body, File, HTTPException, Path, Query, UploadFile
 from fastapi.responses import FileResponse
+from PIL import Image

 from invokeai.app.api.dependencies import ApiDependencies
 from invokeai.app.services.shared.pagination import PaginatedResults
@@ -10,11 +14,14 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
     WorkflowCategory,
     WorkflowNotFoundError,
     WorkflowRecordDTO,
     WorkflowRecordListItemDTO,
+    WorkflowRecordListItemWithThumbnailDTO,
     WorkflowRecordOrderBy,
+    WorkflowRecordWithThumbnailDTO,
     WorkflowWithoutID,
 )
+from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_common import WorkflowThumbnailFileNotFoundException

+IMAGE_MAX_AGE = 31536000
 workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])

@@ -22,15 +29,17 @@ workflows_router = APIRouter(prefix="/v1/workflows", tags=["workflows"])
     "/i/{workflow_id}",
     operation_id="get_workflow",
     responses={
-        200: {"model": WorkflowRecordDTO},
+        200: {"model": WorkflowRecordWithThumbnailDTO},
     },
 )
 async def get_workflow(
     workflow_id: str = Path(description="The workflow to get"),
-) -> WorkflowRecordDTO:
+) -> WorkflowRecordWithThumbnailDTO:
     """Gets a workflow"""
     try:
-        return ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+        thumbnail_url = ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow_id)
+        workflow = ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+        return WorkflowRecordWithThumbnailDTO(thumbnail_url=thumbnail_url, **workflow.model_dump())
     except WorkflowNotFoundError:
         raise HTTPException(status_code=404, detail="Workflow not found")

@@ -57,6 +66,11 @@ async def delete_workflow(
     workflow_id: str = Path(description="The workflow to delete"),
 ) -> None:
     """Deletes a workflow"""
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
+    except WorkflowThumbnailFileNotFoundException:
+        # It's OK if the workflow has no thumbnail file. We can still delete the workflow.
+        pass
     ApiDependencies.invoker.services.workflow_records.delete(workflow_id)

@@ -78,7 +92,7 @@ async def create_workflow(
     "/",
     operation_id="list_workflows",
     responses={
-        200: {"model": PaginatedResults[WorkflowRecordListItemDTO]},
+        200: {"model": PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]},
     },
 )
 async def list_workflows(
@@ -88,10 +102,158 @@ async def list_workflows(
         default=WorkflowRecordOrderBy.Name, description="The attribute to order by"
     ),
     direction: SQLiteDirection = Query(default=SQLiteDirection.Ascending, description="The direction to order by"),
-    category: WorkflowCategory = Query(default=WorkflowCategory.User, description="The category of workflow to get"),
+    categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories of workflow to get"),
+    tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
     query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
-) -> PaginatedResults[WorkflowRecordListItemDTO]:
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
     """Gets a page of workflows"""
-    return ApiDependencies.invoker.services.workflow_records.get_many(
-        order_by=order_by, direction=direction, page=page, per_page=per_page, query=query, category=category
-    )
+    workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
+    workflows = ApiDependencies.invoker.services.workflow_records.get_many(
+        order_by=order_by,
+        direction=direction,
+        page=page,
+        per_page=per_page,
+        query=query,
+        categories=categories,
+        tags=tags,
+        has_been_opened=has_been_opened,
+    )
+    for workflow in workflows.items:
+        workflows_with_thumbnails.append(
+            WorkflowRecordListItemWithThumbnailDTO(
+                thumbnail_url=ApiDependencies.invoker.services.workflow_thumbnails.get_url(workflow.workflow_id),
+                **workflow.model_dump(),
+            )
+        )
+    return PaginatedResults[WorkflowRecordListItemWithThumbnailDTO](
+        items=workflows_with_thumbnails,
+        total=workflows.total,
+        page=workflows.page,
+        pages=workflows.pages,
+        per_page=workflows.per_page,
+    )
+
+
+@workflows_router.put(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="set_workflow_thumbnail",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def set_workflow_thumbnail(
+    workflow_id: str = Path(description="The workflow to update"),
+    image: UploadFile = File(description="The image file to upload"),
+):
+    """Sets a workflow's thumbnail image"""
+    try:
+        ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    except WorkflowNotFoundError:
+        raise HTTPException(status_code=404, detail="Workflow not found")
+
+    if not image.content_type or not image.content_type.startswith("image"):
+        raise HTTPException(status_code=415, detail="Not an image")
+
+    contents = await image.read()
+    try:
+        pil_image = Image.open(io.BytesIO(contents))
+    except Exception:
+        ApiDependencies.invoker.services.logger.error(traceback.format_exc())
+        raise HTTPException(status_code=415, detail="Failed to read image")
+
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.save(workflow_id, pil_image)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@workflows_router.delete(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="delete_workflow_thumbnail",
+    responses={
+        200: {"model": WorkflowRecordDTO},
+    },
+)
+async def delete_workflow_thumbnail(
+    workflow_id: str = Path(description="The workflow to update"),
+):
+    """Removes a workflow's thumbnail image"""
+    try:
+        ApiDependencies.invoker.services.workflow_records.get(workflow_id)
+    except WorkflowNotFoundError:
+        raise HTTPException(status_code=404, detail="Workflow not found")
+
+    try:
+        ApiDependencies.invoker.services.workflow_thumbnails.delete(workflow_id)
+    except ValueError as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@workflows_router.get(
+    "/i/{workflow_id}/thumbnail",
+    operation_id="get_workflow_thumbnail",
+    responses={
+        200: {
+            "description": "The workflow thumbnail was fetched successfully",
+        },
+        400: {"description": "Bad request"},
+        404: {"description": "The workflow thumbnail could not be found"},
+    },
+    status_code=200,
+)
+async def get_workflow_thumbnail(
+    workflow_id: str = Path(description="The id of the workflow thumbnail to get"),
+) -> FileResponse:
+    """Gets a workflow's thumbnail image"""
+    try:
+        path = ApiDependencies.invoker.services.workflow_thumbnails.get_path(workflow_id)
+
+        response = FileResponse(
+            path,
+            media_type="image/png",
+            filename=workflow_id + ".png",
+            content_disposition_type="inline",
+        )
+        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
+        return response
+    except Exception:
+        raise HTTPException(status_code=404)
+
+
+@workflows_router.get("/counts_by_tag", operation_id="get_counts_by_tag")
+async def get_counts_by_tag(
+    tags: list[str] = Query(description="The tags to get counts for"),
+    categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> dict[str, int]:
+    """Counts workflows by tag"""
+    return ApiDependencies.invoker.services.workflow_records.counts_by_tag(
+        tags=tags, categories=categories, has_been_opened=has_been_opened
+    )
+
+
+@workflows_router.get("/counts_by_category", operation_id="counts_by_category")
+async def counts_by_category(
+    categories: list[WorkflowCategory] = Query(description="The categories to include"),
+    has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
+) -> dict[str, int]:
+    """Counts workflows by category"""
+    return ApiDependencies.invoker.services.workflow_records.counts_by_category(
+        categories=categories, has_been_opened=has_been_opened
+    )
+
+
+@workflows_router.put(
+    "/i/{workflow_id}/opened_at",
+    operation_id="update_opened_at",
+)
+async def update_opened_at(
+    workflow_id: str = Path(description="The workflow to update"),
+) -> None:
+    """Updates the opened_at field of a workflow"""
+    ApiDependencies.invoker.services.workflow_records.update_opened_at(workflow_id)
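A sketch of how a client might exercise the new thumbnail endpoints; the base URL, API prefix, workflow id, and file name below are placeholders, not confirmed by this diff:

# Hypothetical client calls against the new thumbnail routes, using the requests library.
import requests

BASE = "http://127.0.0.1:9090/api/v1/workflows"  # assumed mount point; adjust for your install
workflow_id = "some-workflow-id"  # placeholder

# PUT the thumbnail as a multipart file upload (the route reads it as an UploadFile named "image").
with open("thumb.png", "rb") as f:
    r = requests.put(f"{BASE}/i/{workflow_id}/thumbnail", files={"image": ("thumb.png", f, "image/png")})
r.raise_for_status()

# GET it back; served inline as image/png with a long Cache-Control max-age.
r = requests.get(f"{BASE}/i/{workflow_id}/thumbnail")
print(r.status_code, r.headers.get("Content-Type"))

# DELETE removes the thumbnail file but not the workflow itself.
requests.delete(f"{BASE}/i/{workflow_id}/thumbnail")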
@@ -417,7 +417,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

         ui_type = field.json_schema_extra.get("ui_type", None)
         if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
+            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
             field.json_schema_extra.pop("ui_type")
     return None
@@ -40,10 +40,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "compel",
-    title="Prompt",
+    title="Prompt - SD1.5",
     tags=["prompt", "compel"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class CompelInvocation(BaseInvocation):
     """Parse prompt using compel package to conditioning."""
@@ -233,10 +233,10 @@ class SDXLPromptInvocationBase:

 @invocation(
     "sdxl_compel_prompt",
-    title="SDXL Prompt",
+    title="Prompt - SDXL",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.2.0",
+    version="1.2.1",
 )
 class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""
@@ -327,10 +327,10 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):

 @invocation(
     "sdxl_refiner_compel_prompt",
-    title="SDXL Refiner Prompt",
+    title="Prompt - SDXL Refiner",
     tags=["sdxl", "compel", "prompt"],
     category="conditioning",
-    version="1.1.1",
+    version="1.1.2",
 )
 class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""
@@ -376,10 +376,10 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):

 @invocation(
     "clip_skip",
-    title="CLIP Skip",
+    title="Apply CLIP Skip - SD1.5, SDXL",
     tags=["clipskip", "clip", "skip"],
     category="conditioning",
-    version="1.1.0",
+    version="1.1.1",
 )
 class CLIPSkipInvocation(BaseInvocation):
     """Skip layers in clip text_encoder model."""
@@ -513,7 +513,7 @@ def log_tokenization_for_text(
         usedTokens += 1

     if usedTokens > 0:
-        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
         print(f"{tokenized}\x1b[0m")

     if discarded != "":
@@ -87,7 +87,7 @@ class ControlOutput(BaseInvocationOutput):
     control: ControlField = OutputField(description=FieldDescriptions.control)


-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
+@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
 class ControlNetInvocation(BaseInvocation):
     """Collects ControlNet info to pass to other nodes"""
(deleted file)

@@ -1,64 +0,0 @@
"""
Invoke-managed custom node loader. See README.md for more information.
"""

import sys
import traceback
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

from invokeai.backend.util.logging import InvokeAILogger

logger = InvokeAILogger.get_logger()
loaded_packs: list[str] = []
failed_packs: list[str] = []

custom_nodes_dir = Path(__file__).parent

for d in custom_nodes_dir.iterdir():
    # skip files
    if not d.is_dir():
        continue

    # skip hidden directories
    if d.name.startswith("_") or d.name.startswith("."):
        continue

    # skip directories without an `__init__.py`
    init = d / "__init__.py"
    if not init.exists():
        continue

    module_name = init.parent.stem

    # skip if already imported
    if module_name in globals():
        continue

    # load the module, appending adding a suffix to identify it as a custom node pack
    spec = spec_from_file_location(module_name, init.absolute())

    if spec is None or spec.loader is None:
        logger.warn(f"Could not load {init}")
        continue

    logger.info(f"Loading node pack {module_name}")

    try:
        module = module_from_spec(spec)
        sys.modules[spec.name] = module
        spec.loader.exec_module(module)

        loaded_packs.append(module_name)
    except Exception:
        failed_packs.append(module_name)
        full_error = traceback.format_exc()
        logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")

    del init, module_name

loaded_count = len(loaded_packs)
if loaded_count > 0:
    logger.info(
        f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_dir}: {', '.join(loaded_packs)}"
    )
@@ -127,10 +127,10 @@ def get_scheduler(

 @invocation(
     "denoise_latents",
-    title="Denoise Latents",
+    title="Denoise - SD1.5, SDXL",
     tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
     category="latents",
-    version="1.5.3",
+    version="1.5.4",
 )
 class DenoiseLatentsInvocation(BaseInvocation):
     """Denoises noisy latents to decodable images"""
@@ -57,6 +57,8 @@ class UIType(str, Enum, metaclass=MetaEnum):
     CLIPGEmbedModel = "CLIPGEmbedModelField"
     SpandrelImageToImageModel = "SpandrelImageToImageModelField"
     ControlLoRAModel = "ControlLoRAModelField"
+    SigLipModel = "SigLipModelField"
+    FluxReduxModel = "FluxReduxModelField"
     # endregion

     # region Misc Field Types
@@ -152,6 +154,7 @@ class FieldDescriptions:
     sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
     onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
     spandrel_image_to_image_model = "Image-to-Image model"
+    vllm_model = "VLLM model"
     lora_weight = "The weight at which the LoRA is applied to each model"
     compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
     raw_prompt = "Raw prompt text (no parsing)"
@@ -201,6 +204,7 @@ class FieldDescriptions:
     freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
     freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
     instantx_control_mode = "The control mode for InstantX ControlNet union models. Ignored for other ControlNet models. The standard mapping is: canny (0), tile (1), depth (2), blur (3), pose (4), gray (5), low quality (6). Negative values will be treated as 'None'."
+    flux_redux_conditioning = "FLUX Redux conditioning tensor"


 class ImageField(BaseModel):
@@ -259,6 +263,17 @@ class FluxConditioningField(BaseModel):
     )


+class FluxReduxConditioningField(BaseModel):
+    """A FLUX Redux conditioning tensor primitive value"""
+
+    conditioning: TensorField = Field(description="The Redux image conditioning tensor.")
+    mask: Optional[TensorField] = Field(
+        default=None,
+        description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
+        "included regions should be set to True.",
+    )
+
+
 class SD3ConditioningField(BaseModel):
     """A conditioning tensor primitive value"""
@@ -21,10 +21,10 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):

 @invocation(
     "flux_control_lora_loader",
-    title="Flux Control LoRA",
+    title="Control LoRA - FLUX",
     tags=["lora", "model", "flux"],
     category="model",
-    version="1.1.0",
+    version="1.1.1",
     classification=Classification.Prototype,
 )
 class FluxControlLoRALoaderInvocation(BaseInvocation):
@@ -15,6 +15,7 @@ from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
     FluxConditioningField,
+    FluxReduxConditioningField,
     ImageField,
     Input,
     InputField,
@@ -46,7 +47,7 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
-from invokeai.backend.flux.text_conditioning import FluxTextConditioning
+from invokeai.backend.flux.text_conditioning import FluxReduxConditioning, FluxTextConditioning
 from invokeai.backend.model_manager.config import ModelFormat
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
@@ -61,7 +62,7 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="3.2.2",
+    version="3.2.3",
     classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
@@ -103,6 +104,11 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
         input=Input.Connection,
     )
+    redux_conditioning: FluxReduxConditioningField | list[FluxReduxConditioningField] | None = InputField(
+        default=None,
+        description="FLUX Redux conditioning tensor.",
+        input=Input.Connection,
+    )
     cfg_scale: float | list[float] = InputField(default=1.0, description=FieldDescriptions.cfg_scale, title="CFG Scale")
     cfg_scale_start_step: int = InputField(
         default=0,
@@ -190,11 +196,23 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             dtype=inference_dtype,
             device=TorchDevice.choose_torch_device(),
         )
+        redux_conditionings: list[FluxReduxConditioning] = self._load_redux_conditioning(
+            context=context,
+            redux_cond_field=self.redux_conditioning,
+            packed_height=packed_h,
+            packed_width=packed_w,
+            device=TorchDevice.choose_torch_device(),
+            dtype=inference_dtype,
+        )
         pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
-            pos_text_conditionings, img_seq_len=packed_h * packed_w
+            text_conditioning=pos_text_conditionings,
+            redux_conditioning=redux_conditionings,
+            img_seq_len=packed_h * packed_w,
         )
         neg_regional_prompting_extension = (
-            RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
+            RegionalPromptingExtension.from_text_conditioning(
+                text_conditioning=neg_text_conditionings, redux_conditioning=[], img_seq_len=packed_h * packed_w
+            )
             if neg_text_conditionings
             else None
         )
@@ -400,6 +418,42 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

         return text_conditionings

+    def _load_redux_conditioning(
+        self,
+        context: InvocationContext,
+        redux_cond_field: FluxReduxConditioningField | list[FluxReduxConditioningField] | None,
+        packed_height: int,
+        packed_width: int,
+        device: torch.device,
+        dtype: torch.dtype,
+    ) -> list[FluxReduxConditioning]:
+        # Normalize to a list of FluxReduxConditioningFields.
+        if redux_cond_field is None:
+            return []
+
+        redux_cond_list = (
+            [redux_cond_field] if isinstance(redux_cond_field, FluxReduxConditioningField) else redux_cond_field
+        )
+
+        redux_conditionings: list[FluxReduxConditioning] = []
+        for redux_cond_field in redux_cond_list:
+            # Load the Redux conditioning tensor.
+            redux_cond_data = context.tensors.load(redux_cond_field.conditioning.tensor_name)
+            redux_cond_data.to(device=device, dtype=dtype)
+
+            # Load the mask, if provided.
+            mask: Optional[torch.Tensor] = None
+            if redux_cond_field.mask is not None:
+                mask = context.tensors.load(redux_cond_field.mask.tensor_name)
+                mask = mask.to(device=device)
+                mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
+                    mask, packed_height, packed_width, dtype, device
+                )
+
+            redux_conditionings.append(FluxReduxConditioning(redux_embeddings=redux_cond_data, mask=mask))
+
+        return redux_conditionings
+
     @classmethod
     def prep_cfg_scale(
         cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
@@ -37,10 +37,10 @@ class FluxModelLoaderOutput(BaseInvocationOutput):

 @invocation(
     "flux_model_loader",
-    title="Flux Main Model",
+    title="Main Model - FLUX",
     tags=["model", "flux"],
     category="model",
-    version="1.0.5",
+    version="1.0.6",
     classification=Classification.Prototype,
 )
 class FluxModelLoaderInvocation(BaseInvocation):
invokeai/app/invocations/flux_redux.py (new file, 119 lines)

@@ -0,0 +1,119 @@
from typing import Optional

import torch
from PIL import Image

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    FluxReduxConditioningField,
    InputField,
    OutputField,
    TensorField,
    UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
from invokeai.backend.model_manager.starter_models import siglip
from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
from invokeai.backend.util.devices import TorchDevice


@invocation_output("flux_redux_output")
class FluxReduxOutput(BaseInvocationOutput):
    """The conditioning output of a FLUX Redux invocation."""

    redux_cond: FluxReduxConditioningField = OutputField(
        description=FieldDescriptions.flux_redux_conditioning, title="Conditioning"
    )


@invocation(
    "flux_redux",
    title="FLUX Redux",
    tags=["ip_adapter", "control"],
    category="ip_adapter",
    version="2.0.0",
    classification=Classification.Prototype,
)
class FluxReduxInvocation(BaseInvocation):
    """Runs a FLUX Redux model to generate a conditioning tensor."""

    image: ImageField = InputField(description="The FLUX Redux image prompt.")
    mask: Optional[TensorField] = InputField(
        default=None,
        description="The bool mask associated with this FLUX Redux image prompt. Excluded regions should be set to "
        "False, included regions should be set to True.",
    )
    redux_model: ModelIdentifierField = InputField(
        description="The FLUX Redux model to use.",
        title="FLUX Redux Model",
        ui_type=UIType.FluxReduxModel,
    )

    def invoke(self, context: InvocationContext) -> FluxReduxOutput:
        image = context.images.get_pil(self.image.image_name, "RGB")

        encoded_x = self._siglip_encode(context, image)
        redux_conditioning = self._flux_redux_encode(context, encoded_x)

        tensor_name = context.tensors.save(redux_conditioning)
        return FluxReduxOutput(
            redux_cond=FluxReduxConditioningField(conditioning=TensorField(tensor_name=tensor_name), mask=self.mask)
        )

    @torch.no_grad()
    def _siglip_encode(self, context: InvocationContext, image: Image.Image) -> torch.Tensor:
        siglip_model_config = self._get_siglip_model(context)
        with context.models.load(siglip_model_config.key).model_on_device() as (_, siglip_pipeline):
            assert isinstance(siglip_pipeline, SigLipPipeline)
            return siglip_pipeline.encode_image(
                x=image, device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
            )

    @torch.no_grad()
    def _flux_redux_encode(self, context: InvocationContext, encoded_x: torch.Tensor) -> torch.Tensor:
        with context.models.load(self.redux_model).model_on_device() as (_, flux_redux):
            assert isinstance(flux_redux, FluxReduxModel)
            dtype = next(flux_redux.parameters()).dtype
            encoded_x = encoded_x.to(dtype=dtype)
            return flux_redux(encoded_x)

    def _get_siglip_model(self, context: InvocationContext) -> AnyModelConfig:
        siglip_models = context.models.search_by_attrs(name=siglip.name, base=BaseModelType.Any, type=ModelType.SigLIP)

        if not len(siglip_models) > 0:
            context.logger.warning(
                f"The SigLIP model required by FLUX Redux ({siglip.name}) is not installed. Downloading and installing now. This may take a while."
            )

            # TODO(psyche): Can the probe reliably determine the type of the model? Just hardcoding it bc I don't want to experiment now
            config_overrides = ModelRecordChanges(name=siglip.name, type=ModelType.SigLIP)

            # Queue the job
            job = context._services.model_manager.install.heuristic_import(siglip.source, config=config_overrides)

            # Wait for up to 10 minutes - model is ~3.5GB
            context._services.model_manager.install.wait_for_job(job, timeout=600)

            siglip_models = context.models.search_by_attrs(
                name=siglip.name,
                base=BaseModelType.Any,
                type=ModelType.SigLIP,
            )

            if len(siglip_models) == 0:
                context.logger.error("Error while fetching SigLIP for FLUX Redux")
            assert len(siglip_models) == 1

        return siglip_models[0]
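The node above emits a FluxReduxConditioningField, which FLUX Denoise accepts either singly or as a list through its new redux_conditioning input. A field-level sketch of the data shape, with tensor names invented for illustration:

# Constructing the conditioning field the Redux node outputs; tensor names are placeholders.
from invokeai.app.invocations.fields import FluxReduxConditioningField, TensorField

single = FluxReduxConditioningField(conditioning=TensorField(tensor_name="redux-embeddings-0"))
masked = FluxReduxConditioningField(
    conditioning=TensorField(tensor_name="redux-embeddings-1"),
    mask=TensorField(tensor_name="region-mask-1"),  # True marks included regions, False excluded
)
# FLUX Denoise's redux_conditioning input would accept either `single` or `[single, masked]`.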
@@ -26,10 +26,10 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit

 @invocation(
     "flux_text_encoder",
-    title="FLUX Text Encoding",
+    title="Prompt - FLUX",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.1",
+    version="1.1.2",
     classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
@@ -22,10 +22,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "flux_vae_decode",
-    title="FLUX Latents to Image",
+    title="Latents to Image - FLUX",
     tags=["latents", "image", "vae", "l2i", "flux"],
     category="latents",
-    version="1.0.1",
+    version="1.0.2",
 )
 class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""
@@ -19,10 +19,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "flux_vae_encode",
-    title="FLUX Image to Latents",
+    title="Image to Latents - FLUX",
     tags=["latents", "image", "vae", "i2l", "flux"],
     category="latents",
-    version="1.0.0",
+    version="1.0.1",
 )
 class FluxVaeEncodeInvocation(BaseInvocation):
     """Encodes an image into latents."""
@@ -19,9 +19,9 @@ class IdealSizeOutput(BaseInvocationOutput):

 @invocation(
     "ideal_size",
-    title="Ideal Size",
+    title="Ideal Size - SD1.5, SDXL",
     tags=["latents", "math", "ideal_size"],
-    version="1.0.4",
+    version="1.0.5",
 )
 class IdealSizeInvocation(BaseInvocation):
     """Calculates the ideal size for generation to avoid duplication"""
@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "i2l",
-    title="Image to Latents",
+    title="Image to Latents - SD1.5, SDXL",
     tags=["latents", "image", "vae", "i2l"],
     category="latents",
-    version="1.1.0",
+    version="1.1.1",
 )
 class ImageToLatentsInvocation(BaseInvocation):
     """Encodes an image into latents."""
@@ -69,7 +69,13 @@ CLIP_VISION_MODEL_MAP: dict[Literal["ViT-L", "ViT-H", "ViT-G"], StarterModel] =
 }


-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.5.0")
+@invocation(
+    "ip_adapter",
+    title="IP-Adapter - SD1.5, SDXL",
+    tags=["ip_adapter", "control"],
+    category="ip_adapter",
+    version="1.5.1",
+)
 class IPAdapterInvocation(BaseInvocation):
     """Collects IP-Adapter info to pass to other nodes."""
@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice

 @invocation(
     "l2i",
-    title="Latents to Image",
+    title="Latents to Image - SD1.5, SDXL",
     tags=["latents", "image", "vae", "l2i"],
     category="latents",
-    version="1.3.1",
+    version="1.3.2",
 )
 class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates an image from latents."""
@@ -1,40 +1,83 @@
|
||||
import logging
|
||||
import shutil
|
||||
import sys
|
||||
import traceback
|
||||
from importlib.util import module_from_spec, spec_from_file_location
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def load_custom_nodes(custom_nodes_path: Path):
|
||||
def load_custom_nodes(custom_nodes_path: Path, logger: logging.Logger):
|
||||
"""
|
||||
Loads all custom nodes from the custom_nodes_path directory.
|
||||
|
||||
This function copies a custom __init__.py file to the custom_nodes_path directory, effectively turning it into a
|
||||
python module.
|
||||
If custom_nodes_path does not exist, it creates it.
|
||||
|
||||
The custom __init__.py file itself imports all the custom node packs as python modules from the custom_nodes_path
|
||||
directory.
|
||||
It also copies the custom_nodes/README.md file to the custom_nodes_path directory. Because this file may change,
|
||||
it is _always_ copied to the custom_nodes_path directory.
|
||||
|
||||
Then,the custom __init__.py file is programmatically imported using importlib. As it executes, it imports all the
|
||||
custom node packs as python modules.
|
||||
Then, it crawls the custom_nodes_path directory and imports all top-level directories as python modules.
|
||||
|
||||
If the directory does not contain an __init__.py file or starts with an `_` or `.`, it is skipped.
|
||||
"""

# create the custom nodes directory if it does not exist
custom_nodes_path.mkdir(parents=True, exist_ok=True)

custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")
# Copy the README file to the custom nodes directory
source_custom_nodes_readme_path = Path(__file__).parent / "custom_nodes/README.md"
target_custom_nodes_readme_path = Path(custom_nodes_path) / "README.md"

# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
# copy our custom nodes README to the custom nodes directory
shutil.copy(source_custom_nodes_readme_path, target_custom_nodes_readme_path)

# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
    p.chmod(custom_nodes_path.stat().st_mode)
loaded_packs: list[str] = []
failed_packs: list[str] = []

# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
    raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
for d in custom_nodes_path.iterdir():
    # skip files
    if not d.is_dir():
        continue

    # skip hidden directories
    if d.name.startswith("_") or d.name.startswith("."):
        continue

    # skip directories without an `__init__.py`
    init = d / "__init__.py"
    if not init.exists():
        continue

    module_name = init.parent.stem

    # skip if already imported
    if module_name in globals():
        continue

    # load the module
    spec = spec_from_file_location(module_name, init.absolute())

    if spec is None or spec.loader is None:
        logger.warning(f"Could not load {init}")
        continue

    logger.info(f"Loading node pack {module_name}")

    try:
        module = module_from_spec(spec)
        sys.modules[spec.name] = module
        spec.loader.exec_module(module)

        loaded_packs.append(module_name)
    except Exception:
        failed_packs.append(module_name)
        full_error = traceback.format_exc()
        logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")

    del init, module_name

loaded_count = len(loaded_packs)
if loaded_count > 0:
    logger.info(
        f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_path}: {', '.join(loaded_packs)}"
    )
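For reference, a minimal node pack that this loader would pick up might look like the following. The pack name, class, and field are hypothetical, and the import paths are assumed from this revision's invocation API; they may differ between versions.

# nodes/my_pack/__init__.py -- a hypothetical minimal node pack. The loader
# above imports this directory because it is a top-level, non-hidden directory
# containing an __init__.py.
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import InputField
from invokeai.app.invocations.primitives import StringOutput
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation("my_shout", title="Shout", tags=["text"], category="text", version="1.0.0")
class ShoutInvocation(BaseInvocation):
    """Upper-cases the input text."""

    text: str = InputField(default="", description="The text to upper-case")

    def invoke(self, context: InvocationContext) -> StringOutput:
        return StringOutput(value=self.text.upper())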

@@ -610,10 +610,10 @@ class LatentsMetaOutput(LatentsOutput, MetadataOutput):

@invocation(
    "denoise_latents_meta",
    title="Denoise Latents + metadata",
    title=f"{DenoiseLatentsInvocation.UIConfig.title} + Metadata",
    tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
    category="latents",
    version="1.1.0",
    version="1.1.1",
)
class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
    def invoke(self, context: InvocationContext) -> LatentsMetaOutput:

@@ -675,10 +675,10 @@ class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):

@invocation(
    "flux_denoise_meta",
    title="Flux Denoise + metadata",
    title=f"{FluxDenoiseInvocation.UIConfig.title} + Metadata",
    tags=["flux", "latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
    category="latents",
    version="1.0.0",
    version="1.0.1",
)
class FluxDenoiseLatentsMetaInvocation(FluxDenoiseInvocation, WithMetadata):
    """Run denoising process with a FLUX transformer model + metadata."""

@@ -122,10 +122,10 @@ class ModelIdentifierOutput(BaseInvocationOutput):

@invocation(
    "model_identifier",
    title="Model identifier",
    title="Any Model",
    tags=["model"],
    category="model",
    version="1.0.0",
    version="1.0.1",
    classification=Classification.Prototype,
)
class ModelIdentifierInvocation(BaseInvocation):

@@ -144,10 +144,10 @@ class ModelIdentifierInvocation(BaseInvocation):

@invocation(
    "main_model_loader",
    title="Main Model",
    title="Main Model - SD1.5",
    tags=["model"],
    category="model",
    version="1.0.3",
    version="1.0.4",
)
class MainModelLoaderInvocation(BaseInvocation):
    """Loads a main model, outputting its submodels."""

@@ -244,7 +244,7 @@ class LoRASelectorOutput(BaseInvocationOutput):
    lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")


@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
@invocation("lora_selector", title="LoRA Model - SD1.5", tags=["model"], category="model", version="1.0.2")
class LoRASelectorInvocation(BaseInvocation):
    """Selects a LoRA model and weight."""

@@ -257,7 +257,9 @@ class LoRASelectorInvocation(BaseInvocation):
        return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))


@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
@invocation(
    "lora_collection_loader", title="LoRA Collection - SD1.5", tags=["model"], category="model", version="1.1.1"
)
class LoRACollectionLoader(BaseInvocation):
    """Applies a collection of LoRAs to the provided UNet and CLIP models."""

@@ -320,10 +322,10 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):

@invocation(
    "sdxl_lora_loader",
    title="SDXL LoRA",
    title="LoRA Model - SDXL",
    tags=["lora", "model"],
    category="model",
    version="1.0.3",
    version="1.0.4",
)
class SDXLLoRALoaderInvocation(BaseInvocation):
    """Apply selected lora to unet and text_encoder."""

@@ -400,10 +402,10 @@ class SDXLLoRALoaderInvocation(BaseInvocation):

@invocation(
    "sdxl_lora_collection_loader",
    title="SDXL LoRA Collection Loader",
    title="LoRA Collection - SDXL",
    tags=["model"],
    category="model",
    version="1.1.0",
    version="1.1.1",
)
class SDXLLoRACollectionLoader(BaseInvocation):
    """Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""

@@ -469,7 +471,9 @@ class SDXLLoRACollectionLoader(BaseInvocation):
        return output


@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
@invocation(
    "vae_loader", title="VAE Model - SD1.5, SDXL, SD3, FLUX", tags=["vae", "model"], category="model", version="1.0.4"
)
class VAELoaderInvocation(BaseInvocation):
    """Loads a VAE model, outputting a VaeLoaderOutput"""

@@ -496,10 +500,10 @@ class SeamlessModeOutput(BaseInvocationOutput):

@invocation(
    "seamless",
    title="Seamless",
    title="Apply Seamless - SD1.5, SDXL",
    tags=["seamless", "model"],
    category="model",
    version="1.0.1",
    version="1.0.2",
)
class SeamlessModeInvocation(BaseInvocation):
    """Applies the seamless transformation to the Model UNet and VAE."""

@@ -539,7 +543,7 @@ class SeamlessModeInvocation(BaseInvocation):
        return SeamlessModeOutput(unet=unet, vae=vae)


@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.1")
@invocation("freeu", title="Apply FreeU - SD1.5, SDXL", tags=["freeu"], category="unet", version="1.0.2")
class FreeUInvocation(BaseInvocation):
    """
    Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):

@@ -72,10 +72,10 @@ class NoiseOutput(BaseInvocationOutput):

@invocation(
    "noise",
    title="Noise",
    title="Create Latent Noise",
    tags=["latents", "noise"],
    category="latents",
    version="1.0.2",
    version="1.0.3",
)
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

@@ -32,10 +32,10 @@ from invokeai.backend.util.devices import TorchDevice

@invocation(
    "sd3_denoise",
    title="SD3 Denoise",
    title="Denoise - SD3",
    tags=["image", "sd3"],
    category="image",
    version="1.1.0",
    version="1.1.1",
    classification=Classification.Prototype,
)
class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

@@ -21,10 +21,10 @@ from invokeai.backend.util.devices import TorchDevice

@invocation(
    "sd3_i2l",
    title="SD3 Image to Latents",
    title="Image to Latents - SD3",
    tags=["image", "latents", "vae", "i2l", "sd3"],
    category="image",
    version="1.0.0",
    version="1.0.1",
    classification=Classification.Prototype,
)
class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

@@ -24,10 +24,10 @@ from invokeai.backend.util.devices import TorchDevice

@invocation(
    "sd3_l2i",
    title="SD3 Latents to Image",
    title="Latents to Image - SD3",
    tags=["latents", "image", "vae", "l2i", "sd3"],
    category="latents",
    version="1.3.1",
    version="1.3.2",
)
class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates an image from latents."""

@@ -30,10 +30,10 @@ class Sd3ModelLoaderOutput(BaseInvocationOutput):

@invocation(
    "sd3_model_loader",
    title="SD3 Main Model",
    title="Main Model - SD3",
    tags=["model", "sd3"],
    category="model",
    version="1.0.0",
    version="1.0.1",
    classification=Classification.Prototype,
)
class Sd3ModelLoaderInvocation(BaseInvocation):

@@ -29,10 +29,10 @@ SD3_T5_MAX_SEQ_LEN = 256

@invocation(
    "sd3_text_encoder",
    title="SD3 Text Encoding",
    title="Prompt - SD3",
    tags=["prompt", "conditioning", "sd3"],
    category="conditioning",
    version="1.0.0",
    version="1.0.1",
    classification=Classification.Prototype,
)
class Sd3TextEncoderInvocation(BaseInvocation):

@@ -24,7 +24,7 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
@invocation("sdxl_model_loader", title="Main Model - SDXL", tags=["model", "sdxl"], category="model", version="1.0.4")
class SDXLModelLoaderInvocation(BaseInvocation):
    """Loads an sdxl base model, outputting its submodels."""

@@ -58,10 +58,10 @@ class SDXLModelLoaderInvocation(BaseInvocation):

@invocation(
    "sdxl_refiner_model_loader",
    title="SDXL Refiner Model",
    title="Refiner Model - SDXL",
    tags=["model", "sdxl", "refiner"],
    category="model",
    version="1.0.3",
    version="1.0.4",
)
class SDXLRefinerModelLoaderInvocation(BaseInvocation):
    """Loads an sdxl refiner model, outputting its submodels."""

@@ -185,9 +185,9 @@ class SegmentAnythingInvocation(BaseInvocation):
            # Find the largest mask.
            return [max(masks, key=lambda x: float(x.sum()))]
        elif self.mask_filter == "highest_box_score":
            assert (
                bounding_boxes is not None
            ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
            assert bounding_boxes is not None, (
                "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
            )
            assert len(masks) == len(bounding_boxes)
            # Find the index of the bounding box with the highest score.
            # Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker. In most

@@ -45,7 +45,11 @@ class T2IAdapterOutput(BaseInvocationOutput):


@invocation(
    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
    "t2i_adapter",
    title="T2I-Adapter - SD1.5, SDXL",
    tags=["t2i_adapter", "control"],
    category="t2i_adapter",
    version="1.0.4",
)
class T2IAdapterInvocation(BaseInvocation):
    """Collects T2I-Adapter info to pass to other nodes."""

@@ -53,11 +53,11 @@ def crop_controlnet_data(control_data: ControlNetData, latent_region: TBLR) -> C

@invocation(
    "tiled_multi_diffusion_denoise_latents",
    title="Tiled Multi-Diffusion Denoise Latents",
    title="Tiled Multi-Diffusion Denoise - SD1.5, SDXL",
    tags=["upscale", "denoise"],
    category="latents",
    classification=Classification.Beta,
    version="1.0.0",
    version="1.0.1",
)
class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
    """Tiled Multi-Diffusion denoising.

@@ -59,7 +59,7 @@ def run_app() -> None:
    # Load custom nodes. This must be done after importing the Graph class, which itself imports all modules from the
    # invocations module. The ordering here is implicit, but important - we want to load custom nodes after all the
    # core nodes have been imported so that we can catch when a custom node clobbers a core node.
    load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path)
    load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)

    # Start the server.
    config = uvicorn.Config(

@@ -72,6 +72,7 @@ class InvokeAIAppConfig(BaseSettings):
    outputs_dir: Path to directory for outputs.
    custom_nodes_dir: Path to directory for custom nodes.
    style_presets_dir: Path to directory for style presets.
    workflow_thumbnails_dir: Path to directory for workflow thumbnails.
    log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
    log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
    log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`

@@ -142,6 +143,7 @@ class InvokeAIAppConfig(BaseSettings):
    outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
    custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes.")
    style_presets_dir: Path = Field(default=Path("style_presets"), description="Path to directory for style presets.")
    workflow_thumbnails_dir: Path = Field(default=Path("workflow_thumbnails"), description="Path to directory for workflow thumbnails.")

    # LOGGING
    log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')

@@ -304,6 +306,11 @@ class InvokeAIAppConfig(BaseSettings):
        """Path to the style presets directory, resolved to an absolute path."""
        return self._resolve(self.style_presets_dir)

    @property
    def workflow_thumbnails_path(self) -> Path:
        """Path to the workflow thumbnails directory, resolved to an absolute path."""
        return self._resolve(self.workflow_thumbnails_dir)

    @property
    def convert_cache_path(self) -> Path:
        """Path to the converted cache models directory, resolved to an absolute path."""

@@ -476,9 +483,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
    try:
        # Meta is not included in the model fields, so we need to validate it separately
        config = InvokeAIAppConfig.model_validate(loaded_config_dict)
        assert (
            config.schema_version == CONFIG_SCHEMA_VERSION
        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
        assert config.schema_version == CONFIG_SCHEMA_VERSION, (
            f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
        )
        return config
    except Exception as e:
        raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e

@@ -32,6 +32,7 @@ if TYPE_CHECKING:
    from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
    from invokeai.app.services.urls.urls_base import UrlServiceBase
    from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
    from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_base import WorkflowThumbnailServiceBase
    from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData


@@ -65,6 +66,7 @@ class InvocationServices:
        conditioning: "ObjectSerializerBase[ConditioningFieldData]",
        style_preset_records: "StylePresetRecordsStorageBase",
        style_preset_image_files: "StylePresetImageFileStorageBase",
        workflow_thumbnails: "WorkflowThumbnailServiceBase",
    ):
        self.board_images = board_images
        self.board_image_records = board_image_records

@@ -91,3 +93,4 @@ class InvocationServices:
        self.conditioning = conditioning
        self.style_preset_records = style_preset_records
        self.style_preset_image_files = style_preset_image_files
        self.workflow_thumbnails = workflow_thumbnails

@@ -19,6 +19,8 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import build_migration_16
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import build_migration_17
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator


@@ -55,6 +57,8 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
    migrator.register_migration(build_migration_14())
    migrator.register_migration(build_migration_15())
    migrator.register_migration(build_migration_16())
    migrator.register_migration(build_migration_17())
    migrator.register_migration(build_migration_18())
    migrator.run_migrations()

    return db

@@ -0,0 +1,35 @@
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration17Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._add_workflows_tags_col(cursor)

    def _add_workflows_tags_col(self, cursor: sqlite3.Cursor) -> None:
        """
        - Adds `tags` column to the workflow_library table. It is a generated column that extracts the tags from the
          workflow JSON.
        """

        cursor.execute(
            "ALTER TABLE workflow_library ADD COLUMN tags TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.tags')) VIRTUAL;"
        )


def build_migration_17() -> Migration:
    """
    Build the migration from database version 16 to 17.

    This migration does the following:
    - Adds `tags` column to the workflow_library table. It is a generated column that extracts the tags from the
      workflow JSON.
    """
    migration_17 = Migration(
        from_version=16,
        to_version=17,
        callback=Migration17Callback(),
    )

    return migration_17
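Because the column is VIRTUAL, the tags value is computed from the stored workflow JSON on read, so it always tracks the document. A quick standalone look at that behavior, with the table shape simplified to the columns involved (assumes an SQLite build with the JSON functions and generated-column support, i.e. 3.31 or newer):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_library (workflow TEXT)")
conn.execute(
    "ALTER TABLE workflow_library ADD COLUMN tags TEXT "
    "GENERATED ALWAYS AS (json_extract(workflow, '$.tags')) VIRTUAL;"
)
conn.execute("""INSERT INTO workflow_library (workflow) VALUES ('{"tags": "sdxl, upscaling"}')""")

# The tags column is derived from the JSON at query time.
print(conn.execute("SELECT tags FROM workflow_library").fetchone())  # ('sdxl, upscaling',)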

@@ -0,0 +1,47 @@
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration18Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._make_workflow_opened_at_nullable(cursor)

    def _make_workflow_opened_at_nullable(self, cursor: sqlite3.Cursor) -> None:
        """
        Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
        - Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
        - Dropping the existing `opened_at` column
        - Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
        - Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
        """
        # SQLite cannot drop a column that is still referenced by an index, so drop the index first
        cursor.execute("DROP INDEX IF EXISTS idx_workflow_library_opened_at;")
        # Drop the existing column
        cursor.execute("ALTER TABLE workflow_library DROP COLUMN opened_at;")
        # Add new nullable column - all values will be NULL - no migration of data needed
        cursor.execute("ALTER TABLE workflow_library ADD COLUMN opened_at DATETIME;")
        # Create new index on the new column
        cursor.execute(
            "CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at);",
        )


def build_migration_18() -> Migration:
    """
    Build the migration from database version 17 to 18.

    This migration does the following:
    - Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
      - Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
      - Dropping the existing `opened_at` column
      - Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
      - Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
    """
    migration_18 = Migration(
        from_version=17,
        to_version=18,
        callback=Migration18Callback(),
    )

    return migration_18
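The drop-index-first ordering matters: SQLite refuses to drop a column while an index still references it, which is why the callback removes idx_workflow_library_opened_at before the DROP COLUMN. A standalone sketch of the failure mode, with a hypothetical simplified schema (DROP COLUMN requires SQLite 3.35 or newer):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_library (workflow TEXT, opened_at DATETIME)")
conn.execute("CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at)")

try:
    # Fails: the column is still referenced by the index.
    conn.execute("ALTER TABLE workflow_library DROP COLUMN opened_at")
except sqlite3.OperationalError as err:
    print(err)

conn.execute("DROP INDEX idx_workflow_library_opened_at")
conn.execute("ALTER TABLE workflow_library DROP COLUMN opened_at")  # now succeeds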

@@ -18,3 +18,8 @@ class UrlServiceBase(ABC):
    def get_style_preset_image_url(self, style_preset_id: str) -> str:
        """Gets the URL for a style preset image"""
        pass

    @abstractmethod
    def get_workflow_thumbnail_url(self, workflow_id: str) -> str:
        """Gets the URL for a workflow thumbnail"""
        pass

@@ -22,3 +22,6 @@ class LocalUrlService(UrlServiceBase):

    def get_style_preset_image_url(self, style_preset_id: str) -> str:
        return f"{self._base_url}/style_presets/i/{style_preset_id}/image"

    def get_workflow_thumbnail_url(self, workflow_id: str) -> str:
        return f"{self._base_url}/workflows/i/{workflow_id}/thumbnail"

@@ -1,10 +1,11 @@
{
  "name": "ESRGAN Upscaling with Canny ControlNet",
  "id": "default_686bb1d0-d086-4c70-9fa3-2f600b922023",
  "name": "Upscaler - SD1.5, ESRGAN",
  "author": "InvokeAI",
  "description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
  "description": "Sample workflow for using ESRGAN to upscale with ControlNet with SD1.5",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "upscale, controlnet, default",
  "tags": "sd1.5, upscaling, control",
  "notes": "",
  "exposedFields": [
    {
@@ -184,14 +185,7 @@
  },
  "control_model": {
    "name": "control_model",
    "label": "Control Model (select Canny)",
    "value": {
      "key": "a7b9c76f-4bc5-42aa-b918-c1c458a5bb24",
      "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
      "name": "sd-controlnet-canny",
      "base": "sd-1",
      "type": "controlnet"
    }
    "label": "Control Model (select Canny)"
  },
  "control_weight": {
    "name": "control_weight",
@@ -294,14 +288,7 @@
  "inputs": {
    "model": {
      "name": "model",
      "label": "",
      "value": {
        "key": "5cd43ca0-dd0a-418d-9f7e-35b2b9d5e106",
        "hash": "blake3:6987f323017f597213cc3264250edf57056d21a40a0a85d83a1a33a7d44dc41a",
        "name": "Deliberate_v5",
        "base": "sd-1",
        "type": "main"
      }
      "label": ""
    }
  },
  "isOpen": true,
@@ -848,4 +835,4 @@
      "targetHandle": "image_resolution"
    }
  ]
}
}

@@ -1,10 +1,11 @@
{
  "name": "FLUX Image to Image",
  "id": "default_cbf0e034-7b54-4b2c-b670-3b1e2e4b4a88",
  "name": "Image to Image - FLUX",
  "author": "InvokeAI",
  "description": "A simple image-to-image workflow using a FLUX dev model. ",
  "version": "1.1.0",
  "contact": "",
  "tags": "image2image, flux, image-to-image",
  "tags": "flux, image to image",
  "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend using FLUX dev models for image-to-image workflows. The image-to-image performance with FLUX schnell models is poor.",
  "exposedFields": [
    {
@@ -200,36 +201,15 @@
  },
  "t5_encoder_model": {
    "name": "t5_encoder_model",
    "label": "",
    "value": {
      "key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
      "hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
      "name": "t5_bnb_int8_quantized_encoder",
      "base": "any",
      "type": "t5_encoder"
    }
    "label": ""
  },
  "clip_embed_model": {
    "name": "clip_embed_model",
    "label": "",
    "value": {
      "key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
      "hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
      "name": "clip-vit-large-patch14",
      "base": "any",
      "type": "clip_embed"
    }
    "label": ""
  },
  "vae_model": {
    "name": "vae_model",
    "label": "",
    "value": {
      "key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
      "hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
      "name": "FLUX.1-schnell_ae",
      "base": "flux",
      "type": "vae"
    }
    "label": ""
  }
}
},

@@ -1,10 +1,11 @@
{
  "name": "Face Detailer with IP-Adapter & Canny (See Note in Details)",
  "id": "default_dec5a2e9-f59c-40d9-8869-a056751d79b8",
  "name": "Face Detailer - SD1.5",
  "author": "kosmoskatten",
  "description": "A workflow to add detail to and improve faces. This workflow is most effective when used with a model that creates realistic outputs. ",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "face detailer, IP-Adapter, Canny",
  "tags": "sd1.5, reference image, control",
  "notes": "Set this image as the blur mask: https://i.imgur.com/Gxi61zP.png",
  "exposedFields": [
    {
@@ -135,14 +136,7 @@
  },
  "control_model": {
    "name": "control_model",
    "label": "Control Model (select canny)",
    "value": {
      "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
      "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
      "name": "sd-controlnet-canny",
      "base": "sd-1",
      "type": "controlnet"
    }
    "label": "Control Model (select canny)"
  },
  "control_weight": {
    "name": "control_weight",
@@ -196,14 +190,7 @@
  },
  "ip_adapter_model": {
    "name": "ip_adapter_model",
    "label": "IP-Adapter Model (select IP Adapter Face)",
    "value": {
      "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
      "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
      "name": "ip_adapter_sd15",
      "base": "sd-1",
      "type": "ip_adapter"
    }
    "label": "IP-Adapter Model (select IP Adapter Face)"
  },
  "clip_vision_model": {
    "name": "clip_vision_model",
@@ -1445,4 +1432,4 @@
      "targetHandle": "vae"
    }
  ]
}
}

@@ -1,10 +1,11 @@
{
  "name": "FLUX Text to Image",
  "id": "default_444fe292-896b-44fd-bfc6-c0b5d220fffc",
  "name": "Text to Image - FLUX",
  "author": "InvokeAI",
  "description": "A simple text-to-image workflow using FLUX dev or schnell models.",
  "version": "1.1.0",
  "contact": "",
  "tags": "text2image, flux",
  "tags": "flux, text to image",
  "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
  "exposedFields": [
    {
@@ -168,36 +169,15 @@
  },
  "t5_encoder_model": {
    "name": "t5_encoder_model",
    "label": "",
    "value": {
      "key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
      "hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
      "name": "t5_bnb_int8_quantized_encoder",
      "base": "any",
      "type": "t5_encoder"
    }
    "label": ""
  },
  "clip_embed_model": {
    "name": "clip_embed_model",
    "label": "",
    "value": {
      "key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
      "hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
      "name": "clip-vit-large-patch14",
      "base": "any",
      "type": "clip_embed"
    }
    "label": ""
  },
  "vae_model": {
    "name": "vae_model",
    "label": "",
    "value": {
      "key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
      "hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
      "name": "FLUX.1-schnell_ae",
      "base": "flux",
      "type": "vae"
    }
    "label": ""
  }
}
},

@@ -1,10 +1,11 @@
{
  "name": "Multi ControlNet (Canny & Depth)",
  "id": "default_2d05e719-a6b9-4e64-9310-b875d3b2f9d2",
  "name": "Text to Image - SD1.5, Control",
  "author": "InvokeAI",
  "description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "ControlNet, canny, depth",
  "tags": "sd1.5, control, text to image",
  "notes": "",
  "exposedFields": [
    {
@@ -216,14 +217,7 @@
  },
  "control_model": {
    "name": "control_model",
    "label": "Control Model (select canny)",
    "value": {
      "key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
      "hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
      "name": "sd-controlnet-canny",
      "base": "sd-1",
      "type": "controlnet"
    }
    "label": "Control Model (select canny)"
  },
  "control_weight": {
    "name": "control_weight",
@@ -370,14 +364,7 @@
  },
  "control_model": {
    "name": "control_model",
    "label": "Control Model (select depth)",
    "value": {
      "key": "87e8855c-671f-4c9e-bbbb-8ed47ccb4aac",
      "hash": "blake3:2550bf22a53942dfa28ab2fed9d10d80851112531f44d977168992edf9d0534c",
      "name": "control_v11f1p_sd15_depth",
      "base": "sd-1",
      "type": "controlnet"
    }
    "label": "Control Model (select depth)"
  },
  "control_weight": {
    "name": "control_weight",
@@ -1014,4 +1001,4 @@
      "targetHandle": "image_resolution"
    }
  ]
}
}

@@ -1,10 +1,11 @@
{
  "name": "MultiDiffusion SD1.5",
  "id": "default_f96e794f-eb3e-4d01-a960-9b4e43402bcf",
  "name": "Upscaler - SD1.5, MultiDiffusion",
  "author": "Invoke",
  "description": "A workflow to upscale an input image with tiled upscaling, using SD1.5 based models.",
  "version": "1.0.0",
  "contact": "invoke@invoke.ai",
  "tags": "tiled, upscaling, sdxl",
  "tags": "sd1.5, upscaling",
  "notes": "",
  "exposedFields": [
    {
@@ -52,7 +53,6 @@
    "version": "3.0.0",
    "category": "default"
  },
  "id": "e5b5fb01-8906-463a-963a-402dbc42f79b",
  "nodes": [
    {
      "id": "33fe76a0-5efd-4482-a7f0-e2abf1223dc2",
@@ -135,14 +135,7 @@
  "inputs": {
    "model": {
      "name": "model",
      "label": "",
      "value": {
        "key": "e7b402e5-62e5-4acb-8c39-bee6bdb758ab",
        "hash": "c8659e796168d076368256b57edbc1b48d6dafc1712f1bb37cc57c7c06889a6b",
        "name": "526mix",
        "base": "sd-1",
        "type": "main"
      }
      "label": ""
    }
  },
@@ -384,21 +377,11 @@
  },
  "image": {
    "name": "image",
    "label": "Image to Upscale",
    "value": {
      "image_name": "ee7009f7-a35d-488b-a2a6-21237ef5ae05.png"
    }
    "label": "Image to Upscale"
  },
  "image_to_image_model": {
    "name": "image_to_image_model",
    "label": "",
    "value": {
      "key": "38bb1a29-8ede-42ba-b77f-64b3478896eb",
      "hash": "blake3:e52fdbee46a484ebe9b3b20ea0aac0a35a453ab6d0d353da00acfd35ce7a91ed",
      "name": "4xNomosWebPhoto_esrgan",
      "base": "sdxl",
      "type": "spandrel_image_to_image"
    }
    "label": ""
  },
  "tile_size": {
    "name": "tile_size",
@@ -437,14 +420,7 @@
  "inputs": {
    "model": {
      "name": "model",
      "label": "ControlNet Model - Choose a Tile ControlNet",
      "value": {
        "key": "20645e4d-ef97-4c5a-9243-b834a3483925",
        "hash": "f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
        "name": "tile",
        "base": "sd-1",
        "type": "controlnet"
      }
      "label": "ControlNet Model - Choose a Tile ControlNet"
    }
  },
@@ -1427,4 +1403,4 @@
      "targetHandle": "noise"
    }
  ]
}
}

@@ -1,10 +1,11 @@
{
  "name": "MultiDiffusion SDXL",
  "id": "default_35658541-6d41-4a20-8ec5-4bf2561faed0",
  "name": "Upscaler - SDXL, MultiDiffusion",
  "author": "Invoke",
  "description": "A workflow to upscale an input image with tiled upscaling, using SDXL based models.",
  "version": "1.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "tiled, upscaling, sdxl",
  "tags": "sdxl, upscaling",
  "notes": "",
  "exposedFields": [
    {
@@ -56,7 +57,6 @@
    "version": "3.0.0",
    "category": "default"
  },
  "id": "dd607062-9e1b-48b9-89ad-9762cdfbb8f4",
  "nodes": [
    {
      "id": "71a116e1-c631-48b3-923d-acea4753b887",
@@ -341,14 +341,7 @@
  "inputs": {
    "model": {
      "name": "model",
      "label": "ControlNet Model - Choose a Tile ControlNet",
      "value": {
        "key": "74f4651f-0ace-4b7b-b616-e98360257797",
        "hash": "blake3:167a5b84583aaed3e5c8d660b45830e82e1c602743c689d3c27773c6c8b85b4a",
        "name": "controlnet-tile-sdxl-1.0",
        "base": "sdxl",
        "type": "controlnet"
      }
      "label": "ControlNet Model - Choose a Tile ControlNet"
    }
  },
@@ -801,14 +794,7 @@
  "inputs": {
    "vae_model": {
      "name": "vae_model",
      "label": "",
      "value": {
        "key": "ff926845-090e-4d46-b81e-30289ee47474",
        "hash": "9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
        "name": "VAEFix",
        "base": "sdxl",
        "type": "vae"
      }
      "label": ""
    }
  },
@@ -832,14 +818,7 @@
  "inputs": {
    "model": {
      "name": "model",
      "label": "SDXL Model",
      "value": {
        "key": "ab191f73-68d2-492c-8aec-b438a8cf0f45",
        "hash": "blake3:2d50e940627e3bf555f015280ec0976d5c1fa100f7bc94e95ffbfc770e98b6fe",
        "name": "CustomXLv7",
        "base": "sdxl",
        "type": "main"
      }
      "label": "SDXL Model"
    }
  },
@@ -1642,4 +1621,4 @@
      "targetHandle": "noise"
    }
  ]
}
}

@@ -1,10 +1,11 @@
{
  "name": "Prompt from File",
  "id": "default_d7a1c60f-ca2f-4f90-9e33-75a826ca6d8f",
  "name": "Text to Image - SD1.5, Prompt from File",
  "author": "InvokeAI",
  "description": "Sample workflow using Prompt from File node",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "text2image, prompt from file, default",
  "tags": "sd1.5, text to image",
  "notes": "",
  "exposedFields": [
    {
@@ -512,4 +513,4 @@
      "targetHandle": "vae"
    }
  ]
}
}

@@ -3,12 +3,14 @@
Workflows placed in this directory will be synced to the `workflow_library` as
_default workflows_ on app startup.

- Default workflows must have an id that starts with "default\_". The ID must be retained when the workflow is updated; you may need to do this manually (a quick check for this and the `meta.category` requirement is sketched after this list).
- Default workflows are not editable by users. If they are loaded and saved,
  they will save as a copy of the default workflow.
- Default workflows must have the `meta.category` property set to `"default"`.
  An exception will be raised during sync if this is not set correctly.
- Default workflows appear on the "Default Workflows" tab of the Workflow
  Library.
- Default workflows should not reference any user-created or user-installed resources, including images and models. For example, if a default workflow references Juggernaut as an SDXL model, a user's installed copy of Juggernaut will have a different UUID, so the reference will not resolve and the user may see a warning. It is best to ship default workflows without references to these kinds of resources.
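A hypothetical pre-flight check for the two hard requirements above (the "default\_" id prefix and `meta.category`); the directory path below is an assumption about where this README lives:

import json
from pathlib import Path

for f in Path("invokeai/app/services/workflow_records/default_workflows").glob("*.json"):
    wf = json.loads(f.read_text())
    assert wf["id"].startswith("default_"), f"{f.name}: id must start with 'default_'"
    assert wf["meta"]["category"] == "default", f"{f.name}: meta.category must be 'default'"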

After adding or updating default workflows, you **must** start the app up and
load them to ensure:

@@ -1,382 +1,375 @@
{
  "name": "SD3.5 Text to Image",
  "id": "default_dbe46d95-22aa-43fb-9c16-94400d0ce2fd",
  "name": "Text to Image - SD3.5",
  "author": "InvokeAI",
  "description": "Sample text to image workflow for Stable Diffusion 3.5",
  "version": "1.0.0",
  "contact": "invoke@invoke.ai",
  "tags": "text2image, SD3.5, default",
  "tags": "SD3.5, text to image",
  "notes": "",
  "exposedFields": [
    {
      "nodeId": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "fieldName": "model"
    },
    {
      "nodeId": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "fieldName": "prompt"
    }
  ],
  "meta": {
    "version": "3.0.0",
    "category": "default"
  },
  "id": "e3a51d6b-8208-4d6d-b187-fcfe8b32934c",
  "nodes": [
    {
      "id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "type": "invocation",
      "data": {
        "id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
        "type": "sd3_model_loader",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "nodePack": "invokeai",
        "inputs": {
          "model": {
            "name": "model",
            "label": "",
            "value": {
              "key": "f7b20be9-92a8-4cfb-bca4-6c3b5535c10b",
              "hash": "placeholder",
              "name": "stable-diffusion-3.5-medium",
              "base": "sd-3",
              "type": "main"
            }
            "label": ""
          },
          "t5_encoder_model": {
            "name": "t5_encoder_model",
            "label": ""
          },
          "clip_l_model": {
            "name": "clip_l_model",
            "label": ""
          },
          "clip_g_model": {
            "name": "clip_g_model",
            "label": ""
          },
          "vae_model": {
            "name": "vae_model",
            "label": ""
          }
        },
        "position": {
          "x": -55.58689609637031,
          "y": -111.53602444662268
        }
      }
    },
    {
      "id": "f7e394ac-6394-4096-abcb-de0d346506b3",
      "type": "invocation",
      "data": {
        "id": "f7e394ac-6394-4096-abcb-de0d346506b3",
        "type": "rand_int",
        "version": "1.0.1",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": false,
        "nodePack": "invokeai",
        "inputs": {
          "low": {
            "name": "low",
            "label": "",
            "value": 0
          },
          "high": {
            "name": "high",
            "label": "",
            "value": 2147483647
          }
        },
        "position": {
          "x": 470.45870147220353,
          "y": 350.3141781644303
        }
      }
    },
    {
      "id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
      "type": "invocation",
      "data": {
        "id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
        "type": "sd3_l2i",
        "version": "1.3.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": false,
        "useCache": true,
        "nodePack": "invokeai",
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "latents": {
            "name": "latents",
            "label": ""
          },
          "vae": {
            "name": "vae",
            "label": ""
          }
        },
        "position": {
          "x": 1192.3097009334897,
          "y": -366.0994675072209
        }
      }
    },
    {
      "id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
      "type": "invocation",
      "data": {
        "id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
        "type": "sd3_text_encoder",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "nodePack": "invokeai",
        "inputs": {
          "clip_l": {
            "name": "clip_l",
            "label": ""
          },
          "clip_g": {
            "name": "clip_g",
            "label": ""
          },
          "t5_encoder": {
            "name": "t5_encoder",
            "label": ""
          },
          "prompt": {
            "name": "prompt",
            "label": "",
            "value": ""
          }
        },
        "position": {
          "x": 408.16054647924784,
          "y": 65.06415352118786
        }
      }
    },
    {
      "id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "type": "invocation",
      "data": {
        "id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
        "type": "sd3_text_encoder",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "nodePack": "invokeai",
        "inputs": {
          "clip_l": {
            "name": "clip_l",
            "label": ""
          },
          "clip_g": {
            "name": "clip_g",
            "label": ""
          },
          "t5_encoder": {
            "name": "t5_encoder",
            "label": ""
          },
          "prompt": {
            "name": "prompt",
            "label": "",
            "value": ""
          }
        },
        "position": {
          "x": 378.9283412440941,
          "y": -302.65777497352553
        }
      }
    },
    {
      "id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "type": "invocation",
      "data": {
        "id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
        "type": "sd3_denoise",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "nodePack": "invokeai",
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "transformer": {
            "name": "transformer",
            "label": ""
          },
          "positive_conditioning": {
            "name": "positive_conditioning",
            "label": ""
          },
          "negative_conditioning": {
            "name": "negative_conditioning",
            "label": ""
          },
          "cfg_scale": {
            "name": "cfg_scale",
            "label": "",
            "value": 3.5
          },
          "width": {
            "name": "width",
            "label": "",
            "value": 1024
          },
          "height": {
            "name": "height",
            "label": "",
            "value": 1024
          },
          "steps": {
            "name": "steps",
            "label": "",
            "value": 30
          },
          "seed": {
            "name": "seed",
            "label": "",
            "value": 0
          }
        },
        "position": {
          "x": 813.7814762740603,
          "y": -142.20529727605867
        }
      }
    }
  ],
  "edges": [
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cvae-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48bvae",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-3b4f7f27-cfc0-4373-a009-99c5290d0cd6t5_encoder",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
      "sourceHandle": "t5_encoder",
      "targetHandle": "t5_encoder"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-e17d34e7-6ed1-493c-9a85-4fcd291cb084t5_encoder",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "sourceHandle": "t5_encoder",
      "targetHandle": "t5_encoder"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_g",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
      "sourceHandle": "clip_g",
      "targetHandle": "clip_g"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_g",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "sourceHandle": "clip_g",
      "targetHandle": "clip_g"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_l",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
      "sourceHandle": "clip_l",
      "targetHandle": "clip_l"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_l",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "sourceHandle": "clip_l",
      "targetHandle": "clip_l"
    },
    {
      "id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ctransformer-c7539f7b-7ac5-49b9-93eb-87ede611409ftransformer",
      "type": "default",
      "source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
      "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "sourceHandle": "transformer",
      "targetHandle": "transformer"
    },
    {
      "id": "reactflow__edge-f7e394ac-6394-4096-abcb-de0d346506b3value-c7539f7b-7ac5-49b9-93eb-87ede611409fseed",
      "type": "default",
      "source": "f7e394ac-6394-4096-abcb-de0d346506b3",
      "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "sourceHandle": "value",
      "targetHandle": "seed"
    },
    {
      "id": "reactflow__edge-c7539f7b-7ac5-49b9-93eb-87ede611409flatents-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48blatents",
      "type": "default",
      "source": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
      "sourceHandle": "latents",
      "targetHandle": "latents"
    },
    {
      "id": "reactflow__edge-e17d34e7-6ed1-493c-9a85-4fcd291cb084conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fpositive_conditioning",
      "type": "default",
      "source": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
      "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "sourceHandle": "conditioning",
      "targetHandle": "positive_conditioning"
    },
    {
      "id": "reactflow__edge-3b4f7f27-cfc0-4373-a009-99c5290d0cd6conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fnegative_conditioning",
      "type": "default",
      "source": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
      "target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
      "sourceHandle": "conditioning",
      "targetHandle": "negative_conditioning"
    }
  ]
}
|
||||
|
||||
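The long reactflow__edge-* ids above are not opaque: each is the source node id concatenated with its source handle, then a hyphen, then the target node id concatenated with its target handle. A minimal sketch of the pattern (the helper name is hypothetical, not part of the workflow schema):

def make_edge_id(source: str, source_handle: str, target: str, target_handle: str) -> str:
    # e.g. "reactflow__edge-<source><sourceHandle>-<target><targetHandle>",
    # matching the edge ids in the JSON above.
    return f"reactflow__edge-{source}{source_handle}-{target}{target_handle}"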
@@ -1,10 +1,11 @@
{
  "id": "default_7dde3e36-d78f-4152-9eea-00ef9c8124ed",
  "name": "Text to Image - SD1.5",
  "author": "InvokeAI",
  "description": "Sample text to image workflow for Stable Diffusion 1.5/2",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "text2image, SD1.5, SD2, default",
  "tags": "SD1.5, text to image",
  "notes": "",
  "exposedFields": [
    {

@@ -416,4 +417,4 @@
      "targetHandle": "vae"
    }
  ]
}
}
@@ -1,10 +1,11 @@
{
  "id": "default_5e8b008d-c697-45d0-8883-085a954c6ace",
  "name": "Text to Image - SDXL",
  "author": "InvokeAI",
  "description": "Sample text to image workflow for SDXL",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "text2image, SDXL, default",
  "tags": "SDXL, text to image",
  "notes": "",
  "exposedFields": [
    {

@@ -45,14 +46,7 @@
      "inputs": {
        "vae_model": {
          "name": "vae_model",
          "label": "VAE (use the FP16 model)",
          "value": {
            "key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069",
            "hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
            "name": "sdxl-vae-fp16-fix",
            "base": "sdxl",
            "type": "vae"
          }
          "label": "VAE (use the FP16 model)"
        }
      },
      "isOpen": true,

@@ -202,14 +196,7 @@
      "inputs": {
        "model": {
          "name": "model",
          "label": "",
          "value": {
            "key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba",
            "hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba",
            "name": "dreamshaper-xl-v2-turbo",
            "base": "sdxl",
            "type": "main"
          }
          "label": ""
        }
      },
      "isOpen": true,

@@ -714,4 +701,4 @@
      "targetHandle": "style"
    }
  ]
}
}
@@ -1,10 +1,11 @@
{
  "name": "Text to Image with LoRA",
  "id": "default_e71d153c-2089-43c7-bd2c-f61f37d4c1c1",
  "name": "Text to Image - SD1.5, LoRA",
  "author": "InvokeAI",
  "description": "Simple text to image workflow with a LoRA",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "text to image, lora, default",
  "tags": "sd1.5, text to image, lora",
  "notes": "",
  "exposedFields": [
    {
@@ -1,10 +1,11 @@
{
  "name": "Tiled Upscaling (Beta)",
  "id": "default_43b0d7f7-6a12-4dcf-a5a4-50c940cbee29",
  "name": "Upscaler - SD1.5, Tiled",
  "author": "Invoke",
  "description": "A workflow to upscale an input image with tiled upscaling. ",
  "version": "2.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "tiled, upscaling, sd1.5",
  "tags": "sd1.5, upscaling",
  "notes": "",
  "exposedFields": [
    {

@@ -85,14 +86,7 @@
      },
      "ip_adapter_model": {
        "name": "ip_adapter_model",
        "label": "IP-Adapter Model (select ip_adapter_sd15)",
        "value": {
          "key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
          "hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
          "name": "ip_adapter_sd15",
          "base": "sd-1",
          "type": "ip_adapter"
        }
        "label": "IP-Adapter Model (select ip_adapter_sd15)"
      },
      "clip_vision_model": {
        "name": "clip_vision_model",

@@ -200,14 +194,7 @@
      },
      "control_model": {
        "name": "control_model",
        "label": "Control Model (select contro_v11f1e_sd15_tile)",
        "value": {
          "key": "773843c8-db1f-4502-8f65-59782efa7960",
          "hash": "blake3:f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
          "name": "control_v11f1e_sd15_tile",
          "base": "sd-1",
          "type": "controlnet"
        }
        "label": "Control Model (select control_v11f1e_sd15_tile)"
      },
      "control_weight": {
        "name": "control_weight",

@@ -1815,4 +1802,4 @@
      "targetHandle": "unet"
    }
  ]
}
}
@@ -41,10 +41,36 @@ class WorkflowRecordsStorageBase(ABC):
        self,
        order_by: WorkflowRecordOrderBy,
        direction: SQLiteDirection,
        category: WorkflowCategory,
        categories: Optional[list[WorkflowCategory]],
        page: int,
        per_page: Optional[int],
        query: Optional[str],
        tags: Optional[list[str]],
        has_been_opened: Optional[bool],
    ) -> PaginatedResults[WorkflowRecordListItemDTO]:
        """Gets many workflows."""
        pass

    @abstractmethod
    def counts_by_category(
        self,
        categories: list[WorkflowCategory],
        has_been_opened: Optional[bool] = None,
    ) -> dict[str, int]:
        """Gets a dictionary of counts for each of the provided categories."""
        pass

    @abstractmethod
    def counts_by_tag(
        self,
        tags: list[str],
        categories: Optional[list[WorkflowCategory]] = None,
        has_been_opened: Optional[bool] = None,
    ) -> dict[str, int]:
        """Gets a dictionary of counts for each of the provided tags."""
        pass

    @abstractmethod
    def update_opened_at(self, workflow_id: str) -> None:
        """Open a workflow."""
        pass
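A minimal sketch of how a caller might exercise the new counting methods (the workflow_records handle is illustrative; it stands in for any concrete implementation of the base class above):

# Hypothetical usage of the abstract interface above.
counts = workflow_records.counts_by_category(
    categories=[WorkflowCategory.User, WorkflowCategory.Default],
)
# -> e.g. {"user": 12, "default": 9}
tag_counts = workflow_records.counts_by_tag(tags=["sd1.5", "upscaling"])
# -> e.g. {"sd1.5": 4, "upscaling": 2}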
@@ -1,6 +1,6 @@
import datetime
from enum import Enum
from typing import Any, Union
from typing import Any, Optional, Union

import semver
from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, field_validator

@@ -36,9 +36,7 @@ class WorkflowCategory(str, Enum, metaclass=MetaEnum):

class WorkflowMeta(BaseModel):
    version: str = Field(description="The version of the workflow schema.")
    category: WorkflowCategory = Field(
        default=WorkflowCategory.User, description="The category of the workflow (user or default)."
    )
    category: WorkflowCategory = Field(description="The category of the workflow (user or default).")

    @field_validator("version")
    def validate_version(cls, version: str):

@@ -100,7 +98,9 @@ class WorkflowRecordDTOBase(BaseModel):
    name: str = Field(description="The name of the workflow.")
    created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the workflow.")
    updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the workflow.")
    opened_at: Union[datetime.datetime, str] = Field(description="The opened timestamp of the workflow.")
    opened_at: Optional[Union[datetime.datetime, str]] = Field(
        default=None, description="The opened timestamp of the workflow."
    )


class WorkflowRecordDTO(WorkflowRecordDTOBase):

@@ -118,6 +118,15 @@ WorkflowRecordDTOValidator = TypeAdapter(WorkflowRecordDTO)
class WorkflowRecordListItemDTO(WorkflowRecordDTOBase):
    description: str = Field(description="The description of the workflow.")
    category: WorkflowCategory = Field(description="The category of the workflow.")
    tags: str = Field(description="The tags of the workflow.")


WorkflowRecordListItemDTOValidator = TypeAdapter(WorkflowRecordListItemDTO)


class WorkflowRecordWithThumbnailDTO(WorkflowRecordDTO):
    thumbnail_url: str | None = Field(default=None, description="The URL of the workflow thumbnail.")


class WorkflowRecordListItemWithThumbnailDTO(WorkflowRecordListItemDTO):
    thumbnail_url: str | None = Field(default=None, description="The URL of the workflow thumbnail.")
@@ -14,11 +14,13 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowRecordListItemDTO,
    WorkflowRecordListItemDTOValidator,
    WorkflowRecordOrderBy,
    WorkflowValidator,
    WorkflowWithoutID,
    WorkflowWithoutIDValidator,
)
from invokeai.app.util.misc import uuid_string

SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%f"


class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
    def __init__(self, db: SqliteDatabase) -> None:

@@ -32,15 +34,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
    def get(self, workflow_id: str) -> WorkflowRecordDTO:
        """Gets a workflow by ID. Updates the opened_at column."""
        cursor = self._conn.cursor()
        cursor.execute(
            """--sql
            UPDATE workflow_library
            SET opened_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
            WHERE workflow_id = ?;
            """,
            (workflow_id,),
        )
        self._conn.commit()
        cursor.execute(
            """--sql
            SELECT workflow_id, workflow, name, created_at, updated_at, opened_at

@@ -55,9 +48,10 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        return WorkflowRecordDTO.from_dict(dict(row))

    def create(self, workflow: WorkflowWithoutID) -> WorkflowRecordDTO:
        if workflow.meta.category is WorkflowCategory.Default:
            raise ValueError("Default workflows cannot be created via this method")

        try:
            # Only user workflows may be created by this method
            assert workflow.meta.category is WorkflowCategory.User
            workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
            cursor = self._conn.cursor()
            cursor.execute(

@@ -77,6 +71,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        return self.get(workflow_with_id.id)

    def update(self, workflow: Workflow) -> WorkflowRecordDTO:
        if workflow.meta.category is WorkflowCategory.Default:
            raise ValueError("Default workflows cannot be updated")

        try:
            cursor = self._conn.cursor()
            cursor.execute(

@@ -94,6 +91,9 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        return self.get(workflow.id)

    def delete(self, workflow_id: str) -> None:
        if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
            raise ValueError("Default workflows cannot be deleted")

        try:
            cursor = self._conn.cursor()
            cursor.execute(

@@ -113,45 +113,108 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        self,
        order_by: WorkflowRecordOrderBy,
        direction: SQLiteDirection,
        category: WorkflowCategory,
        categories: Optional[list[WorkflowCategory]],
        page: int = 0,
        per_page: Optional[int] = None,
        query: Optional[str] = None,
        tags: Optional[list[str]] = None,
        has_been_opened: Optional[bool] = None,
    ) -> PaginatedResults[WorkflowRecordListItemDTO]:
        # sanitize!
        assert order_by in WorkflowRecordOrderBy
        assert direction in SQLiteDirection
        assert category in WorkflowCategory
        count_query = "SELECT COUNT(*) FROM workflow_library WHERE category = ?"
        main_query = """
            SELECT
                workflow_id,
                category,
                name,
                description,
                created_at,
                updated_at,
                opened_at
            FROM workflow_library
            WHERE category = ?
            """
        main_params: list[int | str] = [category.value]
        count_params: list[int | str] = [category.value]

        # We will construct the query dynamically based on the query params

        # The main query to get the workflows / counts
        main_query = """
            SELECT
                workflow_id,
                category,
                name,
                description,
                created_at,
                updated_at,
                opened_at,
                tags
            FROM workflow_library
            """
        count_query = "SELECT COUNT(*) FROM workflow_library"

        # Start with an empty list of conditions and params
        conditions: list[str] = []
        params: list[str | int] = []

        if categories:
            # Categories is a list of WorkflowCategory enum values, and a single string in the DB

            # Ensure all categories are valid (is this necessary?)
            assert all(c in WorkflowCategory for c in categories)

            # Construct a placeholder string for the number of categories
            placeholders = ", ".join("?" for _ in categories)

            # Construct the condition string & params
            category_condition = f"category IN ({placeholders})"
            category_params = [category.value for category in categories]

            conditions.append(category_condition)
            params.extend(category_params)

        if tags:
            # Tags is a list of strings, and a single string in the DB
            # The string in the DB has no guaranteed format

            # Construct a list of conditions for each tag
            tags_conditions = ["tags LIKE ?" for _ in tags]
            tags_conditions_joined = " OR ".join(tags_conditions)
            tags_condition = f"({tags_conditions_joined})"

            # And the params for the tags, case-insensitive
            tags_params = [f"%{t.strip()}%" for t in tags]

            conditions.append(tags_condition)
            params.extend(tags_params)

        if has_been_opened:
            conditions.append("opened_at IS NOT NULL")
        elif has_been_opened is False:
            conditions.append("opened_at IS NULL")

        # Ignore whitespace in the query
        stripped_query = query.strip() if query else None
        if stripped_query:
            # Construct a wildcard query for the name, description, and tags
            wildcard_query = "%" + stripped_query + "%"
            main_query += " AND name LIKE ? OR description LIKE ? "
            count_query += " AND name LIKE ? OR description LIKE ?;"
            main_params.extend([wildcard_query, wildcard_query])
            count_params.extend([wildcard_query, wildcard_query])
            query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"

            conditions.append(query_condition)
            params.extend([wildcard_query, wildcard_query, wildcard_query])

        if conditions:
            # If there are conditions, add a WHERE clause and then join the conditions
            main_query += " WHERE "
            count_query += " WHERE "

            all_conditions = " AND ".join(conditions)
            main_query += all_conditions
            count_query += all_conditions

        # After this point, the query and params differ for the main query and the count query
        main_params = params.copy()
        count_params = params.copy()

        # Main query also gets ORDER BY and LIMIT/OFFSET
        main_query += f" ORDER BY {order_by.value} {direction.value}"

        if per_page:
            main_query += " LIMIT ? OFFSET ?"
            main_params.extend([per_page, page * per_page])

        # Put a ring on it
        main_query += ";"
        count_query += ";"

        cursor = self._conn.cursor()
        cursor.execute(main_query, main_params)
        rows = cursor.fetchall()
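To make the dynamic construction above concrete: assuming order_by.value is "name" and direction.value is "ASC", a call with categories=[WorkflowCategory.User], tags=["portrait"], query="cat", per_page=10, page=0 would assemble roughly this query and parameter list (whitespace condensed for readability):

# Illustrative result of the query-building logic above, not literal output.
main_query = (
    "SELECT workflow_id, category, name, description, created_at, updated_at, opened_at, tags "
    "FROM workflow_library "
    "WHERE category IN (?) AND (tags LIKE ?) "
    "AND (name LIKE ? OR description LIKE ? OR tags LIKE ?) "
    "ORDER BY name ASC LIMIT ? OFFSET ?;"
)
main_params = ["user", "%portrait%", "%cat%", "%cat%", "%cat%", 10, 0]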
@@ -173,6 +236,122 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
            total=total,
        )

    def counts_by_tag(
        self,
        tags: list[str],
        categories: Optional[list[WorkflowCategory]] = None,
        has_been_opened: Optional[bool] = None,
    ) -> dict[str, int]:
        if not tags:
            return {}

        cursor = self._conn.cursor()
        result: dict[str, int] = {}
        # Base conditions for categories and selected tags
        base_conditions: list[str] = []
        base_params: list[str | int] = []

        # Add category conditions
        if categories:
            assert all(c in WorkflowCategory for c in categories)
            placeholders = ", ".join("?" for _ in categories)
            base_conditions.append(f"category IN ({placeholders})")
            base_params.extend([category.value for category in categories])

        if has_been_opened:
            base_conditions.append("opened_at IS NOT NULL")
        elif has_been_opened is False:
            base_conditions.append("opened_at IS NULL")

        # For each tag to count, run a separate query
        for tag in tags:
            # Start with the base conditions
            conditions = base_conditions.copy()
            params = base_params.copy()

            # Add this specific tag condition
            conditions.append("tags LIKE ?")
            params.append(f"%{tag.strip()}%")

            # Construct the full query
            stmt = """--sql
                SELECT COUNT(*)
                FROM workflow_library
                """

            if conditions:
                stmt += " WHERE " + " AND ".join(conditions)

            cursor.execute(stmt, params)
            count = cursor.fetchone()[0]
            result[tag] = count

        return result

    def counts_by_category(
        self,
        categories: list[WorkflowCategory],
        has_been_opened: Optional[bool] = None,
    ) -> dict[str, int]:
        cursor = self._conn.cursor()
        result: dict[str, int] = {}
        # Base conditions for categories
        base_conditions: list[str] = []
        base_params: list[str | int] = []

        # Add category conditions
        if categories:
            assert all(c in WorkflowCategory for c in categories)
            placeholders = ", ".join("?" for _ in categories)
            base_conditions.append(f"category IN ({placeholders})")
            base_params.extend([category.value for category in categories])

        if has_been_opened:
            base_conditions.append("opened_at IS NOT NULL")
        elif has_been_opened is False:
            base_conditions.append("opened_at IS NULL")

        # For each category to count, run a separate query
        for category in categories:
            # Start with the base conditions
            conditions = base_conditions.copy()
            params = base_params.copy()

            # Add this specific category condition
            conditions.append("category = ?")
            params.append(category.value)

            # Construct the full query
            stmt = """--sql
                SELECT COUNT(*)
                FROM workflow_library
                """

            if conditions:
                stmt += " WHERE " + " AND ".join(conditions)

            cursor.execute(stmt, params)
            count = cursor.fetchone()[0]
            result[category.value] = count

        return result

    def update_opened_at(self, workflow_id: str) -> None:
        try:
            cursor = self._conn.cursor()
            cursor.execute(
                f"""--sql
                UPDATE workflow_library
                SET opened_at = STRFTIME('{SQL_TIME_FORMAT}', 'NOW')
                WHERE workflow_id = ?;
                """,
                (workflow_id,),
            )
            self._conn.commit()
        except Exception:
            self._conn.rollback()
            raise
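Note that has_been_opened is deliberately tri-state in both counting methods: None adds no condition, True filters to previously opened workflows, and False to never-opened ones. A sketch of that mapping as a standalone helper (not in the codebase; purely illustrative):

def opened_condition(has_been_opened: Optional[bool]) -> Optional[str]:
    # Mirrors the branches used in the methods above.
    if has_been_opened is None:
        return None
    return "opened_at IS NOT NULL" if has_been_opened else "opened_at IS NULL"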
    def _sync_default_workflows(self) -> None:
        """Syncs default workflows to the database. Internal use only."""

@@ -187,27 +366,68 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
        """

        try:
            workflows: list[Workflow] = []
            cursor = self._conn.cursor()
            workflows_from_file: list[Workflow] = []
            workflows_to_update: list[Workflow] = []
            workflows_to_add: list[Workflow] = []
            workflows_dir = Path(__file__).parent / Path("default_workflows")
            workflow_paths = workflows_dir.glob("*.json")
            for path in workflow_paths:
                bytes_ = path.read_bytes()
                workflow_without_id = WorkflowWithoutIDValidator.validate_json(bytes_)
                workflow = Workflow(**workflow_without_id.model_dump(), id=uuid_string())
                workflows.append(workflow)
            # Only default workflows may be managed by this method
            assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
            cursor = self._conn.cursor()
            cursor.execute(
                """--sql
                DELETE FROM workflow_library
                WHERE category = 'default';
                """
            )
            for w in workflows:
                workflow_from_file = WorkflowValidator.validate_json(bytes_)

                assert workflow_from_file.id.startswith("default_"), (
                    f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
                )

                assert workflow_from_file.meta.category is WorkflowCategory.Default, (
                    f"Invalid default workflow category: {workflow_from_file.meta.category}"
                )

                workflows_from_file.append(workflow_from_file)

                try:
                    workflow_from_db = self.get(workflow_from_file.id).workflow
                    if workflow_from_file != workflow_from_db:
                        self._invoker.services.logger.debug(
                            f"Updating library workflow {workflow_from_file.name} ({workflow_from_file.id})"
                        )
                        workflows_to_update.append(workflow_from_file)
                    continue
                except WorkflowNotFoundError:
                    self._invoker.services.logger.debug(
                        f"Adding missing default workflow {workflow_from_file.name} ({workflow_from_file.id})"
                    )
                    workflows_to_add.append(workflow_from_file)
                    continue

            library_workflows_from_db = self.get_many(
                order_by=WorkflowRecordOrderBy.Name,
                direction=SQLiteDirection.Ascending,
                categories=[WorkflowCategory.Default],
            ).items

            workflows_from_file_ids = [w.id for w in workflows_from_file]

            for w in library_workflows_from_db:
                if w.workflow_id not in workflows_from_file_ids:
                    self._invoker.services.logger.debug(
                        f"Deleting obsolete default workflow {w.name} ({w.workflow_id})"
                    )
                    # We cannot use the `delete` method here, as it only deletes non-default workflows
                    cursor.execute(
                        """--sql
                        DELETE from workflow_library
                        WHERE workflow_id = ?;
                        """,
                        (w.workflow_id,),
                    )

            for w in workflows_to_add:
                # We cannot use the `create` method here, as it only creates non-default workflows
                cursor.execute(
                    """--sql
                    INSERT OR REPLACE INTO workflow_library (
                    INSERT INTO workflow_library (
                        workflow_id,
                        workflow
                    )

@@ -215,6 +435,18 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                    """,
                    (w.id, w.model_dump_json()),
                )

            for w in workflows_to_update:
                # We cannot use the `update` method here, as it only updates non-default workflows
                cursor.execute(
                    """--sql
                    UPDATE workflow_library
                    SET workflow = ?
                    WHERE workflow_id = ?;
                    """,
                    (w.model_dump_json(), w.id),
                )

            self._conn.commit()
        except Exception:
            self._conn.rollback()
@@ -0,0 +1,28 @@
from abc import ABC, abstractmethod
from pathlib import Path

from PIL import Image


class WorkflowThumbnailServiceBase(ABC):
    """Base class for workflow thumbnail services"""

    @abstractmethod
    def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
        """Gets the path to a workflow thumbnail"""
        pass

    @abstractmethod
    def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
        """Gets the URL of a workflow thumbnail"""
        pass

    @abstractmethod
    def save(self, workflow_id: str, image: Image.Image) -> None:
        """Saves a workflow thumbnail"""
        pass

    @abstractmethod
    def delete(self, workflow_id: str) -> None:
        """Deletes a workflow thumbnail"""
        pass
@@ -0,0 +1,22 @@
class WorkflowThumbnailFileNotFoundException(Exception):
    """Raised when a workflow thumbnail file is not found"""

    def __init__(self, message: str = "Workflow thumbnail file not found"):
        self.message = message
        super().__init__(self.message)


class WorkflowThumbnailFileSaveException(Exception):
    """Raised when a workflow thumbnail file cannot be saved"""

    def __init__(self, message: str = "Workflow thumbnail file cannot be saved"):
        self.message = message
        super().__init__(self.message)


class WorkflowThumbnailFileDeleteException(Exception):
    """Raised when a workflow thumbnail file cannot be deleted"""

    def __init__(self, message: str = "Workflow thumbnail file cannot be deleted"):
        self.message = message
        super().__init__(self.message)
@@ -0,0 +1,87 @@
from pathlib import Path

from PIL import Image
from PIL.Image import Image as PILImageType

from invokeai.app.services.invoker import Invoker
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowCategory
from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_base import WorkflowThumbnailServiceBase
from invokeai.app.services.workflow_thumbnails.workflow_thumbnails_common import (
    WorkflowThumbnailFileDeleteException,
    WorkflowThumbnailFileNotFoundException,
    WorkflowThumbnailFileSaveException,
)
from invokeai.app.util.misc import uuid_string
from invokeai.app.util.thumbnails import make_thumbnail


class WorkflowThumbnailFileStorageDisk(WorkflowThumbnailServiceBase):
    def __init__(self, thumbnails_path: Path):
        self._workflow_thumbnail_folder = thumbnails_path
        self._validate_storage_folders()

    def start(self, invoker: Invoker) -> None:
        self._invoker = invoker

    def get(self, workflow_id: str) -> PILImageType:
        try:
            path = self.get_path(workflow_id)

            return Image.open(path)
        except FileNotFoundError as e:
            raise WorkflowThumbnailFileNotFoundException from e

    def save(self, workflow_id: str, image: PILImageType) -> None:
        try:
            self._validate_storage_folders()
            image_path = self._workflow_thumbnail_folder / (workflow_id + ".webp")
            thumbnail = make_thumbnail(image, 256)
            thumbnail.save(image_path, format="webp")

        except Exception as e:
            raise WorkflowThumbnailFileSaveException from e

    def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
        workflow = self._invoker.services.workflow_records.get(workflow_id).workflow
        if workflow.meta.category is WorkflowCategory.Default:
            default_thumbnails_dir = Path(__file__).parent / Path("default_workflow_thumbnails")
            path = default_thumbnails_dir / (workflow_id + ".png")
        else:
            path = self._workflow_thumbnail_folder / (workflow_id + ".webp")

        return path

    def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
        path = self.get_path(workflow_id)
        if not self._validate_path(path):
            return

        url = self._invoker.services.urls.get_workflow_thumbnail_url(workflow_id)

        # The image URL never changes, so we must add random query string to it to prevent caching
        if with_hash:
            url += f"?{uuid_string()}"

        return url

    def delete(self, workflow_id: str) -> None:
        try:
            path = self.get_path(workflow_id)

            if not self._validate_path(path):
                raise WorkflowThumbnailFileNotFoundException

            path.unlink()

        except WorkflowThumbnailFileNotFoundException as e:
            raise WorkflowThumbnailFileNotFoundException from e
        except Exception as e:
            raise WorkflowThumbnailFileDeleteException from e

    def _validate_path(self, path: Path) -> bool:
        """Validates the path given for an image."""
        return path.exists()

    def _validate_storage_folders(self) -> None:
        """Checks if the required folders exist and create them if they don't"""
        self._workflow_thumbnail_folder.mkdir(parents=True, exist_ok=True)
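A minimal usage sketch for the disk service above (the folder path and image are illustrative; start() is normally called by the app with its Invoker and is required before get_path/get_url, though save() only touches the user thumbnail folder):

from pathlib import Path
from PIL import Image

svc = WorkflowThumbnailFileStorageDisk(Path("/tmp/workflow_thumbnails"))
img = Image.new("RGB", (512, 512))
svc.save("some-workflow-id", img)
# -> writes /tmp/workflow_thumbnails/some-workflow-id.webp, resized to 256px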
@@ -3,7 +3,11 @@ from typing import Optional
import torch
import torchvision

from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning, FluxTextConditioning
from invokeai.backend.flux.text_conditioning import (
    FluxReduxConditioning,
    FluxRegionalTextConditioning,
    FluxTextConditioning,
)
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.mask import to_standard_float_mask

@@ -32,14 +36,19 @@ class RegionalPromptingExtension:
        return order[block_index % len(order)]

    @classmethod
    def from_text_conditioning(cls, text_conditioning: list[FluxTextConditioning], img_seq_len: int):
    def from_text_conditioning(
        cls,
        text_conditioning: list[FluxTextConditioning],
        redux_conditioning: list[FluxReduxConditioning],
        img_seq_len: int,
    ):
        """Create a RegionalPromptingExtension from a list of text conditionings.

        Args:
            text_conditioning (list[FluxTextConditioning]): The text conditionings to use for regional prompting.
            img_seq_len (int): The image sequence length (i.e. packed_height * packed_width).
        """
        regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning)
        regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning, redux_conditioning)
        attn_mask_with_restricted_img_self_attn = cls._prepare_restricted_attn_mask(
            regional_text_conditioning, img_seq_len
        )

@@ -202,6 +211,7 @@ class RegionalPromptingExtension:
    def _concat_regional_text_conditioning(
        cls,
        text_conditionings: list[FluxTextConditioning],
        redux_conditionings: list[FluxReduxConditioning],
    ) -> FluxRegionalTextConditioning:
        """Concatenate regional text conditioning data into a single conditioning tensor (with associated masks)."""
        concat_t5_embeddings: list[torch.Tensor] = []

@@ -217,18 +227,27 @@ class RegionalPromptingExtension:
                global_clip_embedding = text_conditioning.clip_embeddings
                break

        # Handle T5 text embeddings.
        cur_t5_embedding_len = 0
        for text_conditioning in text_conditionings:
            concat_t5_embeddings.append(text_conditioning.t5_embeddings)

            concat_t5_embedding_ranges.append(
                Range(start=cur_t5_embedding_len, end=cur_t5_embedding_len + text_conditioning.t5_embeddings.shape[1])
            )

            image_masks.append(text_conditioning.mask)

            cur_t5_embedding_len += text_conditioning.t5_embeddings.shape[1]

        # Handle Redux embeddings.
        for redux_conditioning in redux_conditionings:
            concat_t5_embeddings.append(redux_conditioning.redux_embeddings)
            concat_t5_embedding_ranges.append(
                Range(
                    start=cur_t5_embedding_len, end=cur_t5_embedding_len + redux_conditioning.redux_embeddings.shape[1]
                )
            )
            image_masks.append(redux_conditioning.mask)
            cur_t5_embedding_len += redux_conditioning.redux_embeddings.shape[1]

        t5_embeddings = torch.cat(concat_t5_embeddings, dim=1)

        # Initialize the txt_ids tensor.
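A small worked example of the range bookkeeping above (shapes illustrative): with one text conditioning whose T5 embedding is 512 tokens and one Redux conditioning of 729 tokens, the concatenated sequence is 1241 tokens, with the text at Range(start=0, end=512) and the redux embeddings at Range(start=512, end=1241):

# Illustrative shapes only; 4096 is the FLUX text-embedding width.
t5 = torch.zeros(1, 512, 4096)      # text_conditioning.t5_embeddings
redux = torch.zeros(1, 729, 4096)   # redux_conditioning.redux_embeddings
ranges = [Range(start=0, end=512), Range(start=512, end=1241)]
combined = torch.cat([t5, redux], dim=1)
assert combined.shape == (1, 1241, 4096)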
invokeai/backend/flux/redux/flux_redux_model.py (new file, +17)
@@ -0,0 +1,17 @@
import torch

# This model definition is based on:
# https://github.com/black-forest-labs/flux/blob/716724eb276d94397be99710a0a54d352664e23b/src/flux/modules/image_embedders.py#L66


class FluxReduxModel(torch.nn.Module):
    def __init__(self, redux_dim: int = 1152, txt_in_features: int = 4096) -> None:
        super().__init__()

        self.redux_dim = redux_dim

        self.redux_up = torch.nn.Linear(redux_dim, txt_in_features * 3)
        self.redux_down = torch.nn.Linear(txt_in_features * 3, txt_in_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.redux_down(torch.nn.functional.silu(self.redux_up(x)))
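A quick shape check for the model above: features of width redux_dim=1152 (SigLIP's hidden size) are projected up to 3 * 4096, passed through SiLU, and projected down to the FLUX text-embedding width of 4096. The sequence length here is illustrative:

model = FluxReduxModel()
x = torch.randn(1, 729, 1152)  # e.g. one SigLIP patch embedding per token
y = model(x)
assert y.shape == (1, 729, 4096)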
invokeai/backend/flux/redux/flux_redux_state_dict_utils.py (new file, +11)
@@ -0,0 +1,11 @@
from typing import Any, Dict


def is_state_dict_likely_flux_redux(state_dict: Dict[str, Any]) -> bool:
    """Checks if the provided state dict is likely a FLUX Redux model."""

    expected_keys = {"redux_down.bias", "redux_down.weight", "redux_up.bias", "redux_up.weight"}
    if set(state_dict.keys()) == expected_keys:
        return True

    return False
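A sketch of how this probe might be exercised directly (the checkpoint filename is illustrative; load_file is the safetensors loader also used by the Redux model loader later in this diff):

from safetensors.torch import load_file

sd = load_file("flux1-redux-dev.safetensors")
if is_state_dict_likely_flux_redux(sd):
    print("Looks like a FLUX Redux checkpoint")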
@@ -13,6 +13,13 @@ class FluxTextConditioning:
    mask: torch.Tensor | None


@dataclass
class FluxReduxConditioning:
    redux_embeddings: torch.Tensor
    # If mask is None, the prompt is a global prompt.
    mask: torch.Tensor | None


@dataclass
class FluxRegionalTextConditioning:
    # Concatenated text embeddings.
@@ -91,10 +91,10 @@ class PromptFormatter:

        switches = []
        switches.append(f'"{opt.prompt}"')
        switches.append(f"-s{opt.steps or t2i.steps}")
        switches.append(f"-W{opt.width or t2i.width}")
        switches.append(f"-H{opt.height or t2i.height}")
        switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
        switches.append(f"-s{opt.steps or t2i.steps}")
        switches.append(f"-W{opt.width or t2i.width}")
        switches.append(f"-H{opt.height or t2i.height}")
        switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
        switches.append(f"-A{opt.sampler_name or t2i.sampler_name}")
        # to do: put model name into the t2i object
        # switches.append(f'--model{t2i.model_name}')

@@ -109,7 +109,7 @@ class PromptFormatter:
        if opt.gfpgan_strength:
            switches.append(f"-G{opt.gfpgan_strength}")
        if opt.upscale:
            switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
            switches.append(f"-U {' '.join([str(u) for u in opt.upscale])}")
        if opt.variation_amount > 0:
            switches.append(f"-v{opt.variation_amount}")
        if opt.with_variations:
@@ -76,6 +76,8 @@ class ModelType(str, Enum):
    T2IAdapter = "t2i_adapter"
    T5Encoder = "t5_encoder"
    SpandrelImageToImage = "spandrel_image_to_image"
    SigLIP = "siglip"
    FluxRedux = "flux_redux"


class SubModelType(str, Enum):

@@ -528,6 +530,28 @@ class SpandrelImageToImageConfig(ModelConfigBase):
        return Tag(f"{ModelType.SpandrelImageToImage.value}.{ModelFormat.Checkpoint.value}")


class SigLIPConfig(DiffusersConfigBase):
    """Model config for SigLIP."""

    type: Literal[ModelType.SigLIP] = ModelType.SigLIP
    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.SigLIP.value}.{ModelFormat.Diffusers.value}")


class FluxReduxConfig(ModelConfigBase):
    """Model config for FLUX Tools Redux model."""

    type: Literal[ModelType.FluxRedux] = ModelType.FluxRedux
    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.FluxRedux.value}.{ModelFormat.Checkpoint.value}")


def get_model_discriminator_value(v: Any) -> str:
    """
    Computes the discriminator value for a model config.

@@ -575,6 +599,8 @@ AnyModelConfig = Annotated[
        Annotated[CLIPEmbedDiffusersConfig, CLIPEmbedDiffusersConfig.get_tag()],
        Annotated[CLIPLEmbedDiffusersConfig, CLIPLEmbedDiffusersConfig.get_tag()],
        Annotated[CLIPGEmbedDiffusersConfig, CLIPGEmbedDiffusersConfig.get_tag()],
        Annotated[SigLIPConfig, SigLIPConfig.get_tag()],
        Annotated[FluxReduxConfig, FluxReduxConfig.get_tag()],
    ],
    Discriminator(get_model_discriminator_value),
]
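The discriminated union resolves configs by a "<type>.<format>" tag string. Assuming the ModelFormat enum values are "checkpoint" and "diffusers" (they are not shown in this hunk), the new tags evaluate as follows:

# Spot-check of the get_tag() pattern above (format enum values assumed).
assert f"{ModelType.FluxRedux.value}.{ModelFormat.Checkpoint.value}" == "flux_redux.checkpoint"
assert f"{ModelType.SigLIP.value}.{ModelFormat.Diffusers.value}" == "siglip.diffusers"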
@@ -70,7 +70,7 @@ def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: O

    def get_msg_line(prefix: str, val1: int, val2: int) -> str:
        diff = val2 - val1
        return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
        return f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB\n"

    msg = ""

@@ -192,7 +192,7 @@ class ModelCache:
        self._cached_models[key] = cache_record
        self._cache_stack.append(key)
        self._logger.debug(
            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
        )

    @synchronized

@@ -303,7 +303,7 @@ class ModelCache:
        # 2. If the model can't fit fully into VRAM, then unload all other models and load as much of the model as
        # possible.
        vram_bytes_freed = self._offload_unlocked_models(model_vram_needed, working_mem_bytes)
        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed/MB):.2f}MB")
        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed / MB):.2f}MB")

        # Check the updated vram_available after offloading.
        vram_available = self._get_vram_available(working_mem_bytes)

@@ -317,7 +317,7 @@ class ModelCache:
            vram_bytes_freed_from_own_model = self._move_model_to_ram(cache_entry, -vram_available)
            vram_available = self._get_vram_available(working_mem_bytes)
            self._logger.debug(
                f"Unloaded {vram_bytes_freed_from_own_model/MB:.2f}MB from the model being locked ({cache_entry.key})."
                f"Unloaded {vram_bytes_freed_from_own_model / MB:.2f}MB from the model being locked ({cache_entry.key})."
            )

        # Move as much of the model as possible into VRAM.

@@ -333,10 +333,12 @@ class ModelCache:
        self._logger.info(
            f"Loaded model '{cache_entry.key}' ({cache_entry.cached_model.model.__class__.__name__}) onto "
            f"{self._execution_device.type} device in {(time.time() - start_time):.2f}s. "
            f"Total model size: {model_total_bytes/MB:.2f}MB, "
            f"VRAM: {model_cur_vram_bytes/MB:.2f}MB ({loaded_percent:.1%})"
            f"Total model size: {model_total_bytes / MB:.2f}MB, "
            f"VRAM: {model_cur_vram_bytes / MB:.2f}MB ({loaded_percent:.1%})"
        )
        self._logger.debug(
            f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded / MB):.2f}MB, "
        )
        self._logger.debug(f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded/MB):.2f}MB, ")
        self._logger.debug(
            f"After loading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
        )

@@ -495,10 +497,10 @@ class ModelCache:
        """Helper function for preparing a VRAM state log string."""
        model_cur_vram_bytes_percent = model_cur_vram_bytes / model_total_bytes if model_total_bytes > 0 else 0
        return (
            f"model_total={model_total_bytes/MB:.0f} MB, "
            + f"model_vram={model_cur_vram_bytes/MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
            f"model_total={model_total_bytes / MB:.0f} MB, "
            + f"model_vram={model_cur_vram_bytes / MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
            # + f"vram_total={int(self._max_vram_cache_size * GB)/MB:.0f} MB, "
            + f"vram_available={(vram_available/MB):.0f} MB, "
            + f"vram_available={(vram_available / MB):.0f} MB, "
        )

    def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes: Optional[int] = None) -> int:

@@ -509,7 +511,7 @@ class ModelCache:
            int: The number of bytes freed based on believed model sizes. The actual change in VRAM may be different.
        """
        self._logger.debug(
            f"Offloading unlocked models with goal of making room for {vram_bytes_required/MB:.2f}MB of VRAM."
            f"Offloading unlocked models with goal of making room for {vram_bytes_required / MB:.2f}MB of VRAM."
        )
        vram_bytes_freed = 0
        # TODO(ryand): Give more thought to the offloading policy used here.

@@ -527,7 +529,7 @@ class ModelCache:
            cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free)
            if cache_entry_bytes_freed > 0:
                self._logger.debug(
                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed/MB):.0f} MB."
                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed / MB):.0f} MB."
                )
            vram_bytes_freed += cache_entry_bytes_freed

@@ -609,7 +611,7 @@ class ModelCache:
        external references to the model, there's nothing that the cache can do about it, and those models will not be
        garbage-collected.
        """
        self._logger.debug(f"Making room for {bytes_needed/MB:.2f}MB of RAM.")
        self._logger.debug(f"Making room for {bytes_needed / MB:.2f}MB of RAM.")
        self._log_cache_state(title="Before dropping models:")

        ram_bytes_available = self._get_ram_available()

@@ -625,7 +627,7 @@ class ModelCache:
            if not cache_entry.is_locked:
                ram_bytes_freed += cache_entry.cached_model.total_bytes()
                self._logger.debug(
                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes()/MB):.2f}MB."
                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes() / MB):.2f}MB."
                )
                self._delete_cache_entry(cache_entry)
                del cache_entry

@@ -650,7 +652,7 @@ class ModelCache:
        gc.collect()

        TorchDevice.empty_cache()
        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed/MB:.2f}MB of RAM.")
        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed / MB:.2f}MB of RAM.")
        self._log_cache_state(title="After dropping models:")

    def _delete_cache_entry(self, cache_entry: CacheRecord) -> None:
@@ -25,6 +25,7 @@ from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import (
)
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
from invokeai.backend.flux.util import ae_params, params
from invokeai.backend.model_manager import (
    AnyModel,

@@ -39,6 +40,7 @@ from invokeai.backend.model_manager.config import (
    CLIPEmbedDiffusersConfig,
    ControlNetCheckpointConfig,
    ControlNetDiffusersConfig,
    FluxReduxConfig,
    IPAdapterCheckpointConfig,
    MainBnbQuantized4bCheckpointConfig,
    MainCheckpointConfig,

@@ -391,3 +393,25 @@ class FluxIpAdapterModel(ModelLoader):

        model.load_xlabs_state_dict(sd, assign=True)
        return model


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.FluxRedux, format=ModelFormat.Checkpoint)
class FluxReduxModelLoader(ModelLoader):
    """Class to load FLUX Redux models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, FluxReduxConfig):
            raise ValueError(f"Unexpected model config type: {type(config)}.")

        sd = load_file(Path(config.path))

        with accelerate.init_empty_weights():
            model = FluxReduxModel()

        model.load_state_dict(sd, assign=True)
        model.to(dtype=torch.bfloat16)
        return model
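The Redux loader above uses a common low-memory loading pattern: accelerate.init_empty_weights() constructs the module on the meta device without allocating parameter storage, and load_state_dict(..., assign=True) then binds the checkpoint tensors directly rather than copying into freshly allocated weights. A stripped-down sketch of the same pattern:

import accelerate
import torch

with accelerate.init_empty_weights():
    m = torch.nn.Linear(4, 4)  # parameters live on the meta device; no memory allocated yet
m.load_state_dict({"weight": torch.eye(4), "bias": torch.zeros(4)}, assign=True)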
@@ -0,0 +1,32 @@
from pathlib import Path
from typing import Optional

from invokeai.backend.model_manager.config import (
    AnyModel,
    AnyModelConfig,
    BaseModelType,
    ModelFormat,
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.SigLIP, format=ModelFormat.Diffusers)
class SigLIPModelLoader(ModelLoader):
    """Class for loading SigLIP models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if submodel_type is not None:
            raise ValueError("Unexpected submodel requested for SigLIP model.")

        model_path = Path(config.path)
        model = SigLipPipeline.load_from_path(model_path)
        model.to(dtype=self._torch_dtype)
        return model
@@ -18,6 +18,7 @@ from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.textual_inversion import TextualInversionModelRaw
from invokeai.backend.util.calc_tensor_size import calc_tensor_size

@@ -48,6 +49,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
            GroundingDinoPipeline,
            SegmentAnythingPipeline,
            DepthAnythingPipeline,
            SigLipPipeline,
        ),
    ):
        return model.calc_size()
@@ -115,19 +115,19 @@ class ModelMerger(object):
        base_models: Set[BaseModelType] = set()
        variant = None if self._installer.app_config.precision == "float32" else "fp16"

        assert (
            len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
        ), "When merging three models, only the 'add_difference' merge method is supported"
        assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
            "When merging three models, only the 'add_difference' merge method is supported"
        )

        for key in model_keys:
            info = store.get_model(key)
            model_names.append(info.name)
            assert isinstance(
                info, MainDiffusersConfig
            ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
            assert info.variant == ModelVariantType(
                "normal"
            ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
            assert isinstance(info, MainDiffusersConfig), (
                f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
            )
            assert info.variant == ModelVariantType("normal"), (
                f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
            )

            # tally base models used
            base_models.add(info.base)
@@ -15,6 +15,7 @@ from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_xlabs_controlnet,
)
from invokeai.backend.flux.ip_adapter.state_dict_utils import is_state_dict_xlabs_ip_adapter
from invokeai.backend.flux.redux.flux_redux_state_dict_utils import is_state_dict_likely_flux_redux
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.config import (
    AnyModelConfig,

@@ -139,6 +140,7 @@ class ModelProbe(object):
        "FluxControlNetModel": ModelType.ControlNet,
        "SD3Transformer2DModel": ModelType.Main,
        "CLIPTextModelWithProjection": ModelType.CLIPEmbed,
        "SiglipModel": ModelType.SigLIP,
    }

    TYPE2VARIANT: Dict[ModelType, Callable[[str], Optional[AnyVariant]]] = {ModelType.CLIPEmbed: get_clip_variant_type}

@@ -267,6 +269,9 @@ class ModelProbe(object):
        if isinstance(ckpt, dict) and is_state_dict_likely_flux_control(ckpt):
            return ModelType.ControlLoRa

        if isinstance(ckpt, dict) and is_state_dict_likely_flux_redux(ckpt):
            return ModelType.FluxRedux

        for key in [str(k) for k in ckpt.keys()]:
            if key.startswith(
                (

@@ -752,6 +757,16 @@ class SpandrelImageToImageCheckpointProbe(CheckpointProbeBase):
        return BaseModelType.Any


class SigLIPCheckpointProbe(CheckpointProbeBase):
    def get_base_type(self) -> BaseModelType:
        raise NotImplementedError()


class FluxReduxCheckpointProbe(CheckpointProbeBase):
    def get_base_type(self) -> BaseModelType:
        return BaseModelType.Flux


########################################################
# classes for probing folders
#######################################################

@@ -1022,6 +1037,16 @@ class SpandrelImageToImageFolderProbe(FolderProbeBase):
        raise NotImplementedError()


class SigLIPFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        return BaseModelType.Any


class FluxReduxFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        raise NotImplementedError()


class T2IAdapterFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        config_file = self.model_path / "config.json"

@@ -1055,6 +1080,8 @@ ModelProbe.register_probe("diffusers", ModelType.CLIPEmbed, CLIPEmbedFolderProbe
ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelImageToImageFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.SigLIP, SigLIPFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.FluxRedux, FluxReduxFolderProbe)

ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.VAE, VaeCheckpointProbe)

@@ -1066,5 +1093,7 @@ ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpoint
ModelProbe.register_probe("checkpoint", ModelType.CLIPVision, CLIPVisionCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.T2IAdapter, T2IAdapterCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.SpandrelImageToImage, SpandrelImageToImageCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.SigLIP, SigLIPCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.FluxRedux, FluxReduxCheckpointProbe)

ModelProbe.register_probe("onnx", ModelType.ONNX, ONNXFolderProbe)
@@ -593,6 +593,26 @@ swinir = StarterModel(

# endregion

# region SigLIP
siglip = StarterModel(
    name="SigLIP - google/siglip-so400m-patch14-384",
    base=BaseModelType.Any,
    source="google/siglip-so400m-patch14-384",
    description="A SigLIP model (used by FLUX Redux).",
    type=ModelType.SigLIP,
)
# endregion

# region FLUX Redux
flux_redux = StarterModel(
    name="FLUX Redux",
    base=BaseModelType.Flux,
    source="black-forest-labs/FLUX.1-Redux-dev::flux1-redux-dev.safetensors",
    description="FLUX Redux model (for image variation).",
    type=ModelType.FluxRedux,
    dependencies=[siglip],
)
# endregion

# List of starter models, displayed on the frontend.
# The order/sort of this list is not changed by the frontend - set it how you want it here.

@@ -661,6 +681,8 @@ STARTER_MODELS: list[StarterModel] = [
    t5_base_encoder,
    t5_8b_quantized_encoder,
    clip_l_encoder,
    siglip,
    flux_redux,
]

sd1_bundle: list[StarterModel] = [

@@ -708,6 +730,7 @@ flux_bundle: list[StarterModel] = [
    ip_adapter_flux,
    flux_canny_control_lora,
    flux_depth_control_lora,
    flux_redux,
]

STARTER_BUNDLES: dict[str, list[StarterModel]] = {
@@ -37,19 +37,21 @@ class Struct_mallinfo2(ctypes.Structure):

    def __str__(self) -> str:
        s = ""
        s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
        s += (
            f"{'arena': <10}= {(self.arena / 2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
        )
        s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
        s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n"
        s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n"
        s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
        s += f"{'hblkhd': <10}= {(self.hblkhd / 2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
        s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
        s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
        s += f"{'fsmblks': <10}= {(self.fsmblks / 2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
        s += (
            f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
            f"{'uordblks': <10}= {(self.uordblks / 2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
            " (GB)\n"
        )
        s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
        s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
        s += f"{'fordblks': <10}= {(self.fordblks / 2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
        s += f"{'keepcost': <10}= {(self.keepcost / 2**30):15.5f} # Top-most, releasable space (GB)\n"
        return s
@@ -73,36 +73,36 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commentout for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}." # change for sdxl
            sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{2}." # change for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."

@@ -111,7 +111,7 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        sd_mid_res_prefix = f"middle_block.{2 * j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [

@@ -133,13 +133,13 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
        unet_conversion_map.append((sd, hf))

    for j in range(2):
        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
        sd_time_embed_prefix = f"time_embed.{j*2}."
        hf_time_embed_prefix = f"time_embedding.linear_{j + 1}."
        sd_time_embed_prefix = f"time_embed.{j * 2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
        sd_label_embed_prefix = f"label_emb.0.{j*2}."
        hf_label_embed_prefix = f"add_embedding.linear_{j + 1}."
        sd_label_embed_prefix = f"label_emb.0.{j * 2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
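The hunks above only reformat the arithmetic inside the prefix pairs. For orientation, here is a minimal sketch (not the repository's exact converter) of how a list of (sd_prefix, hf_prefix) pairs like the one built above is typically applied to rename checkpoint keys, shown in the diffusers-to-ldm direction:

from typing import Dict, List, Tuple

import torch


def rename_unet_keys(
    state_dict: Dict[str, torch.Tensor], conversion_map: List[Tuple[str, str]]
) -> Dict[str, torch.Tensor]:
    converted: Dict[str, torch.Tensor] = {}
    for key, value in state_dict.items():
        new_key = key
        for sd_prefix, hf_prefix in conversion_map:
            # Each pair maps a diffusers (HF) prefix to the ldm (SD) prefix.
            if hf_prefix in new_key:
                new_key = new_key.replace(hf_prefix, sd_prefix)
        converted[new_key] = value
    return converted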
invokeai/backend/sig_lip/sig_lip_pipeline.py (Normal file, 43 lines)
@@ -0,0 +1,43 @@
from pathlib import Path
from typing import Optional

import torch
from PIL import Image
from transformers import SiglipImageProcessor, SiglipVisionModel

from invokeai.backend.raw_model import RawModel


class SigLipPipeline(RawModel):
    """A wrapper for a SigLIP model + processor."""

    def __init__(
        self,
        siglip_processor: SiglipImageProcessor,
        siglip_model: SiglipVisionModel,
    ):
        self._siglip_processor = siglip_processor
        self._siglip_model = siglip_model

    @classmethod
    def load_from_path(cls, path: str | Path):
        siglip_model = SiglipVisionModel.from_pretrained(path, local_files_only=True)
        assert isinstance(siglip_model, SiglipVisionModel)
        siglip_processor = SiglipImageProcessor.from_pretrained(path, local_files_only=True)
        assert isinstance(siglip_processor, SiglipImageProcessor)
        return cls(siglip_processor, siglip_model)

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        self._siglip_model.to(device=device, dtype=dtype)

    def encode_image(self, x: Image.Image, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
        imgs = self._siglip_processor.preprocess(images=[x], do_resize=True, return_tensors="pt", do_convert_rgb=True)
        encoded_x = self._siglip_model(**imgs.to(device=device, dtype=dtype)).last_hidden_state
        return encoded_x

    def calc_size(self) -> int:
        """Get size of the model in memory in bytes."""
        # HACK(ryand): Fix this issue with circular imports.
        from invokeai.backend.model_manager.load.model_util import calc_module_size

        return calc_module_size(self._siglip_model)
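A usage sketch for the SigLipPipeline wrapper added above; the checkpoint path, device, and dtype here are illustrative assumptions, not values from the repository:

import torch
from PIL import Image

pipeline = SigLipPipeline.load_from_path("/path/to/siglip")  # hypothetical local checkpoint
pipeline.to(device=torch.device("cuda"), dtype=torch.bfloat16)

image = Image.open("example.png")
# encode_image returns the vision tower's last_hidden_state:
# shape (1, num_patches, hidden_size) for a single input image.
embeddings = pipeline.encode_image(image, device=torch.device("cuda"), dtype=torch.bfloat16)
print(embeddings.shape)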
@@ -60,14 +60,13 @@
    "@fontsource-variable/inter": "^5.1.0",
    "@invoke-ai/ui-library": "^0.0.46",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.5.1",
    "@reduxjs/toolkit": "2.6.1",
    "@roarr/browser-log-writer": "^1.3.0",
    "@xyflow/react": "^12.4.2",
    "async-mutex": "^0.5.0",
    "chakra-react-select": "^4.9.2",
    "cmdk": "^1.0.0",
    "compare-versions": "^6.1.1",
    "dateformat": "^5.0.3",
    "fracturedjsonjs": "^4.0.2",
    "framer-motion": "^11.10.0",
    "i18next": "^23.15.1",

@@ -101,7 +100,7 @@
    "react-resizable-panels": "^2.1.4",
    "react-textarea-autosize": "^8.5.7",
    "react-use": "^17.5.1",
    "react-virtuoso": "^4.10.4",
    "react-virtuoso": "^4.12.5",
    "redux-dynamic-middlewares": "^2.2.0",
    "redux-remember": "^5.1.0",
    "redux-undo": "^1.1.0",

@@ -131,7 +130,6 @@
    "@storybook/react": "^8.3.4",
    "@storybook/react-vite": "^8.5.5",
    "@storybook/theming": "^8.3.4",
    "@types/dateformat": "^5.0.2",
    "@types/lodash-es": "^4.17.12",
    "@types/node": "^20.16.10",
    "@types/react": "^18.3.11",
invokeai/frontend/web/pnpm-lock.yaml (generated, 42 lines changed)
@@ -30,8 +30,8 @@ dependencies:
      specifier: ^0.7.3
      version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
  '@reduxjs/toolkit':
    specifier: 2.5.1
    version: 2.5.1(react-redux@9.1.2)(react@18.3.1)
    specifier: 2.6.1
    version: 2.6.1(react-redux@9.1.2)(react@18.3.1)
  '@roarr/browser-log-writer':
    specifier: ^1.3.0
    version: 1.3.0

@@ -50,9 +50,6 @@ dependencies:
  compare-versions:
    specifier: ^6.1.1
    version: 6.1.1
  dateformat:
    specifier: ^5.0.3
    version: 5.0.3
  fracturedjsonjs:
    specifier: ^4.0.2
    version: 4.0.2

@@ -153,8 +150,8 @@ dependencies:
    specifier: ^17.5.1
    version: 17.5.1(react-dom@18.3.1)(react@18.3.1)
  react-virtuoso:
    specifier: ^4.10.4
    version: 4.10.4(react-dom@18.3.1)(react@18.3.1)
    specifier: ^4.12.5
    version: 4.12.5(react-dom@18.3.1)(react@18.3.1)
  redux-dynamic-middlewares:
    specifier: ^2.2.0
    version: 2.2.0

@@ -226,9 +223,6 @@ devDependencies:
  '@storybook/theming':
    specifier: ^8.3.4
    version: 8.3.4(storybook@8.3.4)
  '@types/dateformat':
    specifier: ^5.0.2
    version: 5.0.2
  '@types/lodash-es':
    specifier: ^4.17.12
    version: 4.17.12

@@ -2317,8 +2311,8 @@ packages:
      - supports-color
    dev: true

  /@reduxjs/toolkit@2.5.1(react-redux@9.1.2)(react@18.3.1):
    resolution: {integrity: sha512-UHhy3p0oUpdhnSxyDjaRDYaw8Xra75UiLbCiRozVPHjfDwNYkh0TsVm/1OmTW8Md+iDAJmYPWUKMvsMc2GtpNg==}
  /@reduxjs/toolkit@2.6.1(react-redux@9.1.2)(react@18.3.1):
    resolution: {integrity: sha512-SSlIqZNYhqm/oMkXbtofwZSt9lrncblzo6YcZ9zoX+zLngRBrCOjK4lNLdkNucJF58RHOWrD9txT3bT3piH7Zw==}
    peerDependencies:
      react: ^16.9.0 || ^17.0.0 || ^18 || ^19
      react-redux: ^7.2.1 || ^8.1.3 || ^9.0.0

@@ -3355,10 +3349,6 @@ packages:
      '@types/d3-selection': 3.0.10
    dev: false

  /@types/dateformat@5.0.2:
    resolution: {integrity: sha512-M95hNBMa/hnwErH+a+VOD/sYgTmo15OTYTM2Hr52/e0OdOuY+Crag+kd3/ioZrhg0WGbl9Sm3hR7UU+MH6rfOw==}
    dev: true

  /@types/diff-match-patch@1.0.36:
    resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==}
    dev: false

@@ -3435,7 +3425,7 @@ packages:
  /@types/lodash.mergewith@4.6.7:
    resolution: {integrity: sha512-3m+lkO5CLRRYU0fhGRp7zbsGi6+BZj0uTVSwvcKU+nSlhjA9/QRNfuSGnD2mX6hQA7ZbmcCkzk5h4ZYGOtk14A==}
    dependencies:
      '@types/lodash': 4.17.15
      '@types/lodash': 4.17.16
    dev: false

  /@types/lodash.mergewith@4.6.9:

@@ -3447,8 +3437,8 @@ packages:
  /@types/lodash@4.17.10:
    resolution: {integrity: sha512-YpS0zzoduEhuOWjAotS6A5AVCva7X4lVlYLF0FYHAY9sdraBfnatttHItlWeZdGhuEkf+OzMNg2ZYAx8t+52uQ==}

  /@types/lodash@4.17.15:
    resolution: {integrity: sha512-w/P33JFeySuhN6JLkysYUK2gEmy9kHHFN7E8ro0tkfmlDOgxBDzWEZ/J8cWA+fHqFevpswDTFZnDx+R9lbL6xw==}
  /@types/lodash@4.17.16:
    resolution: {integrity: sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==}
    dev: false

  /@types/mdx@2.0.13:

@@ -4856,11 +4846,6 @@ packages:
      '@babel/runtime': 7.25.7
    dev: true

  /dateformat@5.0.3:
    resolution: {integrity: sha512-Kvr6HmPXUMerlLcLF+Pwq3K7apHpYmGDVqrxcDasBg86UcKeTSNWbEzU8bwdXnxnR44FtMhJAxI4Bov6Y/KUfA==}
    engines: {node: '>=12.20'}
    dev: false

  /de-indent@1.0.2:
    resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==}
    dev: true

@@ -7913,12 +7898,11 @@ packages:
      tslib: 2.7.0
    dev: false

  /react-virtuoso@4.10.4(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-G/gprhTbK+lzMxoo/iStcZxVEGph/cIhc3WANEpt92RuMw+LiCZOmBfKoeoZOHlm/iyftTrDJhGaTCpxyucnkQ==}
    engines: {node: '>=10'}
  /react-virtuoso@4.12.5(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-YeCbRRsC9CLf0buD0Rct7WsDbzf+yBU1wGbo05/XjbcN2nJuhgh040m3y3+6HVogTZxEqVm45ac9Fpae4/MxRQ==}
    peerDependencies:
      react: '>=16 || >=17 || >= 18'
      react-dom: '>=16 || >=17 || >= 18'
      react: '>=16 || >=17 || >= 18 || >= 19'
      react-dom: '>=16 || >=17 || >= 18 || >=19'
    dependencies:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
@@ -113,7 +113,8 @@
    "end": "Ende",
    "layout": "Layout",
    "board": "Ordner",
    "combinatorial": "Kombinatorisch"
    "combinatorial": "Kombinatorisch",
    "saveChanges": "Änderungen speichern"
  },
  "gallery": {
    "galleryImageSize": "Bildgröße",

@@ -761,7 +762,16 @@
    "workflowDeleted": "Arbeitsablauf gelöscht",
    "errorCopied": "Fehler kopiert",
    "layerCopiedToClipboard": "Ebene in die Zwischenablage kopiert",
    "sentToCanvas": "An Leinwand gesendet"
    "sentToCanvas": "An Leinwand gesendet",
    "problemDeletingWorkflow": "Problem beim Löschen des Arbeitsablaufs",
    "uploadFailedInvalidUploadDesc_withCount_one": "Es darf maximal 1 PNG- oder JPEG-Bild sein.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Es dürfen maximal {{count}} PNG- oder JPEG-Bilder sein.",
    "problemRetrievingWorkflow": "Problem beim Abrufen des Arbeitsablaufs",
    "uploadFailedInvalidUploadDesc": "Müssen PNG- oder JPEG-Bilder sein.",
    "pasteSuccess": "Eingefügt in {{destination}}",
    "pasteFailed": "Einfügen fehlgeschlagen",
    "unableToCopy": "Kopieren nicht möglich",
    "unableToCopyDesc_theseSteps": "diese Schritte"
  },
  "accessibility": {
    "uploadImage": "Bild hochladen",

@@ -1314,7 +1324,8 @@
    "nodeName": "Knotenname",
    "description": "Beschreibung",
    "loadWorkflowDesc": "Arbeitsablauf laden?",
    "loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen."
    "loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen.",
    "loadingTemplates": "Lade {{name}}"
  },
  "hrf": {
    "enableHrf": "Korrektur für hohe Auflösungen",
@@ -116,6 +116,8 @@
    "dontShowMeThese": "Don't show me these",
    "editor": "Editor",
    "error": "Error",
    "error_withCount_one": "{{count}} error",
    "error_withCount_other": "{{count}} errors",
    "file": "File",
    "folder": "Folder",
    "format": "format",

@@ -149,6 +151,7 @@
    "safetensors": "Safetensors",
    "save": "Save",
    "saveAs": "Save As",
    "saveChanges": "Save Changes",
    "settingsLabel": "Settings",
    "simple": "Simple",
    "somethingWentWrong": "Something went wrong",

@@ -761,6 +764,7 @@
    "deleteMsg2": "This WILL delete the model from disk if it is in the InvokeAI root folder. If you are using a custom location, then the model WILL NOT be deleted from disk.",
    "description": "Description",
    "edit": "Edit",
    "fluxRedux": "FLUX Redux",
    "height": "Height",
    "huggingFace": "HuggingFace",
    "huggingFacePlaceholder": "owner/model-name",

@@ -835,6 +839,7 @@
    "settings": "Settings",
    "simpleModelPlaceholder": "URL or path to a local file or diffusers folder",
    "source": "Source",
    "sigLip": "SigLIP",
    "spandrelImageToImage": "Image to Image (Spandrel)",
    "starterBundles": "Starter Bundles",
    "starterBundleHelpText": "Easily install all models needed to get started with a base model, including a main model, controlnets, IP adapters, and more. Selecting a bundle will skip any models that you already have installed.",

@@ -1202,6 +1207,7 @@
    "informationalPopoversDisabled": "Informational Popovers Disabled",
    "informationalPopoversDisabledDesc": "Informational popovers have been disabled. Enable them in Settings.",
    "enableModelDescriptions": "Enable Model Descriptions in Dropdowns",
    "enableHighlightFocusedRegions": "Highlight Focused Regions",
    "modelDescriptionsDisabled": "Model Descriptions in Dropdowns Disabled",
    "modelDescriptionsDisabledDesc": "Model descriptions in dropdowns have been disabled. Enable them in Settings.",
    "enableInvisibleWatermark": "Enable Invisible Watermark",

@@ -1283,9 +1289,9 @@
    "somethingWentWrong": "Something Went Wrong",
    "uploadFailed": "Upload failed",
    "imagesWillBeAddedTo": "Uploaded images will be added to board {{boardName}}'s assets.",
    "uploadFailedInvalidUploadDesc_withCount_one": "Must be maximum of 1 PNG or JPEG image.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Must be maximum of {{count}} PNG or JPEG images.",
    "uploadFailedInvalidUploadDesc": "Must be PNG or JPEG images.",
    "uploadFailedInvalidUploadDesc_withCount_one": "Must be maximum of 1 PNG, JPEG or WEBP image.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Must be maximum of {{count}} PNG, JPEG or WEBP images.",
    "uploadFailedInvalidUploadDesc": "Must be PNG, JPEG or WEBP images.",
    "workflowLoaded": "Workflow Loaded",
    "problemRetrievingWorkflow": "Problem Retrieving Workflow",
    "workflowDeleted": "Workflow Deleted",

@@ -1682,12 +1688,22 @@
    "created": "Created",
    "descending": "Descending",
    "workflows": "Workflows",
    "workflowLibrary": "Library",
    "workflowLibrary": "Workflow Library",
    "loadMore": "Load More",
    "allLoaded": "All Workflows Loaded",
    "searchPlaceholder": "Search by name, description or tags",
    "filterByTags": "Filter by Tags",
    "yourWorkflows": "Your Workflows",
    "recentlyOpened": "Recently Opened",
    "noRecentWorkflows": "No Recent Workflows",
    "private": "Private",
    "shared": "Shared",
    "browseWorkflows": "Browse Workflows",
    "deselectAll": "Deselect All",
    "opened": "Opened",
    "openWorkflow": "Open Workflow",
    "updated": "Updated",
    "uploadWorkflow": "Load from File",
    "uploadAndSaveWorkflow": "Upload to Library",
    "deleteWorkflow": "Delete Workflow",
    "deleteWorkflow2": "Are you sure you want to delete this workflow? This cannot be undone.",
    "unnamedWorkflow": "Unnamed Workflow",

@@ -1714,11 +1730,14 @@
    "loadWorkflow": "$t(common.load) Workflow",
    "autoLayout": "Auto Layout",
    "edit": "Edit",
    "view": "View",
    "download": "Download",
    "copyShareLink": "Copy Share Link",
    "copyShareLinkForWorkflow": "Copy Share Link for Workflow",
    "delete": "Delete",
    "openLibrary": "Open Library",
    "workflowThumbnail": "Workflow Thumbnail",
    "saveChanges": "Save Changes",
    "builder": {
      "deleteAllElements": "Delete All Form Elements",
      "resetAllNodeFields": "Reset All Node Fields",

@@ -1727,6 +1746,8 @@
      "row": "Row",
      "column": "Column",
      "container": "Container",
      "containerRowLayout": "Container (row layout)",
      "containerColumnLayout": "Container (column layout)",
      "heading": "Heading",
      "text": "Text",
      "divider": "Divider",

@@ -1747,7 +1768,9 @@
      "containerPlaceholder": "Empty Container",
      "headingPlaceholder": "Empty Heading",
      "textPlaceholder": "Empty Text",
      "workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release."
      "workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release.",
      "minimum": "Minimum",
      "maximum": "Maximum"
    }
  },
  "controlLayers": {

@@ -2299,6 +2322,7 @@
  "newUserExperience": {
    "toGetStartedLocal": "To get started, make sure to download or import models needed to run Invoke. Then, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
    "toGetStarted": "To get started, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
    "toGetStartedWorkflow": "To get started, fill in the fields on the left and press <StrongComponent>Invoke</StrongComponent> to generate your image. Want to explore more workflows? Click the <StrongComponent>folder icon</StrongComponent> next to the workflow title to see a list of other templates you can try.",
    "gettingStartedSeries": "Want more guidance? Check out our <LinkComponent>Getting Started Series</LinkComponent> for tips on unlocking the full potential of the Invoke Studio.",
    "lowVRAMMode": "For best performance, follow our <LinkComponent>Low VRAM guide</LinkComponent>.",
    "noModelsInstalled": "It looks like you don't have any models installed! You can <DownloadStarterModelsButton>download a starter model bundle</DownloadStarterModelsButton> or <ImportModelsButton>import models</ImportModelsButton>."

@@ -2306,8 +2330,8 @@
  "whatsNew": {
    "whatsNewInInvoke": "What's New in Invoke",
    "items": [
      "Memory Management: New setting for users with Nvidia GPUs to reduce VRAM usage.",
      "Performance: Continued improvements to overall application performance and responsiveness."
      "Workflows: New and improved Workflow Library.",
      "FLUX: Support for FLUX Redux in Workflows and Canvas."
    ],
    "readReleaseNotes": "Read Release Notes",
    "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -1771,7 +1771,6 @@
    "projectWorkflows": "Workflows du projet",
    "copyShareLink": "Copier le lien de partage",
    "chooseWorkflowFromLibrary": "Choisir le Workflow dans la Bibliothèque",
    "uploadAndSaveWorkflow": "Importer dans la bibliothèque",
    "edit": "Modifer",
    "deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce Workflow ? Cette action ne peut pas être annulé.",
    "download": "Télécharger",
@@ -109,7 +109,11 @@
    "board": "Bacheca",
    "layout": "Schema",
    "row": "Riga",
    "column": "Colonna"
    "column": "Colonna",
    "saveChanges": "Salva modifiche",
    "error_withCount_one": "{{count}} errore",
    "error_withCount_many": "{{count}} errori",
    "error_withCount_other": "{{count}} errori"
  },
  "gallery": {
    "galleryImageSize": "Dimensione dell'immagine",

@@ -784,7 +788,7 @@
    "serverError": "Errore del Server",
    "connected": "Connesso al server",
    "canceled": "Elaborazione annullata",
    "uploadFailedInvalidUploadDesc": "Devono essere immagini PNG o JPEG.",
    "uploadFailedInvalidUploadDesc": "Devono essere immagini PNG, JPEG o WEBP.",
    "parameterSet": "Parametro richiamato",
    "parameterNotSet": "Parametro non richiamato",
    "problemCopyingImage": "Impossibile copiare l'immagine",

@@ -835,9 +839,9 @@
    "linkCopied": "Collegamento copiato",
    "addedToUncategorized": "Aggiunto alle risorse della bacheca $t(boards.uncategorized)",
    "imagesWillBeAddedTo": "Le immagini caricate verranno aggiunte alle risorse della bacheca {{boardName}}.",
    "uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG o JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG, JPEG o WEBP.",
    "uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG, JPEG o WEBP.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG, JPEG o WEBP.",
    "outOfMemoryErrorDescLocal": "Segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent> per ridurre gli OOM.",
    "pasteFailed": "Incolla non riuscita",
    "pasteSuccess": "Incollato su {{destination}}",

@@ -1704,7 +1708,7 @@
    "saveWorkflow": "Salva flusso di lavoro",
    "openWorkflow": "Apri flusso di lavoro",
    "clearWorkflowSearchFilter": "Cancella il filtro di ricerca del flusso di lavoro",
    "workflowLibrary": "Libreria",
    "workflowLibrary": "Libreria flussi di lavoro",
    "workflowSaved": "Flusso di lavoro salvato",
    "unnamedWorkflow": "Flusso di lavoro senza nome",
    "savingWorkflow": "Salvataggio del flusso di lavoro...",

@@ -1734,7 +1738,6 @@
    "userWorkflows": "Flussi di lavoro utente",
    "projectWorkflows": "Flussi di lavoro del progetto",
    "defaultWorkflows": "Flussi di lavoro predefiniti",
    "uploadAndSaveWorkflow": "Carica nella libreria",
    "chooseWorkflowFromLibrary": "Scegli il flusso di lavoro dalla libreria",
    "deleteWorkflow2": "Vuoi davvero eliminare questo flusso di lavoro? Questa operazione non può essere annullata.",
    "edit": "Modifica",

@@ -1771,8 +1774,24 @@
      "divider": "Divisore",
      "container": "Contenitore",
      "text": "Testo",
      "numberInput": "Ingresso numerico"
    }
      "numberInput": "Ingresso numerico",
      "containerRowLayout": "Contenitore (disposizione riga)",
      "containerColumnLayout": "Contenitore (disposizione colonna)"
    },
    "loadMore": "Carica altro",
    "searchPlaceholder": "Cerca per nome, descrizione o etichetta",
    "filterByTags": "Filtra per etichetta",
    "shared": "Condiviso",
    "browseWorkflows": "Sfoglia i flussi di lavoro",
    "allLoaded": "Tutti i flussi di lavoro caricati",
    "saveChanges": "Salva modifiche",
    "yourWorkflows": "I tuoi flussi di lavoro",
    "recentlyOpened": "Aperto di recente",
    "workflowThumbnail": "Miniatura del flusso di lavoro",
    "private": "Privato",
    "deselectAll": "Deseleziona tutto",
    "noRecentWorkflows": "Nessun flusso di lavoro recente",
    "view": "Visualizza"
  },
  "accordions": {
    "compositing": {

@@ -2322,7 +2341,8 @@
    "toGetStarted": "Per iniziare, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
    "noModelsInstalled": "Sembra che non hai installato alcun modello! Puoi <DownloadStarterModelsButton>scaricare un pacchetto di modelli di avvio</DownloadStarterModelsButton> o <ImportModelsButton>importare modelli</ImportModelsButton>.",
    "toGetStartedLocal": "Per iniziare, assicurati di scaricare o importare i modelli necessari per eseguire Invoke. Quindi, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
    "lowVRAMMode": "Per prestazioni ottimali, segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent>."
    "lowVRAMMode": "Per prestazioni ottimali, segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent>.",
    "toGetStartedWorkflow": "Per iniziare, compila i campi a sinistra e premi <StrongComponent>Invoke</StrongComponent> per generare la tua immagine. Vuoi esplorare altri flussi di lavoro? Fai clic sull'<StrongComponent>icona della cartella</StrongComponent> accanto al titolo del flusso di lavoro per visualizzare un elenco di altri modelli che puoi provare."
  },
  "whatsNew": {
    "whatsNewInInvoke": "Novità in Invoke",

@@ -2330,8 +2350,8 @@
    "watchRecentReleaseVideos": "Guarda i video su questa versione",
    "watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
    "items": [
      "Editor del flusso di lavoro: nuovo generatore di moduli trascina-e-rilascia per una creazione più facile del flusso di lavoro.",
      "Altri miglioramenti: messa in coda dei lotti più rapida, migliore ampliamento, selettore colore migliorato e nodi metadati."
      "Gestione della memoria: nuova impostazione per gli utenti con GPU Nvidia per ridurre l'utilizzo della VRAM.",
      "Prestazioni: continui miglioramenti alle prestazioni e alla reattività complessive dell'applicazione."
    ]
  },
  "system": {
@@ -1566,7 +1566,6 @@
    "defaultWorkflows": "Стандартные рабочие процессы",
    "deleteWorkflow2": "Вы уверены, что хотите удалить этот рабочий процесс? Это нельзя отменить.",
    "chooseWorkflowFromLibrary": "Выбрать рабочий процесс из библиотеки",
    "uploadAndSaveWorkflow": "Загрузить в библиотеку",
    "edit": "Редактировать",
    "download": "Скачать",
    "copyShareLink": "Скопировать ссылку на общий доступ",
@@ -235,7 +235,8 @@
    "column": "Cột",
    "layout": "Bố Cục",
    "row": "Hàng",
    "board": "Bảng"
    "board": "Bảng",
    "saveChanges": "Lưu Thay Đổi"
  },
  "prompt": {
    "addPromptTrigger": "Thêm Prompt Trigger",

@@ -766,7 +767,9 @@
    "urlUnauthorizedErrorMessage2": "Tìm hiểu thêm.",
    "urlForbidden": "Bạn không có quyền truy cập vào model này",
    "urlForbiddenErrorMessage": "Bạn có thể cần yêu cầu quyền truy cập từ trang web đang cung cấp model.",
    "urlUnauthorizedErrorMessage": "Bạn có thể cần thiếp lập một token API để dùng được model này."
    "urlUnauthorizedErrorMessage": "Bạn có thể cần thiếp lập một token API để dùng được model này.",
    "fluxRedux": "FLUX Redux",
    "sigLip": "SigLIP"
  },
  "metadata": {
    "guidance": "Hướng Dẫn",

@@ -979,7 +982,7 @@
    "unknownInput": "Đầu Vào Không Rõ: {{name}}",
    "validateConnections": "Xác Thực Kết Nối Và Đồ Thị",
    "workflowNotes": "Ghi Chú",
    "workflowTags": "Thẻ Tên",
    "workflowTags": "Nhãn",
    "editMode": "Chỉnh sửa trong Trình Biên Tập Workflow",
    "edit": "Chỉnh Sửa",
    "executionStateInProgress": "Đang Xử Lý",

@@ -2021,7 +2024,7 @@
    },
    "mergingLayers": "Đang gộp layer",
    "controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
    "referenceImageEmptyState": "<UploadButton>Tải lên ảnh</UploadButton> hoặc kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này để bắt đầu.",
    "referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
    "useImage": "Dùng Hình Ảnh",
    "resetCanvasLayers": "Khởi Động Lại Layer Canvas",
    "asRasterLayer": "Như $t(controlLayers.rasterLayer)",

@@ -2137,7 +2140,7 @@
  "toast": {
    "imageUploadFailed": "Tải Lên Ảnh Thất Bại",
    "layerCopiedToClipboard": "Sao Chép Layer Vào Clipboard",
    "uploadFailedInvalidUploadDesc_withCount_other": "Tối đa là {{count}} ảnh PNG hoặc JPEG.",
    "uploadFailedInvalidUploadDesc_withCount_other": "Tối đa là {{count}} ảnh PNG, JPEG hoặc WEBP.",
    "imageCopied": "Ảnh Đã Được Sao Chép",
    "sentToUpscale": "Chuyển Vào Upscale",
    "unableToLoadImage": "Không Thể Tải Hình Ảnh",

@@ -2149,7 +2152,7 @@
    "unableToLoadImageMetadata": "Không Thể Tải Metadata Của Ảnh",
    "workflowLoaded": "Workflow Đã Tải",
    "uploadFailed": "Tải Lên Thất Bại",
    "uploadFailedInvalidUploadDesc": "Phải là ảnh PNG hoặc JPEG.",
    "uploadFailedInvalidUploadDesc": "Phải là ảnh PNG, JPEG hoặc WEBP.",
    "serverError": "Lỗi Server",
    "addedToBoard": "Thêm vào tài nguyên của bảng {{name}}",
    "sessionRef": "Phiên: {{sessionId}}",

@@ -2252,11 +2255,10 @@
    "convertGraph": "Chuyển Đổi Đồ Thị",
    "saveWorkflowToProject": "Lưu Workflow Vào Dự Án",
    "workflowName": "Tên Workflow",
    "workflowLibrary": "Thư Viện",
    "workflowLibrary": "Thư Viện Workflow",
    "opened": "Ngày Mở",
    "deleteWorkflow": "Xoá Workflow",
    "workflowEditorMenu": "Menu Biên Tập Workflow",
    "uploadAndSaveWorkflow": "Tải Lên Thư Viện",
    "openLibrary": "Mở Thư Viện",
    "builder": {
      "resetAllNodeFields": "Tải Lại Các Vùng Node",

@@ -2287,7 +2289,18 @@
      "heading": "Đầu Dòng",
      "text": "Văn Bản",
      "divider": "Gạch Chia"
    }
    },
    "yourWorkflows": "Workflow Của Bạn",
    "browseWorkflows": "Khám Phá Workflow",
    "workflowThumbnail": "Ảnh Minh Họa Workflow",
    "saveChanges": "Lưu Thay Đổi",
    "allLoaded": "Đã Tải Tất Cả Workflow",
    "shared": "Nhóm",
    "searchPlaceholder": "Tìm theo tên, mô tả, hoặc nhãn",
    "filterByTags": "Lọc Theo Nhãn",
    "recentlyOpened": "Mở Gần Đây",
    "private": "Cá Nhân",
    "loadMore": "Tải Thêm"
  },
  "upscaling": {
    "missingUpscaleInitialImage": "Thiếu ảnh dùng để upscale",

@@ -2322,8 +2335,8 @@
    "watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
    "watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
    "items": [
      "Trình Biên Tập Workflow: trình tạo vùng nhập dưới dạng kéo thả nhằm tạo dựng workflow dễ dàng hơn.",
      "Các nâng cấp khác: Xếp hàng tạo sinh theo nhóm nhanh hơn, upscale tốt hơn, trình chọn màu được cải thiện, và node chứa metadata."
      "Trình Quản Lý Bộ Nhớ: Thiết lập mới cho người dùng với GPU Nvidia để giảm lượng VRAM sử dụng.",
      "Hiệu suất: Các cải thiện tiếp theo nhằm gói gọn hiệu suất và khả năng phản hồi của ứng dụng."
    ]
  },
  "upsell": {
@@ -1629,7 +1629,6 @@
    "projectWorkflows": "项目工作流程",
    "copyShareLink": "复制分享链接",
    "chooseWorkflowFromLibrary": "从库中选择工作流程",
    "uploadAndSaveWorkflow": "上传到库",
    "deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
  },
  "accordions": {
@@ -26,7 +26,8 @@ import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicP
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/WorkflowListMenu/ShareWorkflowModal';
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal';
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { useReadinessWatcher } from 'features/queue/store/readiness';

@@ -39,7 +40,9 @@ import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
import { DeleteWorkflowDialog } from 'features/workflowLibrary/components/DeleteLibraryWorkflowConfirmationAlertDialog';
import { LoadWorkflowConfirmationAlertDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { LoadWorkflowFromGraphModal } from 'features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal';
import { NewWorkflowConfirmationAlertDialog } from 'features/workflowLibrary/components/NewWorkflowConfirmationAlertDialog';
import { SaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog';
import i18n from 'i18n';
import { size } from 'lodash-es';
import { memo, useCallback, useEffect } from 'react';

@@ -48,7 +51,6 @@ import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
import { useSocketIO } from 'services/events/useSocketIO';

import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';

const DEFAULT_CONFIG = {};

interface Props {

@@ -73,28 +75,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
        {!didStudioInit && <Loading />}
      </Box>
      <HookIsolator config={config} studioInitAction={studioInitAction} />
      <DeleteImageModal />
      <ChangeBoardModal />
      <DynamicPromptsModal />
      <StylePresetModal />
      <CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
      <ClearQueueConfirmationsAlertDialog />
      <NewWorkflowConfirmationAlertDialog />
      <LoadWorkflowConfirmationAlertDialog />
      <DeleteStylePresetDialog />
      <DeleteWorkflowDialog />
      <ShareWorkflowModal />
      <RefreshAfterResetModal />
      <DeleteBoardModal />
      <GlobalImageHotkeys />
      <NewGallerySessionDialog />
      <NewCanvasSessionDialog />
      <ImageContextMenu />
      <FullscreenDropzone />
      <VideosModal />
      <CanvasManagerProviderGate>
        <CanvasPasteModal />
      </CanvasManagerProviderGate>
      <ModalIsolator />
    </ErrorBoundary>
  );
};

@@ -140,3 +121,36 @@ const HookIsolator = memo(
  }
);
HookIsolator.displayName = 'HookIsolator';

const ModalIsolator = memo(() => {
  return (
    <>
      <DeleteImageModal />
      <ChangeBoardModal />
      <DynamicPromptsModal />
      <StylePresetModal />
      <WorkflowLibraryModal />
      <CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
      <ClearQueueConfirmationsAlertDialog />
      <NewWorkflowConfirmationAlertDialog />
      <LoadWorkflowConfirmationAlertDialog />
      <DeleteStylePresetDialog />
      <DeleteWorkflowDialog />
      <ShareWorkflowModal />
      <RefreshAfterResetModal />
      <DeleteBoardModal />
      <GlobalImageHotkeys />
      <NewGallerySessionDialog />
      <NewCanvasSessionDialog />
      <ImageContextMenu />
      <FullscreenDropzone />
      <VideosModal />
      <SaveWorkflowAsDialog />
      <CanvasManagerProviderGate>
        <CanvasPasteModal />
      </CanvasManagerProviderGate>
      <LoadWorkflowFromGraphModal />
    </>
  );
});
ModalIsolator.displayName = 'ModalIsolator';
@@ -16,10 +16,18 @@ import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/projectId';
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import {
  $workflowLibraryCategoriesOptions,
  $workflowLibrarySortOptions,
  $workflowLibraryTagCategoriesOptions,
  DEFAULT_WORKFLOW_LIBRARY_CATEGORIES,
  DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS,
  DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
} from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';

@@ -48,6 +56,8 @@ interface Props extends PropsWithChildren {
  isDebugging?: boolean;
  logo?: ReactNode;
  workflowCategories?: WorkflowCategory[];
  workflowTagCategories?: WorkflowTagCategory[];
  workflowSortOptions?: WorkflowSortOption[];
  loggingOverrides?: LoggingOverrides;
}

@@ -68,6 +78,8 @@ const InvokeAIUI = ({
  isDebugging = false,
  logo,
  workflowCategories,
  workflowTagCategories,
  workflowSortOptions,
  loggingOverrides,
}: Props) => {
  useLayoutEffect(() => {

@@ -195,14 +207,34 @@ const InvokeAIUI = ({

  useEffect(() => {
    if (workflowCategories) {
      $workflowCategories.set(workflowCategories);
      $workflowLibraryCategoriesOptions.set(workflowCategories);
    }

    return () => {
      $workflowCategories.set([]);
      $workflowLibraryCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_CATEGORIES);
    };
  }, [workflowCategories]);

  useEffect(() => {
    if (workflowTagCategories) {
      $workflowLibraryTagCategoriesOptions.set(workflowTagCategories);
    }

    return () => {
      $workflowLibraryTagCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES);
    };
  }, [workflowTagCategories]);

  useEffect(() => {
    if (workflowSortOptions) {
      $workflowLibrarySortOptions.set(workflowSortOptions);
    }

    return () => {
      $workflowLibrarySortOptions.set(DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS);
    };
  }, [workflowSortOptions]);

  useEffect(() => {
    if (socketOptions) {
      $socketOptions.set(socketOptions);
@@ -11,11 +11,11 @@ import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageVi
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibraryModal';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
import { useGetAndLoadLibraryWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadLibraryWorkflow';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { atom } from 'nanostores';
import { useCallback, useEffect } from 'react';
import { useTranslation } from 'react-i18next';

@@ -57,7 +57,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
  const { t } = useTranslation();
  const didParseOpenAPISchema = useStore($hasTemplates);
  const store = useAppStore();
  const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
  const loadWorkflowWithDialog = useLoadWorkflowWithDialog();

  const handleSendToCanvas = useCallback(
    async (imageName: string) => {

@@ -113,10 +113,15 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
  const handleLoadWorkflow = useCallback(
    async (workflowId: string) => {
      // This shows a toast
      await getAndLoadWorkflow(workflowId);
      store.dispatch(setActiveTab('workflows'));
      await loadWorkflowWithDialog({
        type: 'library',
        data: workflowId,
        onSuccess: () => {
          store.dispatch(setActiveTab('workflows'));
        },
      });
    },
    [getAndLoadWorkflow, store]
    [loadWorkflowWithDialog, store]
  );

  const handleSelectStylePreset = useCallback(

@@ -166,7 +171,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
      case 'viewAllWorkflows':
        // Go to the workflows tab and open the workflow library modal
        store.dispatch(setActiveTab('workflows'));
        $isWorkflowListMenuIsOpen.set(true);
        $isWorkflowLibraryModalOpen.set(true);
        break;
      case 'viewAllStylePresets':
        // Go to the canvas tab and open the style presets menu
@@ -27,7 +27,6 @@ import { addDynamicPromptsListener } from 'app/store/middleware/listenerMiddlewa
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
import { addUpdateAllNodesRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested';
import { addWorkflowLoadRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested';
import type { AppDispatch, RootState } from 'app/store/store';

import { addArchivedOrDeletedBoardListener } from './listeners/addArchivedOrDeletedBoardListener';

@@ -89,7 +88,6 @@ addArchivedOrDeletedBoardListener(startAppListening);
addGetOpenAPISchemaListener(startAppListening);

// Workflows
addWorkflowLoadRequestedListener(startAppListening);
addUpdateAllNodesRequestedListener(startAppListening);

// Models
@@ -31,6 +31,7 @@ import type { AnyModelConfig } from 'services/api/types';
import {
  isCLIPEmbedModelConfig,
  isControlLayerModelConfig,
  isFluxReduxModelConfig,
  isFluxVAEModelConfig,
  isIPAdapterModelConfig,
  isLoRAModelConfig,

@@ -77,6 +78,7 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
      handleT5EncoderModels(models, state, dispatch, log);
      handleCLIPEmbedModels(models, state, dispatch, log);
      handleFLUXVAEModels(models, state, dispatch, log);
      handleFLUXReduxModels(models, state, dispatch, log);
    },
  });
};

@@ -209,6 +211,10 @@ const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log)
const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
  const ipaModels = models.filter(isIPAdapterModelConfig);
  selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
    if (entity.ipAdapter.type !== 'ip_adapter') {
      return;
    }

    const selectedIPAdapterModel = entity.ipAdapter.model;
    // `null` is a valid IP adapter model - no need to do anything.
    if (!selectedIPAdapterModel) {

@@ -224,6 +230,10 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {

  selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
    entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
      if (ipAdapter.type !== 'ip_adapter') {
        return;
      }

      const selectedIPAdapterModel = ipAdapter.model;
      // `null` is a valid IP adapter model - no need to do anything.
      if (!selectedIPAdapterModel) {

@@ -241,6 +251,49 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
  });
};

const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
  const fluxReduxModels = models.filter(isFluxReduxModelConfig);

  selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
    if (entity.ipAdapter.type !== 'flux_redux') {
      return;
    }
    const selectedFLUXReduxModel = entity.ipAdapter.model;
    // `null` is a valid FLUX Redux model - no need to do anything.
    if (!selectedFLUXReduxModel) {
      return;
    }
    const isModelAvailable = fluxReduxModels.some((m) => m.key === selectedFLUXReduxModel.key);
    if (isModelAvailable) {
      return;
    }
    log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
    dispatch(referenceImageIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
  });

  selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
    entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
      if (ipAdapter.type !== 'flux_redux') {
        return;
      }

      const selectedFLUXReduxModel = ipAdapter.model;
      // `null` is a valid FLUX Redux model - no need to do anything.
      if (!selectedFLUXReduxModel) {
        return;
      }
      const isModelAvailable = fluxReduxModels.some((m) => m.key === selectedFLUXReduxModel.key);
      if (isModelAvailable) {
        return;
      }
      log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
      dispatch(
        rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
      );
    });
  });
};

const handlePostProcessingModel: ModelHandler = (models, state, dispatch, log) => {
  const selectedPostProcessingModel = state.upscale.postProcessingModel;
  const allSpandrelModels = models.filter(isSpandrelImageToImageModelConfig);
@@ -1,4 +0,0 @@
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import { atom } from 'nanostores';

export const $workflowCategories = atom<WorkflowCategory[]>(['user', 'default']);
Some files were not shown because too many files have changed in this diff.