Compare commits

...

111 Commits

Author SHA1 Message Date
Riccardo Giovanetti
7adac4581a translationBot(ui): update translation (Italian)
Currently translated at 98.7% (1800 of 1822 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.7% (1798 of 1820 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.7% (1796 of 1818 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-03-17 10:49:22 +11:00
Hosted Weblate
962db86cac translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-03-17 10:49:22 +11:00
psychedelicious
d65ec0e250 feat(ui): configurable form field constraints (WIP3) 2025-03-17 10:47:01 +11:00
psychedelicious
7fdde5e84a tests(ui): fix constrainNumber 2025-03-17 10:47:01 +11:00
psychedelicious
895956bcfe chore(ui): lint 2025-03-17 10:47:01 +11:00
psychedelicious
f27d26cfa2 feat(ui): configurable form field constraints (WIP2) 2025-03-17 10:47:01 +11:00
psychedelicious
965bcba6c2 feat(ui): configurable form field constraints (WIP) 2025-03-17 10:47:01 +11:00
psychedelicious
c9f2460ff2 fix(ui): generator widget should stretch to fill when added to builder 2025-03-17 10:41:59 +11:00
psychedelicious
5abbbf4b5b feat(ui): allow pasting images on workflows tab when workflows not focused 2025-03-17 10:37:27 +11:00
psychedelicious
e66688edbf feat(ui): only paste into canvas when canvas is focused 2025-03-17 10:37:27 +11:00
joshistoast
a519483f95 refactor(ui): ♻️ memoize merged styles, simplify data attribute conditional 2025-03-17 10:34:49 +11:00
joshistoast
75c91604bb fix: 🐛 export the region wrapper
am silly
2025-03-17 10:34:49 +11:00
joshistoast
53bdaba7b6 style: 🚨 linting 2025-03-17 10:34:49 +11:00
joshistoast
f3f405ca77 refactor(ui): ♻️ remove forward ref usage 2025-03-17 10:34:49 +11:00
joshistoast
dda69950a7 refactor(ui): ♻️ apply memoization, system style objects, and data attribute to region highlight wrapper 2025-03-17 10:34:49 +11:00
joshistoast
b2198b9fa7 feat: 🔧 region highlighting disabled by default
some users may not like this
2025-03-17 10:34:49 +11:00
joshistoast
02b91e8e7b feat: highlight focused regions
Adds a region wrapper with a highlight effect when that region is focused; this behavior can be toggled as a setting
2025-03-17 10:34:49 +11:00
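A minimal sketch of what such a wrapper could look like (component name, props, and styling are assumptions, not the actual implementation):

```tsx
import { memo, useMemo, type CSSProperties, type PropsWithChildren } from 'react';

type Props = PropsWithChildren<{
  isFocused: boolean; // whether this region currently has focus
  highlightEnabled: boolean; // the user-toggleable setting (disabled by default per the commits above)
}>;

// Memoized wrapper that marks the focused region with a data attribute and a highlight outline.
export const RegionHighlightWrapper = memo(({ isFocused, highlightEnabled, children }: Props) => {
  const style = useMemo<CSSProperties>(
    () => ({ outline: isFocused && highlightEnabled ? '2px solid dodgerblue' : 'none' }),
    [isFocused, highlightEnabled]
  );
  return (
    <div data-region-focused={isFocused && highlightEnabled ? true : undefined} style={style}>
      {children}
    </div>
  );
});
RegionHighlightWrapper.displayName = 'RegionHighlightWrapper';
```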
psychedelicious
09bf7c35eb chore(ui): typegen 2025-03-17 10:32:19 +11:00
psychedelicious
deb9a65b3d chore(ui): update whats new 2025-03-17 10:32:19 +11:00
psychedelicious
5be9a7227c chore: remove all explicit image references in default workflows 2025-03-17 10:32:19 +11:00
psychedelicious
bb9f886bd4 docs: update default workflows dev docs 2025-03-17 10:32:19 +11:00
psychedelicious
46520946f8 chore: remove all explicit model references in default workflows 2025-03-17 10:32:19 +11:00
psychedelicious
830880a6fc chore(nodes): update titles of all model-specific nodes to reference their models
Also bump versions on all of them.
2025-03-17 10:32:19 +11:00
psychedelicious
63b94a8ff3 feat(ui): add sd3.5 default workflows tag 2025-03-17 10:32:19 +11:00
psychedelicious
f12924a1e1 chore: update default workflow tags & names 2025-03-17 10:32:19 +11:00
psychedelicious
f8e51c86f5 chore: bump version to v5.8.0 2025-03-17 10:32:19 +11:00
psychedelicious
c84a646735 ci: pin tj-actions/changed-files
Closes #7793
2025-03-17 08:36:17 +11:00
psychedelicious
b52f8121af fix(ui): duplicate edges on reconnect
Closes #7127
2025-03-15 10:12:50 +11:00
psychedelicious
05bed3fddd fix(ui): do not mark workflow as touched when setting form field initial values 2025-03-15 10:10:21 +11:00
psychedelicious
87ea20192f chore(ui): knip 2025-03-14 20:54:58 +11:00
psychedelicious
2f9c95c462 fix(ui): return early in error-selecting hooks
Prevents an error when a node is deleted while the hook is still being called
2025-03-14 20:54:58 +11:00
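A sketch of the early-return guard this implies (the state shape and selector name are assumptions):

```typescript
// Hypothetical shapes, standing in for the real node state.
type InvocationNode = { id: string; errors?: string[] };
type RootState = { nodes: { nodes: InvocationNode[] } };

const EMPTY_ERRORS: string[] = [];

// Selector that returns early with a stable empty array when the node has been deleted,
// instead of dereferencing a node that no longer exists.
export const selectNodeFieldErrors = (state: RootState, nodeId: string): string[] => {
  const node = state.nodes.nodes.find((n) => n.id === nodeId);
  if (!node) {
    return EMPTY_ERRORS;
  }
  return node.errors ?? EMPTY_ERRORS;
};
```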
psychedelicious
47cadbb48e feat(ui): show field errors in tooltips 2025-03-14 20:54:58 +11:00
psychedelicious
23518b9830 feat(ui): useDebouncedAppSelector
Hook that replicates `useSelector`, but debounces calling the selector.
2025-03-14 20:54:58 +11:00
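A sketch of how such a hook could be built (the signature and the use of lodash's `debounce` are assumptions; the real implementation may differ):

```typescript
import { useEffect, useState } from 'react';
import { useStore } from 'react-redux';
import { debounce } from 'lodash-es';

// Like `useSelector`, but the selector is only re-run on a trailing debounce
// rather than on every store update.
export const useDebouncedAppSelector = <T>(selector: (state: unknown) => T, waitMs = 300): T => {
  const store = useStore();
  const [value, setValue] = useState<T>(() => selector(store.getState()));

  useEffect(() => {
    const onChange = debounce(() => setValue(selector(store.getState())), waitMs);
    const unsubscribe = store.subscribe(onChange);
    return () => {
      onChange.cancel();
      unsubscribe();
    };
  }, [store, selector, waitMs]);

  return value;
};
```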
psychedelicious
94dcf391a6 tweak(ui): styling for image collection fields 2025-03-14 20:50:35 +11:00
psychedelicious
e7a60c01ed fix(ui): prevent vertical scrolling on row containers 2025-03-14 07:15:58 +11:00
Mary Hipp
4b54ccc29c getting started copy for workflows 2025-03-13 12:25:14 -04:00
Mary Hipp
c4183ec98c add with_hash to prevent rerenders on default 2025-03-13 10:29:22 -04:00
Mary Hipp
5a9cbe35e0 typegen fix 2025-03-13 10:29:22 -04:00
Mary Hipp
df18fe0298 make sure that recent view always sorts by opened_at even if not available as sort option in UI 2025-03-13 10:29:22 -04:00
Mary Hipp
e5591d145f allow workflow sort options to be passed in 2025-03-13 08:27:51 -04:00
psychedelicious
371c187fc3 chore: bump version to v5.8.0rc1 2025-03-13 23:00:01 +11:00
psychedelicious
e982c95687 fix(ui): respect line breaks in builder text and heading elements 2025-03-13 09:39:41 +11:00
psychedelicious
0eeb0dd67b feat(ui): use invoke logo for thumbnail fallback for default workflows 2025-03-13 08:45:12 +11:00
psychedelicious
28c74cbe38 revert(app): remove test image from default workflow thumbnails 2025-03-13 08:45:12 +11:00
psychedelicious
7414f68acc fix(ui): save as marks workflow as not touched 2025-03-13 08:45:12 +11:00
psychedelicious
a984462b80 tweak(ui): workflow library card layout to fit 2 lines of title and 3 lines of desc 2025-03-13 08:45:12 +11:00
psychedelicious
c6c2567203 tweak(ui): workflow description shows 1 line w/ tooltip for full content 2025-03-13 08:45:12 +11:00
psychedelicious
f05c8b909f fix(ui): mark workflow touched on form builder state changes 2025-03-13 07:10:59 +11:00
psychedelicious
73330a1308 chore(ui): lint 2025-03-13 07:10:59 +11:00
psychedelicious
6f568d48ed fix(ui): studio init action workflow loading 2025-03-13 07:10:59 +11:00
psychedelicious
81a97f3796 fix(ui): load workflow from object 2025-03-13 07:10:59 +11:00
psychedelicious
3f9535d2f9 fix(ui): load workflow from graph 2025-03-13 07:10:59 +11:00
psychedelicious
83bfbdcad4 feat(ui): more workflow loading standardization
There is now a single entrypoint for loading a workflow: `useLoadWorkflowWithDialog`.

The hook handles loading workflows from various sources. If there are unsaved changes, the user is prompted to confirm before the workflow is loaded.

It returns a function that loads a workflow from the given source: immediately if there are no unsaved changes, otherwise after the user confirms. On success, error, or completion, the corresponding callback is called.

WHEW
2025-03-13 07:10:59 +11:00
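A rough sketch of the behavior described above, with the app's real state and dialog plumbing replaced by injected dependencies (all names and types here are assumptions):

```typescript
type LoadWorkflowSource =
  | { type: 'file'; file: File }
  | { type: 'object'; workflow: unknown }
  | { type: 'library'; workflowId: string };

type Callbacks = {
  onSuccess?: () => void;
  onError?: (error: unknown) => void;
  onCompleted?: () => void;
};

type Deps = {
  hasUnsavedChanges: () => boolean;
  confirmDiscardChanges: () => Promise<boolean>; // e.g. shows the confirmation dialog
  loadWorkflow: (source: LoadWorkflowSource) => Promise<void>;
};

// Returns a function that loads a workflow from any source, prompting first if there are
// unsaved changes, and invoking the corresponding callback on success, error, or completion.
export const makeLoadWorkflowWithDialog =
  ({ hasUnsavedChanges, confirmDiscardChanges, loadWorkflow }: Deps) =>
  async (source: LoadWorkflowSource, callbacks: Callbacks = {}): Promise<void> => {
    if (hasUnsavedChanges() && !(await confirmDiscardChanges())) {
      return; // user cancelled; keep the current workflow
    }
    try {
      await loadWorkflow(source);
      callbacks.onSuccess?.();
    } catch (error) {
      callbacks.onError?.(error);
    } finally {
      callbacks.onCompleted?.();
    }
  };
```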
psychedelicious
729428084c feat(ui): prompt when loading workflow from file if unsaved changes 2025-03-13 07:10:59 +11:00
psychedelicious
523a932ecc feat(ui): accept button on workflow load dialog is "Load" 2025-03-13 07:10:59 +11:00
psychedelicious
21be7d7157 feat(ui): allow load workflow confirm dialog to load workflows from object instead of only id 2025-03-13 07:10:59 +11:00
psychedelicious
a29fb18c0b feat(ui): standardize and clean up workflow loading hooks and logic 2025-03-13 07:10:59 +11:00
psychedelicious
aed446f013 fix(ui): make the workflow load from file menu item work the same as the button in library
Upload and save as instead of just upload as draft.
2025-03-13 07:10:59 +11:00
Mary Hipp
e81c9b0d6e add default for opened_at 2025-03-12 14:35:34 -04:00
psychedelicious
89f457c486 fix(ui): mark workflow as opened when creating a new workflow 2025-03-12 12:11:00 +11:00
psychedelicious
30ed09a36e fix(ui): default categories for oss 2025-03-12 12:11:00 +11:00
psychedelicious
3334652acc feat(db): drop the opened_at column instead of marking deprecated 2025-03-12 12:11:00 +11:00
psychedelicious
e83536f396 chore(ui): lint 2025-03-12 12:11:00 +11:00
psychedelicious
97593f95f6 feat(ui): on first load, if the selected library view has no workflows, switch to the first view that has workflows 2025-03-12 12:11:00 +11:00
psychedelicious
7f14cee17e chore(ui): typegen 2025-03-12 12:11:00 +11:00
psychedelicious
0a836d6fc1 feat(app): add method and route to get workflow library counts by category 2025-03-12 12:11:00 +11:00
psychedelicious
54e781d5bb tidy(app): remove unused method in workflow records service 2025-03-12 12:11:00 +11:00
psychedelicious
aa71d0c817 tweak(ui): 'is_recent' -> 'has_been_opened' 2025-03-12 12:11:00 +11:00
psychedelicious
07313e429d chore(ui): typegen 2025-03-12 12:11:00 +11:00
psychedelicious
bad5023238 tweak(app): 'is_recent' -> 'has_been_opened' 2025-03-12 12:11:00 +11:00
psychedelicious
73a0d2c06c fix(ui): memo WorkflowLibraryModal 2025-03-12 12:11:00 +11:00
psychedelicious
918e9c8ccc feat(app): drop and recreate index on opened_at
Not sure if this is strictly required but doing it anyways.
2025-03-12 12:11:00 +11:00
psychedelicious
1e388e9ca4 tweak(ui): align new and upload workflow buttons 2025-03-12 12:11:00 +11:00
psychedelicious
5b84d45932 perf(ui): memoize workflow library components 2025-03-12 12:11:00 +11:00
psychedelicious
dc3f1184b2 fix(ui): other stuff borked by rebase 2025-03-12 12:11:00 +11:00
psychedelicious
87438bcad7 fix(ui): rebase broke things 2025-03-12 12:11:00 +11:00
Mary Hipp
afd894fd04 update recent workflows UI 2025-03-12 12:11:00 +11:00
Mary Hipp
df305c0b99 allow opened_at to be nullable for workflows that the user has never opened 2025-03-12 12:11:00 +11:00
psychedelicious
deecb7f3c3 feat(ui): "Reset Filters" -> "Deselect All" 2025-03-12 08:00:18 +11:00
psychedelicious
dd5f353465 revert(ui): use reverted API for workflow library 2025-03-12 08:00:18 +11:00
psychedelicious
a8759ea0a6 chore(ui): typegen 2025-03-12 08:00:18 +11:00
psychedelicious
3ff529c718 revert(app): use OR logic for workflow library filtering 2025-03-12 08:00:18 +11:00
psychedelicious
3b0fecafb0 fix(ui): URL mismatch for tag_counts_with_filter 2025-03-12 08:00:18 +11:00
psychedelicious
099011000f chore(ui): lint 2025-03-12 08:00:18 +11:00
psychedelicious
155daa3137 feat(ui): hide filters with no workflows 2025-03-12 08:00:18 +11:00
psychedelicious
c493e223cf feat(ui): "Reset Tags" -> "Reset Filters" 2025-03-12 08:00:18 +11:00
psychedelicious
124ca23f8b feat(ui): use new tag filtering for workflow library 2025-03-12 08:00:18 +11:00
psychedelicious
a8023cbcb6 chore(ui): typegen 2025-03-12 08:00:18 +11:00
psychedelicious
b733d3897e feat(app): revised workflow library filtering by tag
- Replace `get_counts` method with `get_tag_counts_with_filter` which gets the counts for a list of tags, filtering by a list of selected tags
- Update `get_many` logic to apply tag filtering with AND logic, to match the new `get_tag_counts_with_filter` method
- Update workflow library router
2025-03-12 08:00:18 +11:00
psychedelicious
ef95b37ace fix(ui): workflow library infinite query providesTags 2025-03-12 08:00:18 +11:00
psychedelicious
4feff5a185 chore(ui): bump @reduxjs/toolkit from 2.6.0 to 2.6.1
This brings in some fixes for the new infinite query support.
2025-03-12 08:00:18 +11:00
psychedelicious
6c8dc32d5c docs(ui): add comments to workflow library cache invalidation 2025-03-12 08:00:18 +11:00
psychedelicious
e5da808b2f fix(ui): updating workflow content should not invalidate the infinite query cache 2025-03-12 08:00:18 +11:00
psychedelicious
7d3434da62 fix(ui): updating workflow opened at invalidates infinite query cache 2025-03-12 08:00:18 +11:00
psychedelicious
4cc70d9f16 feat(ui): add cache tags for workflow library's infinite query 2025-03-12 08:00:18 +11:00
psychedelicious
7988bc1a59 chore(ui): remove unused WorkflowsRecent RTKQ tag
This didn't actually do anything. Will be implementing the actual functionality that you'd _think_ this tag would do in a future change.
2025-03-12 08:00:18 +11:00
psychedelicious
1756d885f6 refactor(ui): split workflow library state into separate slice
Has no business being in the workflow state slice.
2025-03-12 08:00:18 +11:00
psychedelicious
9ec4d968aa chore: bump version to v5.8.0a2 2025-03-11 13:29:26 +11:00
Riccardo Giovanetti
76c09301f9 translationBot(ui): update translation (Italian)
Currently translated at 98.7% (1794 of 1816 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-03-11 11:33:01 +11:00
Linos
1cf8749754 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1816 of 1816 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.9% (1815 of 1816 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-03-11 11:33:01 +11:00
Riku
5d6c468833 translationBot(ui): update translation (German)
Currently translated at 67.2% (1221 of 1816 strings)

Co-authored-by: Riku <riku.block@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2025-03-11 11:33:01 +11:00
Hosted Weblate
80b3f44ae8 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-03-11 11:33:01 +11:00
psychedelicious
c77c12aa1d fix(ui): missing builder translations 2025-03-11 11:28:51 +11:00
psychedelicious
731992c5ec fix(ui): restore accidentally deleted line 2025-03-11 11:17:19 +11:00
psychedelicious
c259899bf4 feat(ui): support for FLUX Redux in canvas
User facing:

When a FLUX main model is selected, users may now add Regional Reference Image layers.

When switching between FLUX Redux and FLUX IP Adapter, the settings will change to match the model type. (IP Adapter has weight, begin/end step, but Redux does not.) The image will be retained when switching between the two.

Otherwise it works the same way as IP Adapter - both in Global and Regional Reference Image layers.

---

Internal state handling:

Slightly awkward, but it was easiest to make FLUX Redux a second type of IP Adapter in redux state.

Global and regional reference images still have a single `ipAdapter` field, but it can have a type of `ip_adapter` or `flux_redux`.

Ideally, this field would be called `config` or `settings`, but we are past that point. We _could_ do a migration to rename it, but I don't think it's worth the effort.

---

Other changes:
- Updated canvas layer validators to handle FLUX Redux.
- Updated model list loading logic to un-set FLUX Redux models in Canvas if they are not in the list (e.g. if the user deletes the model in the main app).
- Updated graph builders - new `addFLUXRedux` util & updated `addRegions` util.
- Updated the `buildModelsHook` util to return a hook that accepts a filter callback. This handles a discrepancy: FLUX IP Adapter does not support regional guidance, but FLUX Redux does. The Regional Guidance settings pass a filter that excludes FLUX IP Adapter models from the combined list of IP Adapter and Redux models.
2025-03-11 11:17:19 +11:00
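A sketch of the state shape this describes; the `ip_adapter` / `flux_redux` discriminator comes from the message above, while the remaining field names are assumptions:

```typescript
type ImageWithDims = { image_name: string; width: number; height: number };

// The single `ipAdapter` field on a reference image layer can hold either config type.
export type IPAdapterConfig = {
  type: 'ip_adapter';
  image: ImageWithDims | null;
  weight: number;
  beginEndStepPct: [number, number];
};

export type FLUXReduxConfig = {
  type: 'flux_redux';
  // Redux has no weight or begin/end step settings; only the reference image.
  image: ImageWithDims | null;
};

export type ReferenceImageConfig = IPAdapterConfig | FLUXReduxConfig;

// Switching from IP Adapter to Redux keeps the image but drops the IP Adapter-only settings.
export const toFluxRedux = (config: ReferenceImageConfig): FLUXReduxConfig => ({
  type: 'flux_redux',
  image: config.image,
});
```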
psychedelicious
f62b9ad919 chore(ui): typegen 2025-03-11 11:17:19 +11:00
psychedelicious
57533657f9 feat(nodes): remove siglip from flux_redux, dl it jit when needed if we cannot find it
This follows the same pattern as IP Adapter w/ its CLIP Vision model. The SigLIP model is unlikely to ever change and we don't want to force the user to select it anywhere. Hardcoding it is safe and makes the UX much nicer.

The alternative is a model dropdown that will likely only ever have one valid choice in it.
2025-03-11 11:17:19 +11:00
psychedelicious
e35537e60a fix(mm): move flux_redux starter model to the flux bundle, make siglip a dependency of it 2025-03-11 11:17:19 +11:00
psychedelicious
a89d68b93a fix(ui): hide shared on workflow library 2025-03-10 12:29:48 -04:00
psychedelicious
59a8c0d441 feat(app): less janky custom node loading
- We don't need to copy the init file. Just crawl the custom nodes dir for modules and import them all. Dunno why I didn't do this initially.
- Pass the logger in as an arg. There was a race condition where if we got the logger directly in the load_custom_nodes function, the config would not have been loaded fully yet and we'd end up with the wrong custom nodes path!
- Remove permissions-setting logic; I do not believe it is relevant for custom nodes
- Minor cleanup of the utility
2025-03-08 09:42:13 +11:00
Riku
d5d08f6569 fix(ui): add webp to supported image types in toast messages 2025-03-07 20:38:16 +11:00
173 changed files with 3562 additions and 1899 deletions

View File

@@ -44,7 +44,12 @@ jobs:
- name: check for changed frontend files
if: ${{ inputs.always_run != true }}
id: changed-files
uses: tj-actions/changed-files@v42
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
frontend:

View File

@@ -44,7 +44,12 @@ jobs:
- name: check for changed frontend files
if: ${{ inputs.always_run != true }}
id: changed-files
uses: tj-actions/changed-files@v42
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
frontend:

View File

@@ -43,7 +43,12 @@ jobs:
- name: check for changed python files
if: ${{ inputs.always_run != true }}
id: changed-files
uses: tj-actions/changed-files@v42
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
python:

View File

@@ -77,7 +77,12 @@ jobs:
- name: check for changed python files
if: ${{ inputs.always_run != true }}
id: changed-files
uses: tj-actions/changed-files@v42
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
python:

View File

@@ -42,7 +42,12 @@ jobs:
- name: check for changed files
if: ${{ inputs.always_run != true }}
id: changed-files
uses: tj-actions/changed-files@v42
# Pinned to the _hash_ for v45.0.9 to prevent supply-chain attacks.
# See:
# - CVE-2025-30066
# - https://www.stepsecurity.io/blog/harden-runner-detection-tj-actions-changed-files-action-is-compromised
# - https://github.com/tj-actions/changed-files/issues/2463
uses: tj-actions/changed-files@a284dc1814e3fd07f2e34267fc8f81227ed29fb8
with:
files_yaml: |
src:

View File

@@ -105,6 +105,7 @@ async def list_workflows(
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories of workflow to get"),
tags: Optional[list[str]] = Query(default=None, description="The tags of workflow to get"),
query: Optional[str] = Query(default=None, description="The text to query by (matches name and description)"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
) -> PaginatedResults[WorkflowRecordListItemWithThumbnailDTO]:
"""Gets a page of workflows"""
workflows_with_thumbnails: list[WorkflowRecordListItemWithThumbnailDTO] = []
@@ -116,6 +117,7 @@ async def list_workflows(
query=query,
categories=categories,
tags=tags,
has_been_opened=has_been_opened,
)
for workflow in workflows.items:
workflows_with_thumbnails.append(
@@ -221,14 +223,29 @@ async def get_workflow_thumbnail(
raise HTTPException(status_code=404)
@workflows_router.get("/counts", operation_id="get_counts")
async def get_counts(
tags: Optional[list[str]] = Query(default=None, description="The tags to include"),
@workflows_router.get("/counts_by_tag", operation_id="get_counts_by_tag")
async def get_counts_by_tag(
tags: list[str] = Query(description="The tags to get counts for"),
categories: Optional[list[WorkflowCategory]] = Query(default=None, description="The categories to include"),
) -> int:
"""Gets a the count of workflows that include the specified tags and categories"""
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
) -> dict[str, int]:
"""Counts workflows by tag"""
return ApiDependencies.invoker.services.workflow_records.get_counts(tags=tags, categories=categories)
return ApiDependencies.invoker.services.workflow_records.counts_by_tag(
tags=tags, categories=categories, has_been_opened=has_been_opened
)
@workflows_router.get("/counts_by_category", operation_id="counts_by_category")
async def counts_by_category(
categories: list[WorkflowCategory] = Query(description="The categories to include"),
has_been_opened: Optional[bool] = Query(default=None, description="Whether to include/exclude recent workflows"),
) -> dict[str, int]:
"""Counts workflows by category"""
return ApiDependencies.invoker.services.workflow_records.counts_by_category(
categories=categories, has_been_opened=has_been_opened
)
@workflows_router.put(

View File

@@ -40,10 +40,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"compel",
title="Prompt",
title="Prompt - SD1.5",
tags=["prompt", "compel"],
category="conditioning",
version="1.2.0",
version="1.2.1",
)
class CompelInvocation(BaseInvocation):
"""Parse prompt using compel package to conditioning."""
@@ -233,10 +233,10 @@ class SDXLPromptInvocationBase:
@invocation(
"sdxl_compel_prompt",
title="SDXL Prompt",
title="Prompt - SDXL",
tags=["sdxl", "compel", "prompt"],
category="conditioning",
version="1.2.0",
version="1.2.1",
)
class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
"""Parse prompt using compel package to conditioning."""
@@ -327,10 +327,10 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
@invocation(
"sdxl_refiner_compel_prompt",
title="SDXL Refiner Prompt",
title="Prompt - SDXL Refiner",
tags=["sdxl", "compel", "prompt"],
category="conditioning",
version="1.1.1",
version="1.1.2",
)
class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
"""Parse prompt using compel package to conditioning."""
@@ -376,10 +376,10 @@ class CLIPSkipInvocationOutput(BaseInvocationOutput):
@invocation(
"clip_skip",
title="CLIP Skip",
title="Apply CLIP Skip - SD1.5, SDXL",
tags=["clipskip", "clip", "skip"],
category="conditioning",
version="1.1.0",
version="1.1.1",
)
class CLIPSkipInvocation(BaseInvocation):
"""Skip layers in clip text_encoder model."""

View File

@@ -87,7 +87,7 @@ class ControlOutput(BaseInvocationOutput):
control: ControlField = OutputField(description=FieldDescriptions.control)
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
class ControlNetInvocation(BaseInvocation):
"""Collects ControlNet info to pass to other nodes"""

View File

@@ -1,64 +0,0 @@
"""
Invoke-managed custom node loader. See README.md for more information.
"""
import sys
import traceback
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
loaded_packs: list[str] = []
failed_packs: list[str] = []
custom_nodes_dir = Path(__file__).parent
for d in custom_nodes_dir.iterdir():
# skip files
if not d.is_dir():
continue
# skip hidden directories
if d.name.startswith("_") or d.name.startswith("."):
continue
# skip directories without an `__init__.py`
init = d / "__init__.py"
if not init.exists():
continue
module_name = init.parent.stem
# skip if already imported
if module_name in globals():
continue
# load the module, appending adding a suffix to identify it as a custom node pack
spec = spec_from_file_location(module_name, init.absolute())
if spec is None or spec.loader is None:
logger.warn(f"Could not load {init}")
continue
logger.info(f"Loading node pack {module_name}")
try:
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
loaded_packs.append(module_name)
except Exception:
failed_packs.append(module_name)
full_error = traceback.format_exc()
logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
del init, module_name
loaded_count = len(loaded_packs)
if loaded_count > 0:
logger.info(
f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_dir}: {', '.join(loaded_packs)}"
)

View File

@@ -127,10 +127,10 @@ def get_scheduler(
@invocation(
"denoise_latents",
title="Denoise Latents",
title="Denoise - SD1.5, SDXL",
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
category="latents",
version="1.5.3",
version="1.5.4",
)
class DenoiseLatentsInvocation(BaseInvocation):
"""Denoises noisy latents to decodable images"""

View File

@@ -21,10 +21,10 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):
@invocation(
"flux_control_lora_loader",
title="Flux Control LoRA",
title="Control LoRA - FLUX",
tags=["lora", "model", "flux"],
category="model",
version="1.1.0",
version="1.1.1",
classification=Classification.Prototype,
)
class FluxControlLoRALoaderInvocation(BaseInvocation):

View File

@@ -37,10 +37,10 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
@invocation(
"flux_model_loader",
title="Flux Main Model",
title="Main Model - FLUX",
tags=["model", "flux"],
category="model",
version="1.0.5",
version="1.0.6",
classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):

View File

@@ -20,8 +20,11 @@ from invokeai.app.invocations.fields import (
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.redux.flux_redux_model import FluxReduxModel
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
from invokeai.backend.model_manager.starter_models import siglip
from invokeai.backend.sig_lip.sig_lip_pipeline import SigLipPipeline
from invokeai.backend.util.devices import TorchDevice
@@ -35,16 +38,12 @@ class FluxReduxOutput(BaseInvocationOutput):
)
SIGLIP_STARTER_MODEL_NAME = "SigLIP - google/siglip-so400m-patch14-384"
FLUX_REDUX_STARTER_MODEL_NAME = "FLUX Redux"
@invocation(
"flux_redux",
title="FLUX Redux",
tags=["ip_adapter", "control"],
category="ip_adapter",
version="1.0.0",
version="2.0.0",
classification=Classification.Prototype,
)
class FluxReduxInvocation(BaseInvocation):
@@ -61,11 +60,6 @@ class FluxReduxInvocation(BaseInvocation):
title="FLUX Redux Model",
ui_type=UIType.FluxReduxModel,
)
siglip_model: ModelIdentifierField = InputField(
description="The SigLIP model to use.",
title="SigLIP Model",
ui_type=UIType.SigLipModel,
)
def invoke(self, context: InvocationContext) -> FluxReduxOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
@@ -80,7 +74,8 @@ class FluxReduxInvocation(BaseInvocation):
@torch.no_grad()
def _siglip_encode(self, context: InvocationContext, image: Image.Image) -> torch.Tensor:
with context.models.load(self.siglip_model).model_on_device() as (_, siglip_pipeline):
siglip_model_config = self._get_siglip_model(context)
with context.models.load(siglip_model_config.key).model_on_device() as (_, siglip_pipeline):
assert isinstance(siglip_pipeline, SigLipPipeline)
return siglip_pipeline.encode_image(
x=image, device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
@@ -93,3 +88,32 @@ class FluxReduxInvocation(BaseInvocation):
dtype = next(flux_redux.parameters()).dtype
encoded_x = encoded_x.to(dtype=dtype)
return flux_redux(encoded_x)
def _get_siglip_model(self, context: InvocationContext) -> AnyModelConfig:
siglip_models = context.models.search_by_attrs(name=siglip.name, base=BaseModelType.Any, type=ModelType.SigLIP)
if not len(siglip_models) > 0:
context.logger.warning(
f"The SigLIP model required by FLUX Redux ({siglip.name}) is not installed. Downloading and installing now. This may take a while."
)
# TODO(psyche): Can the probe reliably determine the type of the model? Just hardcoding it bc I don't want to experiment now
config_overrides = ModelRecordChanges(name=siglip.name, type=ModelType.SigLIP)
# Queue the job
job = context._services.model_manager.install.heuristic_import(siglip.source, config=config_overrides)
# Wait for up to 10 minutes - model is ~3.5GB
context._services.model_manager.install.wait_for_job(job, timeout=600)
siglip_models = context.models.search_by_attrs(
name=siglip.name,
base=BaseModelType.Any,
type=ModelType.SigLIP,
)
if len(siglip_models) == 0:
context.logger.error("Error while fetching SigLIP for FLUX Redux")
assert len(siglip_models) == 1
return siglip_models[0]

View File

@@ -26,10 +26,10 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
@invocation(
"flux_text_encoder",
title="FLUX Text Encoding",
title="Prompt - FLUX",
tags=["prompt", "conditioning", "flux"],
category="conditioning",
version="1.1.1",
version="1.1.2",
classification=Classification.Prototype,
)
class FluxTextEncoderInvocation(BaseInvocation):

View File

@@ -22,10 +22,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"flux_vae_decode",
title="FLUX Latents to Image",
title="Latents to Image - FLUX",
tags=["latents", "image", "vae", "l2i", "flux"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an image from latents."""

View File

@@ -19,10 +19,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"flux_vae_encode",
title="FLUX Image to Latents",
title="Image to Latents - FLUX",
tags=["latents", "image", "vae", "i2l", "flux"],
category="latents",
version="1.0.0",
version="1.0.1",
)
class FluxVaeEncodeInvocation(BaseInvocation):
"""Encodes an image into latents."""

View File

@@ -19,9 +19,9 @@ class IdealSizeOutput(BaseInvocationOutput):
@invocation(
"ideal_size",
title="Ideal Size",
title="Ideal Size - SD1.5, SDXL",
tags=["latents", "math", "ideal_size"],
version="1.0.4",
version="1.0.5",
)
class IdealSizeInvocation(BaseInvocation):
"""Calculates the ideal size for generation to avoid duplication"""

View File

@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"i2l",
title="Image to Latents",
title="Image to Latents - SD1.5, SDXL",
tags=["latents", "image", "vae", "i2l"],
category="latents",
version="1.1.0",
version="1.1.1",
)
class ImageToLatentsInvocation(BaseInvocation):
"""Encodes an image into latents."""

View File

@@ -69,7 +69,13 @@ CLIP_VISION_MODEL_MAP: dict[Literal["ViT-L", "ViT-H", "ViT-G"], StarterModel] =
}
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.5.0")
@invocation(
"ip_adapter",
title="IP-Adapter - SD1.5, SDXL",
tags=["ip_adapter", "control"],
category="ip_adapter",
version="1.5.1",
)
class IPAdapterInvocation(BaseInvocation):
"""Collects IP-Adapter info to pass to other nodes."""

View File

@@ -31,10 +31,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"l2i",
title="Latents to Image",
title="Latents to Image - SD1.5, SDXL",
tags=["latents", "image", "vae", "l2i"],
category="latents",
version="1.3.1",
version="1.3.2",
)
class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an image from latents."""

View File

@@ -1,40 +1,83 @@
import logging
import shutil
import sys
import traceback
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
def load_custom_nodes(custom_nodes_path: Path):
def load_custom_nodes(custom_nodes_path: Path, logger: logging.Logger):
"""
Loads all custom nodes from the custom_nodes_path directory.
This function copies a custom __init__.py file to the custom_nodes_path directory, effectively turning it into a
python module.
If custom_nodes_path does not exist, it creates it.
The custom __init__.py file itself imports all the custom node packs as python modules from the custom_nodes_path
directory.
It also copies the custom_nodes/README.md file to the custom_nodes_path directory. Because this file may change,
it is _always_ copied to the custom_nodes_path directory.
Then,the custom __init__.py file is programmatically imported using importlib. As it executes, it imports all the
custom node packs as python modules.
Then, it crawls the custom_nodes_path directory and imports all top-level directories as python modules.
If the directory does not contain an __init__.py file or starts with an `_` or `.`, it is skipped.
"""
# create the custom nodes directory if it does not exist
custom_nodes_path.mkdir(parents=True, exist_ok=True)
custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
custom_nodes_readme_path = str(custom_nodes_path / "README.md")
# Copy the README file to the custom nodes directory
source_custom_nodes_readme_path = Path(__file__).parent / "custom_nodes/README.md"
target_custom_nodes_readme_path = Path(custom_nodes_path) / "README.md"
# copy our custom nodes __init__.py to the custom nodes directory
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
# copy our custom nodes README to the custom nodes directory
shutil.copy(source_custom_nodes_readme_path, target_custom_nodes_readme_path)
# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
p.chmod(custom_nodes_path.stat().st_mode)
loaded_packs: list[str] = []
failed_packs: list[str] = []
# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:
raise RuntimeError(f"Could not load custom nodes from {custom_nodes_init_path}")
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
for d in custom_nodes_path.iterdir():
# skip files
if not d.is_dir():
continue
# skip hidden directories
if d.name.startswith("_") or d.name.startswith("."):
continue
# skip directories without an `__init__.py`
init = d / "__init__.py"
if not init.exists():
continue
module_name = init.parent.stem
# skip if already imported
if module_name in globals():
continue
# load the module
spec = spec_from_file_location(module_name, init.absolute())
if spec is None or spec.loader is None:
logger.warning(f"Could not load {init}")
continue
logger.info(f"Loading node pack {module_name}")
try:
module = module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
loaded_packs.append(module_name)
except Exception:
failed_packs.append(module_name)
full_error = traceback.format_exc()
logger.error(f"Failed to load node pack {module_name} (may have partially loaded):\n{full_error}")
del init, module_name
loaded_count = len(loaded_packs)
if loaded_count > 0:
logger.info(
f"Loaded {loaded_count} node pack{'s' if loaded_count != 1 else ''} from {custom_nodes_path}: {', '.join(loaded_packs)}"
)

View File

@@ -610,10 +610,10 @@ class LatentsMetaOutput(LatentsOutput, MetadataOutput):
@invocation(
"denoise_latents_meta",
title="Denoise Latents + metadata",
title=f"{DenoiseLatentsInvocation.UIConfig.title} + Metadata",
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
category="latents",
version="1.1.0",
version="1.1.1",
)
class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
def invoke(self, context: InvocationContext) -> LatentsMetaOutput:
@@ -675,10 +675,10 @@ class DenoiseLatentsMetaInvocation(DenoiseLatentsInvocation, WithMetadata):
@invocation(
"flux_denoise_meta",
title="Flux Denoise + metadata",
title=f"{FluxDenoiseInvocation.UIConfig.title} + Metadata",
tags=["flux", "latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
category="latents",
version="1.0.0",
version="1.0.1",
)
class FluxDenoiseLatentsMetaInvocation(FluxDenoiseInvocation, WithMetadata):
"""Run denoising process with a FLUX transformer model + metadata."""

View File

@@ -122,10 +122,10 @@ class ModelIdentifierOutput(BaseInvocationOutput):
@invocation(
"model_identifier",
title="Model identifier",
title="Any Model",
tags=["model"],
category="model",
version="1.0.0",
version="1.0.1",
classification=Classification.Prototype,
)
class ModelIdentifierInvocation(BaseInvocation):
@@ -144,10 +144,10 @@ class ModelIdentifierInvocation(BaseInvocation):
@invocation(
"main_model_loader",
title="Main Model",
title="Main Model - SD1.5",
tags=["model"],
category="model",
version="1.0.3",
version="1.0.4",
)
class MainModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""
@@ -244,7 +244,7 @@ class LoRASelectorOutput(BaseInvocationOutput):
lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")
@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
@invocation("lora_selector", title="LoRA Model - SD1.5", tags=["model"], category="model", version="1.0.2")
class LoRASelectorInvocation(BaseInvocation):
"""Selects a LoRA model and weight."""
@@ -257,7 +257,9 @@ class LoRASelectorInvocation(BaseInvocation):
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
@invocation(
"lora_collection_loader", title="LoRA Collection - SD1.5", tags=["model"], category="model", version="1.1.1"
)
class LoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""
@@ -320,10 +322,10 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):
@invocation(
"sdxl_lora_loader",
title="SDXL LoRA",
title="LoRA Model - SDXL",
tags=["lora", "model"],
category="model",
version="1.0.3",
version="1.0.4",
)
class SDXLLoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""
@@ -400,10 +402,10 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
@invocation(
"sdxl_lora_collection_loader",
title="SDXL LoRA Collection Loader",
title="LoRA Collection - SDXL",
tags=["model"],
category="model",
version="1.1.0",
version="1.1.1",
)
class SDXLLoRACollectionLoader(BaseInvocation):
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
@@ -469,7 +471,9 @@ class SDXLLoRACollectionLoader(BaseInvocation):
return output
@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
@invocation(
"vae_loader", title="VAE Model - SD1.5, SDXL, SD3, FLUX", tags=["vae", "model"], category="model", version="1.0.4"
)
class VAELoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput"""
@@ -496,10 +500,10 @@ class SeamlessModeOutput(BaseInvocationOutput):
@invocation(
"seamless",
title="Seamless",
title="Apply Seamless - SD1.5, SDXL",
tags=["seamless", "model"],
category="model",
version="1.0.1",
version="1.0.2",
)
class SeamlessModeInvocation(BaseInvocation):
"""Applies the seamless transformation to the Model UNet and VAE."""
@@ -539,7 +543,7 @@ class SeamlessModeInvocation(BaseInvocation):
return SeamlessModeOutput(unet=unet, vae=vae)
@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.1")
@invocation("freeu", title="Apply FreeU - SD1.5, SDXL", tags=["freeu"], category="unet", version="1.0.2")
class FreeUInvocation(BaseInvocation):
"""
Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):

View File

@@ -72,10 +72,10 @@ class NoiseOutput(BaseInvocationOutput):
@invocation(
"noise",
title="Noise",
title="Create Latent Noise",
tags=["latents", "noise"],
category="latents",
version="1.0.2",
version="1.0.3",
)
class NoiseInvocation(BaseInvocation):
"""Generates latent noise."""

View File

@@ -32,10 +32,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"sd3_denoise",
title="SD3 Denoise",
title="Denoise - SD3",
tags=["image", "sd3"],
category="image",
version="1.1.0",
version="1.1.1",
classification=Classification.Prototype,
)
class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

View File

@@ -21,10 +21,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"sd3_i2l",
title="SD3 Image to Latents",
title="Image to Latents - SD3",
tags=["image", "latents", "vae", "i2l", "sd3"],
category="image",
version="1.0.0",
version="1.0.1",
classification=Classification.Prototype,
)
class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

View File

@@ -24,10 +24,10 @@ from invokeai.backend.util.devices import TorchDevice
@invocation(
"sd3_l2i",
title="SD3 Latents to Image",
title="Latents to Image - SD3",
tags=["latents", "image", "vae", "l2i", "sd3"],
category="latents",
version="1.3.1",
version="1.3.2",
)
class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an image from latents."""

View File

@@ -30,10 +30,10 @@ class Sd3ModelLoaderOutput(BaseInvocationOutput):
@invocation(
"sd3_model_loader",
title="SD3 Main Model",
title="Main Model - SD3",
tags=["model", "sd3"],
category="model",
version="1.0.0",
version="1.0.1",
classification=Classification.Prototype,
)
class Sd3ModelLoaderInvocation(BaseInvocation):

View File

@@ -29,10 +29,10 @@ SD3_T5_MAX_SEQ_LEN = 256
@invocation(
"sd3_text_encoder",
title="SD3 Text Encoding",
title="Prompt - SD3",
tags=["prompt", "conditioning", "sd3"],
category="conditioning",
version="1.0.0",
version="1.0.1",
classification=Classification.Prototype,
)
class Sd3TextEncoderInvocation(BaseInvocation):

View File

@@ -24,7 +24,7 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
@invocation("sdxl_model_loader", title="Main Model - SDXL", tags=["model", "sdxl"], category="model", version="1.0.4")
class SDXLModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl base model, outputting its submodels."""
@@ -58,10 +58,10 @@ class SDXLModelLoaderInvocation(BaseInvocation):
@invocation(
"sdxl_refiner_model_loader",
title="SDXL Refiner Model",
title="Refiner Model - SDXL",
tags=["model", "sdxl", "refiner"],
category="model",
version="1.0.3",
version="1.0.4",
)
class SDXLRefinerModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl refiner model, outputting its submodels."""

View File

@@ -45,7 +45,11 @@ class T2IAdapterOutput(BaseInvocationOutput):
@invocation(
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
"t2i_adapter",
title="T2I-Adapter - SD1.5, SDXL",
tags=["t2i_adapter", "control"],
category="t2i_adapter",
version="1.0.4",
)
class T2IAdapterInvocation(BaseInvocation):
"""Collects T2I-Adapter info to pass to other nodes."""

View File

@@ -53,11 +53,11 @@ def crop_controlnet_data(control_data: ControlNetData, latent_region: TBLR) -> C
@invocation(
"tiled_multi_diffusion_denoise_latents",
title="Tiled Multi-Diffusion Denoise Latents",
title="Tiled Multi-Diffusion Denoise - SD1.5, SDXL",
tags=["upscale", "denoise"],
category="latents",
classification=Classification.Beta,
version="1.0.0",
version="1.0.1",
)
class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
"""Tiled Multi-Diffusion denoising.

View File

@@ -59,7 +59,7 @@ def run_app() -> None:
# Load custom nodes. This must be done after importing the Graph class, which itself imports all modules from the
# invocations module. The ordering here is implicit, but important - we want to load custom nodes after all the
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path)
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)
# Start the server.
config = uvicorn.Config(

View File

@@ -20,6 +20,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import build_migration_16
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import build_migration_17
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -57,6 +58,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_15())
migrator.register_migration(build_migration_16())
migrator.register_migration(build_migration_17())
migrator.register_migration(build_migration_18())
migrator.run_migrations()
return db

View File

@@ -0,0 +1,47 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration18Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._make_workflow_opened_at_nullable(cursor)
def _make_workflow_opened_at_nullable(self, cursor: sqlite3.Cursor) -> None:
"""
Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
- Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
- Dropping the existing `opened_at` column
- Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
- Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
"""
# For index renaming in SQLite, we need to drop and recreate
cursor.execute("DROP INDEX IF EXISTS idx_workflow_library_opened_at;")
# Rename existing column to deprecated
cursor.execute("ALTER TABLE workflow_library DROP COLUMN opened_at;")
# Add new nullable column - all values will be NULL - no migration of data needed
cursor.execute("ALTER TABLE workflow_library ADD COLUMN opened_at DATETIME;")
# Create new index on the new column
cursor.execute(
"CREATE INDEX idx_workflow_library_opened_at ON workflow_library(opened_at);",
)
def build_migration_18() -> Migration:
"""
Build the migration from database version 17 to 18.
This migration does the following:
- Make the `opened_at` column nullable in the `workflow_library` table. This is accomplished by:
- Dropping the existing `idx_workflow_library_opened_at` index (must be done before dropping the column)
- Dropping the existing `opened_at` column
- Adding a new nullable column `opened_at` (no data migration needed, all values will be NULL)
- Adding a new `idx_workflow_library_opened_at` index on the `opened_at` column
"""
migration_18 = Migration(
from_version=17,
to_version=18,
callback=Migration18Callback(),
)
return migration_18

View File

@@ -1,11 +1,11 @@
{
"id": "default_686bb1d0-d086-4c70-9fa3-2f600b922023",
"name": "ESRGAN Upscaling with Canny ControlNet",
"name": "Upscaler - SD1.5, ESRGAN",
"author": "InvokeAI",
"description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
"description": "Sample workflow for using ESRGAN to upscale with ControlNet with SD1.5",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "upscaling, controlnet, default",
"tags": "sd1.5, upscaling, control",
"notes": "",
"exposedFields": [
{
@@ -185,14 +185,7 @@
},
"control_model": {
"name": "control_model",
"label": "Control Model (select Canny)",
"value": {
"key": "a7b9c76f-4bc5-42aa-b918-c1c458a5bb24",
"hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
"name": "sd-controlnet-canny",
"base": "sd-1",
"type": "controlnet"
}
"label": "Control Model (select Canny)"
},
"control_weight": {
"name": "control_weight",
@@ -295,14 +288,7 @@
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "5cd43ca0-dd0a-418d-9f7e-35b2b9d5e106",
"hash": "blake3:6987f323017f597213cc3264250edf57056d21a40a0a85d83a1a33a7d44dc41a",
"name": "Deliberate_v5",
"base": "sd-1",
"type": "main"
}
"label": ""
}
},
"isOpen": true,
@@ -849,4 +835,4 @@
"targetHandle": "image_resolution"
}
]
}
}

View File

@@ -1,11 +1,11 @@
{
"id": "default_cbf0e034-7b54-4b2c-b670-3b1e2e4b4a88",
"name": "FLUX Image to Image",
"name": "Image to Image - FLUX",
"author": "InvokeAI",
"description": "A simple image-to-image workflow using a FLUX dev model. ",
"version": "1.1.0",
"contact": "",
"tags": "image2image, flux, image-to-image, image to image",
"tags": "flux, image to image",
"notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend using FLUX dev models for image-to-image workflows. The image-to-image performance with FLUX schnell models is poor.",
"exposedFields": [
{
@@ -201,36 +201,15 @@
},
"t5_encoder_model": {
"name": "t5_encoder_model",
"label": "",
"value": {
"key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
"hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
"name": "t5_bnb_int8_quantized_encoder",
"base": "any",
"type": "t5_encoder"
}
"label": ""
},
"clip_embed_model": {
"name": "clip_embed_model",
"label": "",
"value": {
"key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
"hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
"name": "clip-vit-large-patch14",
"base": "any",
"type": "clip_embed"
}
"label": ""
},
"vae_model": {
"name": "vae_model",
"label": "",
"value": {
"key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
"hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
"name": "FLUX.1-schnell_ae",
"base": "flux",
"type": "vae"
}
"label": ""
}
}
},

View File

@@ -1,11 +1,11 @@
{
"id": "default_dec5a2e9-f59c-40d9-8869-a056751d79b8",
"name": "Face Detailer with IP-Adapter & Canny (See Note in Details)",
"name": "Face Detailer - SD1.5",
"author": "kosmoskatten",
"description": "A workflow to add detail to and improve faces. This workflow is most effective when used with a model that creates realistic outputs. ",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "face detailer, IP-Adapter, Canny",
"tags": "sd1.5, reference image, control",
"notes": "Set this image as the blur mask: https://i.imgur.com/Gxi61zP.png",
"exposedFields": [
{
@@ -136,14 +136,7 @@
},
"control_model": {
"name": "control_model",
"label": "Control Model (select canny)",
"value": {
"key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
"hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
"name": "sd-controlnet-canny",
"base": "sd-1",
"type": "controlnet"
}
"label": "Control Model (select canny)"
},
"control_weight": {
"name": "control_weight",
@@ -197,14 +190,7 @@
},
"ip_adapter_model": {
"name": "ip_adapter_model",
"label": "IP-Adapter Model (select IP Adapter Face)",
"value": {
"key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
"hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
"name": "ip_adapter_sd15",
"base": "sd-1",
"type": "ip_adapter"
}
"label": "IP-Adapter Model (select IP Adapter Face)"
},
"clip_vision_model": {
"name": "clip_vision_model",

View File

@@ -1,11 +1,11 @@
{
"id": "default_444fe292-896b-44fd-bfc6-c0b5d220fffc",
"name": "FLUX Text to Image",
"name": "Text to Image - FLUX",
"author": "InvokeAI",
"description": "A simple text-to-image workflow using FLUX dev or schnell models.",
"version": "1.1.0",
"contact": "",
"tags": "text2image, flux, text to image",
"tags": "flux, text to image",
"notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
"exposedFields": [
{
@@ -169,36 +169,15 @@
},
"t5_encoder_model": {
"name": "t5_encoder_model",
"label": "",
"value": {
"key": "d18d5575-96b6-4da3-b3d8-eb58308d6705",
"hash": "random:f2f9ed74acdfb4bf6fec200e780f6c25f8dd8764a35e65d425d606912fdf573a",
"name": "t5_bnb_int8_quantized_encoder",
"base": "any",
"type": "t5_encoder"
}
"label": ""
},
"clip_embed_model": {
"name": "clip_embed_model",
"label": "",
"value": {
"key": "5a19d7e5-8d98-43cd-8a81-87515e4b3b4e",
"hash": "random:4bd08514c08fb6ff04088db9aeb45def3c488e8b5fd09a35f2cc4f2dc346f99f",
"name": "clip-vit-large-patch14",
"base": "any",
"type": "clip_embed"
}
"label": ""
},
"vae_model": {
"name": "vae_model",
"label": "",
"value": {
"key": "9172beab-5c1d-43f0-b2f0-6e0b956710d9",
"hash": "random:c54dde288e5fa2e6137f1c92e9d611f598049e6f16e360207b6d96c9f5a67ba0",
"name": "FLUX.1-schnell_ae",
"base": "flux",
"type": "vae"
}
"label": ""
}
}
},

View File

@@ -1,11 +1,11 @@
{
"id": "default_2d05e719-a6b9-4e64-9310-b875d3b2f9d2",
"name": "Multi ControlNet (Canny & Depth)",
"name": "Text to Image - SD1.5, Control",
"author": "InvokeAI",
"description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "ControlNet, canny, depth",
"tags": "sd1.5, control, text to image",
"notes": "",
"exposedFields": [
{
@@ -217,14 +217,7 @@
},
"control_model": {
"name": "control_model",
"label": "Control Model (select canny)",
"value": {
"key": "5bdaacf7-a7a3-4fb8-b394-cc0ffbb8941d",
"hash": "blake3:260c7f8e10aefea9868cfc68d89970e91033bd37132b14b903e70ee05ebf530e",
"name": "sd-controlnet-canny",
"base": "sd-1",
"type": "controlnet"
}
"label": "Control Model (select canny)"
},
"control_weight": {
"name": "control_weight",
@@ -371,14 +364,7 @@
},
"control_model": {
"name": "control_model",
"label": "Control Model (select depth)",
"value": {
"key": "87e8855c-671f-4c9e-bbbb-8ed47ccb4aac",
"hash": "blake3:2550bf22a53942dfa28ab2fed9d10d80851112531f44d977168992edf9d0534c",
"name": "control_v11f1p_sd15_depth",
"base": "sd-1",
"type": "controlnet"
}
"label": "Control Model (select depth)"
},
"control_weight": {
"name": "control_weight",

View File

@@ -1,11 +1,11 @@
{
"id": "default_f96e794f-eb3e-4d01-a960-9b4e43402bcf",
"name": "MultiDiffusion SD1.5",
"name": "Upscaler - SD1.5, MultiDiffusion",
"author": "Invoke",
"description": "A workflow to upscale an input image with tiled upscaling, using SD1.5 based models.",
"version": "1.0.0",
"contact": "invoke@invoke.ai",
"tags": "tiled, upscaling, sdxl",
"tags": "sd1.5, upscaling",
"notes": "",
"exposedFields": [
{
@@ -135,14 +135,7 @@
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "e7b402e5-62e5-4acb-8c39-bee6bdb758ab",
"hash": "c8659e796168d076368256b57edbc1b48d6dafc1712f1bb37cc57c7c06889a6b",
"name": "526mix",
"base": "sd-1",
"type": "main"
}
"label": ""
}
}
},
@@ -384,21 +377,11 @@
},
"image": {
"name": "image",
"label": "Image to Upscale",
"value": {
"image_name": "ee7009f7-a35d-488b-a2a6-21237ef5ae05.png"
}
"label": "Image to Upscale"
},
"image_to_image_model": {
"name": "image_to_image_model",
"label": "",
"value": {
"key": "38bb1a29-8ede-42ba-b77f-64b3478896eb",
"hash": "blake3:e52fdbee46a484ebe9b3b20ea0aac0a35a453ab6d0d353da00acfd35ce7a91ed",
"name": "4xNomosWebPhoto_esrgan",
"base": "sdxl",
"type": "spandrel_image_to_image"
}
"label": ""
},
"tile_size": {
"name": "tile_size",
@@ -437,14 +420,7 @@
"inputs": {
"model": {
"name": "model",
"label": "ControlNet Model - Choose a Tile ControlNet",
"value": {
"key": "20645e4d-ef97-4c5a-9243-b834a3483925",
"hash": "f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
"name": "tile",
"base": "sd-1",
"type": "controlnet"
}
"label": "ControlNet Model - Choose a Tile ControlNet"
}
}
},

View File

@@ -1,11 +1,11 @@
{
"id": "default_35658541-6d41-4a20-8ec5-4bf2561faed0",
"name": "MultiDiffusion SDXL",
"name": "Upscaler - SDXL, MultiDiffusion",
"author": "Invoke",
"description": "A workflow to upscale an input image with tiled upscaling, using SDXL based models.",
"version": "1.1.0",
"contact": "invoke@invoke.ai",
"tags": "tiled, upscaling, sdxl",
"tags": "sdxl, upscaling",
"notes": "",
"exposedFields": [
{
@@ -341,14 +341,7 @@
"inputs": {
"model": {
"name": "model",
"label": "ControlNet Model - Choose a Tile ControlNet",
"value": {
"key": "74f4651f-0ace-4b7b-b616-e98360257797",
"hash": "blake3:167a5b84583aaed3e5c8d660b45830e82e1c602743c689d3c27773c6c8b85b4a",
"name": "controlnet-tile-sdxl-1.0",
"base": "sdxl",
"type": "controlnet"
}
"label": "ControlNet Model - Choose a Tile ControlNet"
}
}
},
@@ -801,14 +794,7 @@
"inputs": {
"vae_model": {
"name": "vae_model",
"label": "",
"value": {
"key": "ff926845-090e-4d46-b81e-30289ee47474",
"hash": "9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
"name": "VAEFix",
"base": "sdxl",
"type": "vae"
}
"label": ""
}
}
},
@@ -832,14 +818,7 @@
"inputs": {
"model": {
"name": "model",
"label": "SDXL Model",
"value": {
"key": "ab191f73-68d2-492c-8aec-b438a8cf0f45",
"hash": "blake3:2d50e940627e3bf555f015280ec0976d5c1fa100f7bc94e95ffbfc770e98b6fe",
"name": "CustomXLv7",
"base": "sdxl",
"type": "main"
}
"label": "SDXL Model"
}
}
},

View File

@@ -1,11 +1,11 @@
{
"id": "default_d7a1c60f-ca2f-4f90-9e33-75a826ca6d8f",
"name": "Prompt from File",
"name": "Text to Image - SD1.5, Prompt from File",
"author": "InvokeAI",
"description": "Sample workflow using Prompt from File node",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, prompt from file, default, text to image",
"tags": "sd1.5, text to image",
"notes": "",
"exposedFields": [
{
@@ -513,4 +513,4 @@
"targetHandle": "vae"
}
]
}
}

View File

@@ -10,6 +10,7 @@ _default workflows_ on app startup.
An exception will be raised during sync if this is not set correctly.
- Default workflows appear on the "Default Workflows" tab of the Workflow
Library.
- Default workflows should not reference any user-created or user-installed resources, including images and models. For example, if a default workflow references Juggernaut as an SDXL model, a user's installed copy of Juggernaut will have a different UUID, so the reference won't resolve even if the model is installed, and the user may see a warning. It's best to ship default workflows without any references to these kinds of resources, as sketched below.
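
For illustration, a minimal sketch of what this means for an exposed model field (Python-style dicts mirroring the workflow JSON in the diffs above; the key, hash, and model name are placeholders, not real entries):

# Before: the field pins a specific installed model by key/hash (placeholders here).
field_with_model = {
    "name": "model",
    "label": "SDXL Model",
    "value": {
        "key": "<uuid-of-locally-installed-model>",
        "hash": "<hash>",
        "name": "Juggernaut",
        "base": "sdxl",
        "type": "main",
    },
}

# After: the "value" is dropped entirely; the user selects a model when loading.
field_without_model = {
    "name": "model",
    "label": "SDXL Model",
}
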
After adding or updating default workflows, you **must** start the app up and
load them to ensure:

View File

@@ -1,11 +1,11 @@
{
"id": "default_dbe46d95-22aa-43fb-9c16-94400d0ce2fd",
"name": "SD3.5 Text to Image",
"name": "Text to Image - SD3.5",
"author": "InvokeAI",
"description": "Sample text to image workflow for Stable Diffusion 3.5",
"version": "1.0.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SD3.5, text to image",
"tags": "SD3.5, text to image",
"notes": "",
"exposedFields": [
{
@@ -38,14 +38,7 @@
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "f7b20be9-92a8-4cfb-bca4-6c3b5535c10b",
"hash": "placeholder",
"name": "stable-diffusion-3.5-medium",
"base": "sd-3",
"type": "main"
}
"label": ""
},
"t5_encoder_model": {
"name": "t5_encoder_model",

View File

@@ -5,7 +5,7 @@
"description": "Sample text to image workflow for Stable Diffusion 1.5/2",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SD1.5, SD2, text to image",
"tags": "SD1.5, text to image",
"notes": "",
"exposedFields": [
{
@@ -417,4 +417,4 @@
"targetHandle": "vae"
}
]
}
}

View File

@@ -5,7 +5,7 @@
"description": "Sample text to image workflow for SDXL",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SDXL, text to image",
"tags": "SDXL, text to image",
"notes": "",
"exposedFields": [
{
@@ -46,14 +46,7 @@
"inputs": {
"vae_model": {
"name": "vae_model",
"label": "VAE (use the FP16 model)",
"value": {
"key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069",
"hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
"name": "sdxl-vae-fp16-fix",
"base": "sdxl",
"type": "vae"
}
"label": "VAE (use the FP16 model)"
}
},
"isOpen": true,
@@ -203,14 +196,7 @@
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba",
"hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba",
"name": "dreamshaper-xl-v2-turbo",
"base": "sdxl",
"type": "main"
}
"label": ""
}
},
"isOpen": true,
@@ -715,4 +701,4 @@
"targetHandle": "style"
}
]
}
}

View File

@@ -1,11 +1,11 @@
{
"id": "default_e71d153c-2089-43c7-bd2c-f61f37d4c1c1",
"name": "Text to Image with LoRA",
"name": "Text to Image - SD1.5, LoRA",
"author": "InvokeAI",
"description": "Simple text to image workflow with a LoRA",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "text to image, lora, text to image",
"tags": "sd1.5, text to image, lora",
"notes": "",
"exposedFields": [
{

View File

@@ -1,11 +1,11 @@
{
"id": "default_43b0d7f7-6a12-4dcf-a5a4-50c940cbee29",
"name": "Tiled Upscaling (Beta)",
"name": "Upscaler - SD1.5, Tiled",
"author": "Invoke",
"description": "A workflow to upscale an input image with tiled upscaling. ",
"version": "2.1.0",
"contact": "invoke@invoke.ai",
"tags": "tiled, upscaling, sd1.5",
"tags": "sd1.5, upscaling",
"notes": "",
"exposedFields": [
{
@@ -86,14 +86,7 @@
},
"ip_adapter_model": {
"name": "ip_adapter_model",
"label": "IP-Adapter Model (select ip_adapter_sd15)",
"value": {
"key": "1cc210bb-4d0a-4312-b36c-b5d46c43768e",
"hash": "blake3:3d669dffa7471b357b4df088b99ffb6bf4d4383d5e0ef1de5ec1c89728a3d5a5",
"name": "ip_adapter_sd15",
"base": "sd-1",
"type": "ip_adapter"
}
"label": "IP-Adapter Model (select ip_adapter_sd15)"
},
"clip_vision_model": {
"name": "clip_vision_model",
@@ -201,14 +194,7 @@
},
"control_model": {
"name": "control_model",
"label": "Control Model (select contro_v11f1e_sd15_tile)",
"value": {
"key": "773843c8-db1f-4502-8f65-59782efa7960",
"hash": "blake3:f0812e13758f91baf4e54b7dbb707b70642937d3b2098cd2b94cc36d3eba308e",
"name": "control_v11f1e_sd15_tile",
"base": "sd-1",
"type": "controlnet"
}
"label": "Control Model (select control_v11f1e_sd15_tile)"
},
"control_weight": {
"name": "control_weight",
@@ -1816,4 +1802,4 @@
"targetHandle": "unet"
}
]
}
}

View File

@@ -46,17 +46,28 @@ class WorkflowRecordsStorageBase(ABC):
per_page: Optional[int],
query: Optional[str],
tags: Optional[list[str]],
has_been_opened: Optional[bool],
) -> PaginatedResults[WorkflowRecordListItemDTO]:
"""Gets many workflows."""
pass
@abstractmethod
def get_counts(
def counts_by_category(
self,
tags: Optional[list[str]],
categories: Optional[list[WorkflowCategory]],
) -> int:
"""Gets the count of workflows for the given tags and categories."""
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided categories."""
pass
@abstractmethod
def counts_by_tag(
self,
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
) -> dict[str, int]:
"""Gets a dictionary of counts for each of the provided tags."""
pass
@abstractmethod

View File

@@ -1,6 +1,6 @@
import datetime
from enum import Enum
from typing import Any, Union
from typing import Any, Optional, Union
import semver
from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, field_validator
@@ -98,7 +98,9 @@ class WorkflowRecordDTOBase(BaseModel):
name: str = Field(description="The name of the workflow.")
created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the workflow.")
updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the workflow.")
opened_at: Union[datetime.datetime, str] = Field(description="The opened timestamp of the workflow.")
opened_at: Optional[Union[datetime.datetime, str]] = Field(
default=None, description="The opened timestamp of the workflow."
)
class WorkflowRecordDTO(WorkflowRecordDTOBase):

View File

@@ -118,6 +118,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
per_page: Optional[int] = None,
query: Optional[str] = None,
tags: Optional[list[str]] = None,
has_been_opened: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
# sanitize!
assert order_by in WorkflowRecordOrderBy
@@ -175,6 +176,11 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
conditions.append(tags_condition)
params.extend(tags_params)
if has_been_opened:
conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
conditions.append("opened_at IS NULL")
# Ignore whitespace in the query
stripped_query = query.strip() if query else None
if stripped_query:
@@ -230,54 +236,105 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
total=total,
)
def get_counts(
def counts_by_tag(
self,
tags: Optional[list[str]],
categories: Optional[list[WorkflowCategory]],
) -> int:
tags: list[str],
categories: Optional[list[WorkflowCategory]] = None,
has_been_opened: Optional[bool] = None,
) -> dict[str, int]:
if not tags:
return {}
cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories and selected tags
base_conditions: list[str] = []
base_params: list[str | int] = []
# Start with an empty list of conditions and params
conditions: list[str] = []
params: list[str | int] = []
if tags:
# Construct a list of conditions for each tag
tags_conditions = ["tags LIKE ?" for _ in tags]
tags_conditions_joined = " OR ".join(tags_conditions)
tags_condition = f"({tags_conditions_joined})"
# And the params for the tags, case-insensitive
tags_params = [f"%{t.strip()}%" for t in tags]
conditions.append(tags_condition)
params.extend(tags_params)
# Add category conditions
if categories:
# Ensure all categories are valid (is this necessary?)
assert all(c in WorkflowCategory for c in categories)
# Construct a placeholder string for the number of categories
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
# Construct the condition string & params
conditions.append(f"category IN ({placeholders})")
params.extend([category.value for category in categories])
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
# For each tag to count, run a separate query
for tag in tags:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
if conditions:
# If there are conditions, add a WHERE clause and then join the conditions
stmt += " WHERE "
# Add this specific tag condition
conditions.append("tags LIKE ?")
params.append(f"%{tag.strip()}%")
all_conditions = " AND ".join(conditions)
stmt += all_conditions
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
cursor.execute(stmt, tuple(params))
return cursor.fetchone()[0]
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[tag] = count
return result
def counts_by_category(
self,
categories: list[WorkflowCategory],
has_been_opened: Optional[bool] = None,
) -> dict[str, int]:
cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories
base_conditions: list[str] = []
base_params: list[str | int] = []
# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
# For each category to count, run a separate query
for category in categories:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
# Add this specific category condition
conditions.append("category = ?")
params.append(category.value)
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[category.value] = count
return result
def update_opened_at(self, workflow_id: str) -> None:
try:

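A hedged usage sketch of the new counting API (Python; `storage` stands for a SqliteWorkflowRecordsStorage instance, and the tag names and counts are illustrative, not taken from the diff):

# Hedged sketch: `storage` is assumed to be a SqliteWorkflowRecordsStorage.
# counts_by_tag runs one COUNT(*) query per tag, applying the shared
# category and has_been_opened filters to every query.
tag_counts = storage.counts_by_tag(
    tags=["sd1.5", "upscaling"],
    categories=list(WorkflowCategory),  # optional; None means no category filter
    has_been_opened=True,               # only workflows with opened_at set
)
# -> e.g. {"sd1.5": 4, "upscaling": 2}

# counts_by_category runs one COUNT(*) query per category.
category_counts = storage.counts_by_category(categories=list(WorkflowCategory))
# -> e.g. {"user": 12, "default": 11, "project": 3}
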
View File

@@ -8,12 +8,12 @@ class WorkflowThumbnailServiceBase(ABC):
"""Base class for workflow thumbnail services"""
@abstractmethod
def get_path(self, workflow_id: str) -> Path:
def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
"""Gets the path to a workflow thumbnail"""
pass
@abstractmethod
def get_url(self, workflow_id: str) -> str | None:
def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
"""Gets the URL of a workflow thumbnail"""
pass

View File

@@ -41,7 +41,7 @@ class WorkflowThumbnailFileStorageDisk(WorkflowThumbnailServiceBase):
except Exception as e:
raise WorkflowThumbnailFileSaveException from e
def get_path(self, workflow_id: str) -> Path:
def get_path(self, workflow_id: str, with_hash: bool = True) -> Path:
workflow = self._invoker.services.workflow_records.get(workflow_id).workflow
if workflow.meta.category is WorkflowCategory.Default:
default_thumbnails_dir = Path(__file__).parent / Path("default_workflow_thumbnails")
@@ -51,7 +51,7 @@ class WorkflowThumbnailFileStorageDisk(WorkflowThumbnailServiceBase):
return path
def get_url(self, workflow_id: str) -> str | None:
def get_url(self, workflow_id: str, with_hash: bool = True) -> str | None:
path = self.get_path(workflow_id)
if not self._validate_path(path):
return
@@ -59,7 +59,8 @@ class WorkflowThumbnailFileStorageDisk(WorkflowThumbnailServiceBase):
url = self._invoker.services.urls.get_workflow_thumbnail_url(workflow_id)
# The image URL never changes, so we must add random query string to it to prevent caching
url += f"?{uuid_string()}"
if with_hash:
url += f"?{uuid_string()}"
return url
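
The change above makes the cache-busting query string optional. A hedged usage sketch (Python; `thumbnails` stands for a WorkflowThumbnailFileStorageDisk instance, and the URLs are illustrative):

# Hedged sketch: `thumbnails` is assumed to be a WorkflowThumbnailFileStorageDisk.
url = thumbnails.get_url("some-workflow-id")
# with_hash defaults to True, so a random UUID query string is appended,
# e.g. ".../thumbnail?0b1f..." - the browser never serves a stale thumbnail.

stable_url = thumbnails.get_url("some-workflow-id", with_hash=False)
# No query string is appended, so the URL is stable and cacheable,
# e.g. ".../thumbnail".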

View File

@@ -610,6 +610,7 @@ flux_redux = StarterModel(
source="black-forest-labs/FLUX.1-Redux-dev::flux1-redux-dev.safetensors",
description="FLUX Redux model (for image variation).",
type=ModelType.FluxRedux,
dependencies=[siglip],
)
# endregion
@@ -717,7 +718,6 @@ sdxl_bundle: list[StarterModel] = [
scribble_sdxl,
tile_sdxl,
swinir,
flux_redux,
]
flux_bundle: list[StarterModel] = [
@@ -730,7 +730,7 @@ flux_bundle: list[StarterModel] = [
ip_adapter_flux,
flux_canny_control_lora,
flux_depth_control_lora,
siglip,
flux_redux,
]
STARTER_BUNDLES: dict[str, list[StarterModel]] = {

View File

@@ -60,7 +60,7 @@
"@fontsource-variable/inter": "^5.1.0",
"@invoke-ai/ui-library": "^0.0.46",
"@nanostores/react": "^0.7.3",
"@reduxjs/toolkit": "2.6.0",
"@reduxjs/toolkit": "2.6.1",
"@roarr/browser-log-writer": "^1.3.0",
"@xyflow/react": "^12.4.2",
"async-mutex": "^0.5.0",

View File

@@ -30,8 +30,8 @@ dependencies:
specifier: ^0.7.3
version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
'@reduxjs/toolkit':
specifier: 2.6.0
version: 2.6.0(react-redux@9.1.2)(react@18.3.1)
specifier: 2.6.1
version: 2.6.1(react-redux@9.1.2)(react@18.3.1)
'@roarr/browser-log-writer':
specifier: ^1.3.0
version: 1.3.0
@@ -2311,8 +2311,8 @@ packages:
- supports-color
dev: true
/@reduxjs/toolkit@2.6.0(react-redux@9.1.2)(react@18.3.1):
resolution: {integrity: sha512-mWJCYpewLRyTuuzRSEC/IwIBBkYg2dKtQas8mty5MaV2iXzcmicS3gW554FDeOvLnY3x13NIk8MB1e8wHO7rqQ==}
/@reduxjs/toolkit@2.6.1(react-redux@9.1.2)(react@18.3.1):
resolution: {integrity: sha512-SSlIqZNYhqm/oMkXbtofwZSt9lrncblzo6YcZ9zoX+zLngRBrCOjK4lNLdkNucJF58RHOWrD9txT3bT3piH7Zw==}
peerDependencies:
react: ^16.9.0 || ^17.0.0 || ^18 || ^19
react-redux: ^7.2.1 || ^8.1.3 || ^9.0.0

View File

@@ -113,7 +113,8 @@
"end": "Ende",
"layout": "Layout",
"board": "Ordner",
"combinatorial": "Kombinatorisch"
"combinatorial": "Kombinatorisch",
"saveChanges": "Änderungen speichern"
},
"gallery": {
"galleryImageSize": "Bildgröße",
@@ -761,7 +762,16 @@
"workflowDeleted": "Arbeitsablauf gelöscht",
"errorCopied": "Fehler kopiert",
"layerCopiedToClipboard": "Ebene in die Zwischenablage kopiert",
"sentToCanvas": "An Leinwand gesendet"
"sentToCanvas": "An Leinwand gesendet",
"problemDeletingWorkflow": "Problem beim Löschen des Arbeitsablaufs",
"uploadFailedInvalidUploadDesc_withCount_one": "Es darf maximal 1 PNG- oder JPEG-Bild sein.",
"uploadFailedInvalidUploadDesc_withCount_other": "Es dürfen maximal {{count}} PNG- oder JPEG-Bilder sein.",
"problemRetrievingWorkflow": "Problem beim Abrufen des Arbeitsablaufs",
"uploadFailedInvalidUploadDesc": "Müssen PNG- oder JPEG-Bilder sein.",
"pasteSuccess": "Eingefügt in {{destination}}",
"pasteFailed": "Einfügen fehlgeschlagen",
"unableToCopy": "Kopieren nicht möglich",
"unableToCopyDesc_theseSteps": "diese Schritte"
},
"accessibility": {
"uploadImage": "Bild hochladen",
@@ -1314,7 +1324,8 @@
"nodeName": "Knotenname",
"description": "Beschreibung",
"loadWorkflowDesc": "Arbeitsablauf laden?",
"loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen."
"loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen.",
"loadingTemplates": "Lade {{name}}"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",

View File

@@ -116,6 +116,8 @@
"dontShowMeThese": "Don't show me these",
"editor": "Editor",
"error": "Error",
"error_withCount_one": "{{count}} error",
"error_withCount_other": "{{count}} errors",
"file": "File",
"folder": "Folder",
"format": "format",
@@ -1205,6 +1207,7 @@
"informationalPopoversDisabled": "Informational Popovers Disabled",
"informationalPopoversDisabledDesc": "Informational popovers have been disabled. Enable them in Settings.",
"enableModelDescriptions": "Enable Model Descriptions in Dropdowns",
"enableHighlightFocusedRegions": "Highlight Focused Regions",
"modelDescriptionsDisabled": "Model Descriptions in Dropdowns Disabled",
"modelDescriptionsDisabledDesc": "Model descriptions in dropdowns have been disabled. Enable them in Settings.",
"enableInvisibleWatermark": "Enable Invisible Watermark",
@@ -1286,9 +1289,9 @@
"somethingWentWrong": "Something Went Wrong",
"uploadFailed": "Upload failed",
"imagesWillBeAddedTo": "Uploaded images will be added to board {{boardName}}'s assets.",
"uploadFailedInvalidUploadDesc_withCount_one": "Must be maximum of 1 PNG or JPEG image.",
"uploadFailedInvalidUploadDesc_withCount_other": "Must be maximum of {{count}} PNG or JPEG images.",
"uploadFailedInvalidUploadDesc": "Must be PNG or JPEG images.",
"uploadFailedInvalidUploadDesc_withCount_one": "Must be maximum of 1 PNG, JPEG or WEBP image.",
"uploadFailedInvalidUploadDesc_withCount_other": "Must be maximum of {{count}} PNG, JPEG or WEBP images.",
"uploadFailedInvalidUploadDesc": "Must be PNG, JPEG or WEBP images.",
"workflowLoaded": "Workflow Loaded",
"problemRetrievingWorkflow": "Problem Retrieving Workflow",
"workflowDeleted": "Workflow Deleted",
@@ -1692,10 +1695,11 @@
"filterByTags": "Filter by Tags",
"yourWorkflows": "Your Workflows",
"recentlyOpened": "Recently Opened",
"noRecentWorkflows": "No Recent Workflows",
"private": "Private",
"shared": "Shared",
"browseWorkflows": "Browse Workflows",
"resetTags": "Reset Tags",
"deselectAll": "Deselect All",
"opened": "Opened",
"openWorkflow": "Open Workflow",
"updated": "Updated",
@@ -1726,6 +1730,7 @@
"loadWorkflow": "$t(common.load) Workflow",
"autoLayout": "Auto Layout",
"edit": "Edit",
"view": "View",
"download": "Download",
"copyShareLink": "Copy Share Link",
"copyShareLinkForWorkflow": "Copy Share Link for Workflow",
@@ -1741,6 +1746,8 @@
"row": "Row",
"column": "Column",
"container": "Container",
"containerRowLayout": "Container (row layout)",
"containerColumnLayout": "Container (column layout)",
"heading": "Heading",
"text": "Text",
"divider": "Divider",
@@ -1761,7 +1768,9 @@
"containerPlaceholder": "Empty Container",
"headingPlaceholder": "Empty Heading",
"textPlaceholder": "Empty Text",
"workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release."
"workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release.",
"minimum": "Minimum",
"maximum": "Maximum"
}
},
"controlLayers": {
@@ -2313,6 +2322,7 @@
"newUserExperience": {
"toGetStartedLocal": "To get started, make sure to download or import models needed to run Invoke. Then, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
"toGetStarted": "To get started, enter a prompt in the box and click <StrongComponent>Invoke</StrongComponent> to generate your first image. Select a prompt template to improve results. You can choose to save your images directly to the <StrongComponent>Gallery</StrongComponent> or edit them to the <StrongComponent>Canvas</StrongComponent>.",
"toGetStartedWorkflow": "To get started, fill in the fields on the left and press <StrongComponent>Invoke</StrongComponent> to generate your image. Want to explore more workflows? Click the <StrongComponent>folder icon</StrongComponent> next to the workflow title to see a list of other templates you can try.",
"gettingStartedSeries": "Want more guidance? Check out our <LinkComponent>Getting Started Series</LinkComponent> for tips on unlocking the full potential of the Invoke Studio.",
"lowVRAMMode": "For best performance, follow our <LinkComponent>Low VRAM guide</LinkComponent>.",
"noModelsInstalled": "It looks like you don't have any models installed! You can <DownloadStarterModelsButton>download a starter model bundle</DownloadStarterModelsButton> or <ImportModelsButton>import models</ImportModelsButton>."
@@ -2320,8 +2330,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Memory Management: New setting for users with Nvidia GPUs to reduce VRAM usage.",
"Performance: Continued improvements to overall application performance and responsiveness."
"Workflows: New and improved Workflow Library.",
"FLUX: Support for FLUX Redux in Workflows and Canvas."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -1771,7 +1771,6 @@
"projectWorkflows": "Workflows du projet",
"copyShareLink": "Copier le lien de partage",
"chooseWorkflowFromLibrary": "Choisir le Workflow dans la Bibliothèque",
"uploadAndSaveWorkflow": "Importer dans la bibliothèque",
"edit": "Modifer",
"deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce Workflow? Cette action ne peut pas être annulé.",
"download": "Télécharger",

View File

@@ -109,7 +109,11 @@
"board": "Bacheca",
"layout": "Schema",
"row": "Riga",
"column": "Colonna"
"column": "Colonna",
"saveChanges": "Salva modifiche",
"error_withCount_one": "{{count}} errore",
"error_withCount_many": "{{count}} errori",
"error_withCount_other": "{{count}} errori"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -784,7 +788,7 @@
"serverError": "Errore del Server",
"connected": "Connesso al server",
"canceled": "Elaborazione annullata",
"uploadFailedInvalidUploadDesc": "Devono essere immagini PNG o JPEG.",
"uploadFailedInvalidUploadDesc": "Devono essere immagini PNG, JPEG o WEBP.",
"parameterSet": "Parametro richiamato",
"parameterNotSet": "Parametro non richiamato",
"problemCopyingImage": "Impossibile copiare l'immagine",
@@ -835,9 +839,9 @@
"linkCopied": "Collegamento copiato",
"addedToUncategorized": "Aggiunto alle risorse della bacheca $t(boards.uncategorized)",
"imagesWillBeAddedTo": "Le immagini caricate verranno aggiunte alle risorse della bacheca {{boardName}}.",
"uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG o JPEG.",
"uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
"uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG, JPEG o WEBP.",
"uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG, JPEG o WEBP.",
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG, JPEG o WEBP.",
"outOfMemoryErrorDescLocal": "Segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent> per ridurre gli OOM.",
"pasteFailed": "Incolla non riuscita",
"pasteSuccess": "Incollato su {{destination}}",
@@ -1704,7 +1708,7 @@
"saveWorkflow": "Salva flusso di lavoro",
"openWorkflow": "Apri flusso di lavoro",
"clearWorkflowSearchFilter": "Cancella il filtro di ricerca del flusso di lavoro",
"workflowLibrary": "Libreria",
"workflowLibrary": "Libreria flussi di lavoro",
"workflowSaved": "Flusso di lavoro salvato",
"unnamedWorkflow": "Flusso di lavoro senza nome",
"savingWorkflow": "Salvataggio del flusso di lavoro...",
@@ -1734,7 +1738,6 @@
"userWorkflows": "Flussi di lavoro utente",
"projectWorkflows": "Flussi di lavoro del progetto",
"defaultWorkflows": "Flussi di lavoro predefiniti",
"uploadAndSaveWorkflow": "Carica nella libreria",
"chooseWorkflowFromLibrary": "Scegli il flusso di lavoro dalla libreria",
"deleteWorkflow2": "Vuoi davvero eliminare questo flusso di lavoro? Questa operazione non può essere annullata.",
"edit": "Modifica",
@@ -1771,8 +1774,24 @@
"divider": "Divisore",
"container": "Contenitore",
"text": "Testo",
"numberInput": "Ingresso numerico"
}
"numberInput": "Ingresso numerico",
"containerRowLayout": "Contenitore (disposizione riga)",
"containerColumnLayout": "Contenitore (disposizione colonna)"
},
"loadMore": "Carica altro",
"searchPlaceholder": "Cerca per nome, descrizione o etichetta",
"filterByTags": "Filtra per etichetta",
"shared": "Condiviso",
"browseWorkflows": "Sfoglia i flussi di lavoro",
"allLoaded": "Tutti i flussi di lavoro caricati",
"saveChanges": "Salva modifiche",
"yourWorkflows": "I tuoi flussi di lavoro",
"recentlyOpened": "Aperto di recente",
"workflowThumbnail": "Miniatura del flusso di lavoro",
"private": "Privato",
"deselectAll": "Deseleziona tutto",
"noRecentWorkflows": "Nessun flusso di lavoro recente",
"view": "Visualizza"
},
"accordions": {
"compositing": {
@@ -2322,7 +2341,8 @@
"toGetStarted": "Per iniziare, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
"noModelsInstalled": "Sembra che non hai installato alcun modello! Puoi <DownloadStarterModelsButton>scaricare un pacchetto di modelli di avvio</DownloadStarterModelsButton> o <ImportModelsButton>importare modelli</ImportModelsButton>.",
"toGetStartedLocal": "Per iniziare, assicurati di scaricare o importare i modelli necessari per eseguire Invoke. Quindi, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
"lowVRAMMode": "Per prestazioni ottimali, segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent>."
"lowVRAMMode": "Per prestazioni ottimali, segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent>.",
"toGetStartedWorkflow": "Per iniziare, compila i campi a sinistra e premi <StrongComponent>Invoke</StrongComponent> per generare la tua immagine. Vuoi esplorare altri flussi di lavoro? Fai clic sull'<StrongComponent>icona della cartella</StrongComponent> accanto al titolo del flusso di lavoro per visualizzare un elenco di altri modelli che puoi provare."
},
"whatsNew": {
"whatsNewInInvoke": "Novità in Invoke",
@@ -2330,8 +2350,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Editor del flusso di lavoro: nuovo generatore di moduli trascina-e-rilascia per una creazione più facile del flusso di lavoro.",
"Altri miglioramenti: messa in coda dei lotti più rapida, migliore ampliamento, selettore colore migliorato e nodi metadati."
"Gestione della memoria: nuova impostazione per gli utenti con GPU Nvidia per ridurre l'utilizzo della VRAM.",
"Prestazioni: continui miglioramenti alle prestazioni e alla reattività complessive dell'applicazione."
]
},
"system": {

View File

@@ -1566,7 +1566,6 @@
"defaultWorkflows": "Стандартные рабочие процессы",
"deleteWorkflow2": "Вы уверены, что хотите удалить этот рабочий процесс? Это нельзя отменить.",
"chooseWorkflowFromLibrary": "Выбрать рабочий процесс из библиотеки",
"uploadAndSaveWorkflow": "Загрузить в библиотеку",
"edit": "Редактировать",
"download": "Скачать",
"copyShareLink": "Скопировать ссылку на общий доступ",

View File

@@ -235,7 +235,8 @@
"column": "Cột",
"layout": "Bố Cục",
"row": "Hàng",
"board": "Bảng"
"board": "Bảng",
"saveChanges": "Lưu Thay Đổi"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
@@ -766,7 +767,9 @@
"urlUnauthorizedErrorMessage2": "Tìm hiểu thêm.",
"urlForbidden": "Bạn không có quyền truy cập vào model này",
"urlForbiddenErrorMessage": "Bạn có thể cần yêu cầu quyền truy cập từ trang web đang cung cấp model.",
"urlUnauthorizedErrorMessage": "Bạn có thể cần thiếp lập một token API để dùng được model này."
"urlUnauthorizedErrorMessage": "Bạn có thể cần thiếp lập một token API để dùng được model này.",
"fluxRedux": "FLUX Redux",
"sigLip": "SigLIP"
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -979,7 +982,7 @@
"unknownInput": "Đầu Vào Không Rõ: {{name}}",
"validateConnections": "Xác Thực Kết Nối Và Đồ Thị",
"workflowNotes": "Ghi Chú",
"workflowTags": "Thẻ Tên",
"workflowTags": "Nhãn",
"editMode": "Chỉnh sửa trong Trình Biên Tập Workflow",
"edit": "Chỉnh Sửa",
"executionStateInProgress": "Đang Xử Lý",
@@ -2021,7 +2024,7 @@
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên ảnh</UploadButton> hoặc kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
@@ -2137,7 +2140,7 @@
"toast": {
"imageUploadFailed": "Tải Lên Ảnh Thất Bại",
"layerCopiedToClipboard": "Sao Chép Layer Vào Clipboard",
"uploadFailedInvalidUploadDesc_withCount_other": "Tối đa là {{count}} ảnh PNG hoặc JPEG.",
"uploadFailedInvalidUploadDesc_withCount_other": "Tối đa là {{count}} ảnh PNG, JPEG hoặc WEBP.",
"imageCopied": "Ảnh Đã Được Sao Chép",
"sentToUpscale": "Chuyển Vào Upscale",
"unableToLoadImage": "Không Thể Tải Hình Ảnh",
@@ -2149,7 +2152,7 @@
"unableToLoadImageMetadata": "Không Thể Tải Metadata Của Ảnh",
"workflowLoaded": "Workflow Đã Tải",
"uploadFailed": "Tải Lên Thất Bại",
"uploadFailedInvalidUploadDesc": "Phải là ảnh PNG hoặc JPEG.",
"uploadFailedInvalidUploadDesc": "Phải là ảnh PNG, JPEG hoặc WEBP.",
"serverError": "Lỗi Server",
"addedToBoard": "Thêm vào tài nguyên của bảng {{name}}",
"sessionRef": "Phiên: {{sessionId}}",
@@ -2252,11 +2255,10 @@
"convertGraph": "Chuyển Đổi Đồ Thị",
"saveWorkflowToProject": "Lưu Workflow Vào Dự Án",
"workflowName": "Tên Workflow",
"workflowLibrary": "Thư Viện",
"workflowLibrary": "Thư Viện Workflow",
"opened": "Ngày Mở",
"deleteWorkflow": "Xoá Workflow",
"workflowEditorMenu": "Menu Biên Tập Workflow",
"uploadAndSaveWorkflow": "Tải Lên Thư Viện",
"openLibrary": "Mở Thư Viện",
"builder": {
"resetAllNodeFields": "Tải Lại Các Vùng Node",
@@ -2287,7 +2289,18 @@
"heading": "Đầu Dòng",
"text": "Văn Bản",
"divider": "Gạch Chia"
}
},
"yourWorkflows": "Workflow Của Bạn",
"browseWorkflows": "Khám Phá Workflow",
"workflowThumbnail": "Ảnh Minh Họa Workflow",
"saveChanges": "Lưu Thay Đổi",
"allLoaded": "Đã Tải Tất Cả Workflow",
"shared": "Nhóm",
"searchPlaceholder": "Tìm theo tên, mô tả, hoặc nhãn",
"filterByTags": "Lọc Theo Nhãn",
"recentlyOpened": "Mở Gần Đây",
"private": "Cá Nhân",
"loadMore": "Tải Thêm"
},
"upscaling": {
"missingUpscaleInitialImage": "Thiếu ảnh dùng để upscale",
@@ -2322,8 +2335,8 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Trình Biên Tập Workflow: trình tạo vùng nhập dưới dạng kéo thả nhằm tạo dựng workflow dễ dàng hơn.",
"Các nâng cấp khác: Xếp hàng tạo sinh theo nhóm nhanh hơn, upscale tốt hơn, trình chọn màu được cải thiện, và node chứa metadata."
"Trình Quản Lý Bộ Nhớ: Thiết lập mới cho người dùng với GPU Nvidia để giảm lượng VRAM sử dng.",
"Hiệu suất: Các cải thiện tiếp theo nhm gói gọn hiệu suất và khả năng phản hồi của ứng dụng."
]
},
"upsell": {

View File

@@ -1629,7 +1629,6 @@
"projectWorkflows": "项目工作流程",
"copyShareLink": "复制分享链接",
"chooseWorkflowFromLibrary": "从库中选择工作流程",
"uploadAndSaveWorkflow": "上传到库",
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
},
"accordions": {

View File

@@ -16,10 +16,18 @@ import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/projectId';
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import {
$workflowLibraryCategoriesOptions,
$workflowLibrarySortOptions,
$workflowLibraryTagCategoriesOptions,
DEFAULT_WORKFLOW_LIBRARY_CATEGORIES,
DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS,
DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
} from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
@@ -48,6 +56,8 @@ interface Props extends PropsWithChildren {
isDebugging?: boolean;
logo?: ReactNode;
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
}
@@ -68,6 +78,8 @@ const InvokeAIUI = ({
isDebugging = false,
logo,
workflowCategories,
workflowTagCategories,
workflowSortOptions,
loggingOverrides,
}: Props) => {
useLayoutEffect(() => {
@@ -195,14 +207,34 @@ const InvokeAIUI = ({
useEffect(() => {
if (workflowCategories) {
$workflowCategories.set(workflowCategories);
$workflowLibraryCategoriesOptions.set(workflowCategories);
}
return () => {
$workflowCategories.set([]);
$workflowLibraryCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_CATEGORIES);
};
}, [workflowCategories]);
useEffect(() => {
if (workflowTagCategories) {
$workflowLibraryTagCategoriesOptions.set(workflowTagCategories);
}
return () => {
$workflowLibraryTagCategoriesOptions.set(DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES);
};
}, [workflowTagCategories]);
useEffect(() => {
if (workflowSortOptions) {
$workflowLibrarySortOptions.set(workflowSortOptions);
}
return () => {
$workflowLibrarySortOptions.set(DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS);
};
}, [workflowSortOptions]);
useEffect(() => {
if (socketOptions) {
$socketOptions.set(socketOptions);

View File

@@ -15,7 +15,7 @@ import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibrar
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
import { useGetAndLoadLibraryWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadLibraryWorkflow';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { atom } from 'nanostores';
import { useCallback, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
@@ -57,7 +57,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const { t } = useTranslation();
const didParseOpenAPISchema = useStore($hasTemplates);
const store = useAppStore();
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
const handleSendToCanvas = useCallback(
async (imageName: string) => {
@@ -113,10 +113,15 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const handleLoadWorkflow = useCallback(
async (workflowId: string) => {
// This shows a toast
await getAndLoadWorkflow(workflowId);
store.dispatch(setActiveTab('workflows'));
await loadWorkflowWithDialog({
type: 'library',
data: workflowId,
onSuccess: () => {
store.dispatch(setActiveTab('workflows'));
},
});
},
[getAndLoadWorkflow, store]
[loadWorkflowWithDialog, store]
);
const handleSelectStylePreset = useCallback(

View File

@@ -31,6 +31,7 @@ import type { AnyModelConfig } from 'services/api/types';
import {
isCLIPEmbedModelConfig,
isControlLayerModelConfig,
isFluxReduxModelConfig,
isFluxVAEModelConfig,
isIPAdapterModelConfig,
isLoRAModelConfig,
@@ -77,6 +78,7 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
handleT5EncoderModels(models, state, dispatch, log);
handleCLIPEmbedModels(models, state, dispatch, log);
handleFLUXVAEModels(models, state, dispatch, log);
handleFLUXReduxModels(models, state, dispatch, log);
},
});
};
@@ -209,6 +211,10 @@ const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log)
const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const ipaModels = models.filter(isIPAdapterModelConfig);
selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
const selectedIPAdapterModel = entity.ipAdapter.model;
// `null` is a valid IP adapter model - no need to do anything.
if (!selectedIPAdapterModel) {
@@ -224,6 +230,10 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
if (ipAdapter.type !== 'ip_adapter') {
return;
}
const selectedIPAdapterModel = ipAdapter.model;
// `null` is a valid IP adapter model - no need to do anything.
if (!selectedIPAdapterModel) {
@@ -241,6 +251,49 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
});
};
const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
const fluxReduxModels = models.filter(isFluxReduxModelConfig);
selectCanvasSlice(state).referenceImages.entities.forEach((entity) => {
if (entity.ipAdapter.type !== 'flux_redux') {
return;
}
const selectedFLUXReduxModel = entity.ipAdapter.model;
// `null` is a valid FLUX Redux model - no need to do anything.
if (!selectedFLUXReduxModel) {
return;
}
const isModelAvailable = fluxReduxModels.some((m) => m.key === selectedFLUXReduxModel.key);
if (isModelAvailable) {
return;
}
log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
dispatch(referenceImageIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
});
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, ipAdapter }) => {
if (ipAdapter.type !== 'flux_redux') {
return;
}
const selectedFLUXReduxModel = ipAdapter.model;
// `null` is a valid FLUX Redux model - no need to do anything.
if (!selectedFLUXReduxModel) {
return;
}
const isModelAvailable = fluxReduxModels.some((m) => m.key === selectedFLUXReduxModel.key);
if (isModelAvailable) {
return;
}
log.debug({ selectedFLUXReduxModel }, 'Selected FLUX Redux model is not available, clearing');
dispatch(
rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), referenceImageId, modelConfig: null })
);
});
});
};
const handlePostProcessingModel: ModelHandler = (models, state, dispatch, log) => {
const selectedPostProcessingModel = state.upscale.postProcessingModel;
const allSpandrelModels = models.filter(isSpandrelImageToImageModelConfig);

View File

@@ -1,4 +0,0 @@
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import { atom } from 'nanostores';
export const $workflowCategories = atom<WorkflowCategory[]>(['user', 'default', 'project']);

View File

@@ -19,6 +19,7 @@ import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/galle
import { hrfPersistConfig, hrfSlice } from 'features/hrf/store/hrfSlice';
import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice';
import { nodesPersistConfig, nodesSlice, nodesUndoableConfig } from 'features/nodes/store/nodesSlice';
import { workflowLibraryPersistConfig, workflowLibrarySlice } from 'features/nodes/store/workflowLibrarySlice';
import { workflowSettingsPersistConfig, workflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workflowSlice';
import { upscalePersistConfig, upscaleSlice } from 'features/parameters/store/upscaleSlice';
@@ -68,6 +69,7 @@ const allReducers = {
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
[canvasStagingAreaSlice.name]: canvasStagingAreaSlice.reducer,
[lorasSlice.name]: lorasSlice.reducer,
[workflowLibrarySlice.name]: workflowLibrarySlice.reducer,
};
const rootReducer = combineReducers(allReducers);
@@ -113,6 +115,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
[canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
[canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
[lorasPersistConfig.name]: lorasPersistConfig,
[workflowLibraryPersistConfig.name]: workflowLibraryPersistConfig,
};
const unserialize: UnserializeFunction = (data, key) => {

View File

@@ -0,0 +1,43 @@
import type { Selector } from '@reduxjs/toolkit';
import { useAppStore } from 'app/store/nanostores/store';
import type { RootState } from 'app/store/store';
import { useEffect, useState } from 'react';
/**
* A hook that returns a debounced value from the app state.
*
* @param selector The redux selector
* @param debounceMs The debounce time in milliseconds
* @returns The debounced value
*/
export const useDebouncedAppSelector = <T>(selector: Selector<RootState, T>, debounceMs: number = 300) => {
const store = useAppStore();
const [value, setValue] = useState<T>(() => selector(store.getState()));
useEffect(() => {
let prevValue = selector(store.getState());
let timeout: number | null = null;
const unsubscribe = store.subscribe(() => {
const value = selector(store.getState());
if (value !== prevValue) {
if (timeout !== null) {
window.clearTimeout(timeout);
}
timeout = window.setTimeout(() => {
setValue(value);
prevValue = value;
}, debounceMs);
}
});
return () => {
unsubscribe();
if (timeout !== null) {
window.clearTimeout(timeout);
}
};
}, [debounceMs, selector, store]);
return value;
};

View File

@@ -0,0 +1,56 @@
import { Box, type BoxProps, type SystemStyleObject } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { type FocusRegionName, useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import { selectSystemShouldEnableHighlightFocusedRegions } from 'features/system/store/systemSlice';
import { memo, useMemo, useRef } from 'react';
interface FocusRegionWrapperProps extends BoxProps {
region: FocusRegionName;
focusOnMount?: boolean;
}
const FOCUS_REGION_STYLES: SystemStyleObject = {
position: 'relative',
'&[data-highlighted="true"]::after': {
borderColor: 'blue.700',
},
'&::after': {
content: '""',
position: 'absolute',
inset: 0,
zIndex: 1,
borderRadius: 'base',
border: '2px solid',
borderColor: 'transparent',
pointerEvents: 'none',
transition: 'border-color 0.1s ease-in-out',
},
};
export const FocusRegionWrapper = memo(
({ region, focusOnMount = false, sx, children, ...boxProps }: FocusRegionWrapperProps) => {
const shouldHighlightFocusedRegions = useAppSelector(selectSystemShouldEnableHighlightFocusedRegions);
const ref = useRef<HTMLDivElement>(null);
const options = useMemo(() => ({ focusOnMount }), [focusOnMount]);
useFocusRegion(region, ref, options);
const isFocused = useIsRegionFocused(region);
const isHighlighted = isFocused && shouldHighlightFocusedRegions;
return (
<Box
ref={ref}
tabIndex={-1}
sx={useMemo(() => ({ ...FOCUS_REGION_STYLES, ...sx }), [sx])}
data-highlighted={isHighlighted}
{...boxProps}
>
{children}
</Box>
);
}
);
FocusRegionWrapper.displayName = 'FocusRegionWrapper';

View File

@@ -30,7 +30,7 @@ const log = logger('system');
/**
* The names of the focus regions.
*/
type FocusRegionName = 'gallery' | 'layers' | 'canvas' | 'workflows' | 'viewer';
export type FocusRegionName = 'gallery' | 'layers' | 'canvas' | 'workflows' | 'viewer';
/**
* A map of focus regions to the elements that are part of that region.

View File

@@ -9,7 +9,7 @@ import {
useAddRegionalGuidance,
useAddRegionalReferenceImage,
} from 'features/controlLayers/hooks/addLayerHooks';
import { selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -22,7 +22,6 @@ export const CanvasAddEntityButtons = memo(() => {
const addControlLayer = useAddControlLayer();
const addGlobalReferenceImage = useAddGlobalReferenceImage();
const addRegionalReferenceImage = useAddRegionalReferenceImage();
const isFLUX = useAppSelector(selectIsFLUX);
const isSD3 = useAppSelector(selectIsSD3);
return (
@@ -75,7 +74,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addRegionalReferenceImage}
isDisabled={isFLUX || isSD3}
isDisabled={isSD3}
>
{t('controlLayers.regionalReferenceImage')}
</Button>

View File

@@ -9,7 +9,7 @@ import {
useAddRegionalReferenceImage,
} from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -23,7 +23,6 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
const addRegionalReferenceImage = useAddRegionalReferenceImage();
const addRasterLayer = useAddRasterLayer();
const addControlLayer = useAddControlLayer();
const isFLUX = useAppSelector(selectIsFLUX);
const isSD3 = useAppSelector(selectIsSD3);
return (
@@ -52,7 +51,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isSD3}>
{t('controlLayers.regionalGuidance')}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isFLUX || isSD3}>
<MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isSD3}>
{t('controlLayers.regionalReferenceImage')}
</MenuItem>
</MenuGroup>

View File

@@ -1,28 +1,33 @@
import { Divider, Flex } from '@invoke-ai/ui-library';
import { Divider, Flex, type SystemStyleObject } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { CanvasAddEntityButtons } from 'features/controlLayers/components/CanvasAddEntityButtons';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList/CanvasEntityList';
import { EntityListSelectedEntityActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBar';
import { selectHasEntities } from 'features/controlLayers/store/selectors';
import { memo, useRef } from 'react';
import { memo } from 'react';
import { ParamDenoisingStrength } from './ParamDenoisingStrength';
const FOCUS_REGION_STYLES: SystemStyleObject = {
width: 'full',
height: 'full',
};
export const CanvasLayersPanelContent = memo(() => {
const hasEntities = useAppSelector(selectHasEntities);
const layersPanelFocusRef = useRef<HTMLDivElement>(null);
useFocusRegion('layers', layersPanelFocusRef);
return (
<Flex ref={layersPanelFocusRef} flexDir="column" gap={2} w="full" h="full">
<EntityListSelectedEntityActionBar />
<Divider py={0} />
<ParamDenoisingStrength />
<Divider py={0} />
{!hasEntities && <CanvasAddEntityButtons />}
{hasEntities && <CanvasEntityList />}
</Flex>
<FocusRegionWrapper region="layers" sx={FOCUS_REGION_STYLES}>
<Flex flexDir="column" gap={2} w="full" h="full">
<EntityListSelectedEntityActionBar />
<Divider py={0} />
<ParamDenoisingStrength />
<Divider py={0} />
{!hasEntities && <CanvasAddEntityButtons />}
{hasEntities && <CanvasEntityList />}
</Flex>
</FocusRegionWrapper>
);
});

View File

@@ -1,6 +1,14 @@
import { ContextMenu, Flex, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import {
ContextMenu,
Flex,
IconButton,
Menu,
MenuButton,
MenuList,
type SystemStyleObject,
} from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
import { CanvasAlertsSelectedEntityStatus } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSelectedEntityStatus';
import { CanvasAlertsSendingToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
@@ -18,11 +26,16 @@ import { Transform } from 'features/controlLayers/components/Transform/Transform
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectDynamicGrid, selectShowHUD } from 'features/controlLayers/store/canvasSettingsSlice';
import { GatedImageViewer } from 'features/gallery/components/ImageViewer/ImageViewer';
import { memo, useCallback, useRef } from 'react';
import { memo, useCallback } from 'react';
import { PiDotsThreeOutlineVerticalFill } from 'react-icons/pi';
import { CanvasAlertsInvocationProgress } from './CanvasAlerts/CanvasAlertsInvocationProgress';
const FOCUS_REGION_STYLES: SystemStyleObject = {
width: 'full',
height: 'full',
};
const MenuContent = () => {
return (
<CanvasManagerProviderGate>
@@ -35,7 +48,6 @@ const MenuContent = () => {
};
export const CanvasMainPanelContent = memo(() => {
const ref = useRef<HTMLDivElement>(null);
const dynamicGrid = useAppSelector(selectDynamicGrid);
const showHUD = useAppSelector(selectShowHUD);
@@ -43,82 +55,81 @@ export const CanvasMainPanelContent = memo(() => {
return <MenuContent />;
}, []);
useFocusRegion('canvas', ref);
return (
<Flex
tabIndex={-1}
ref={ref}
borderRadius="base"
position="relative"
flexDirection="column"
height="full"
width="full"
gap={2}
alignItems="center"
justifyContent="center"
overflow="hidden"
>
<CanvasManagerProviderGate>
<CanvasToolbar />
</CanvasManagerProviderGate>
<ContextMenu<HTMLDivElement> renderMenu={renderMenu} withLongPress={false}>
{(ref) => (
<Flex
ref={ref}
position="relative"
w="full"
h="full"
bg={dynamicGrid ? 'base.850' : 'base.900'}
borderRadius="base"
overflow="hidden"
>
<InvokeCanvasComponent />
<CanvasManagerProviderGate>
<Flex
position="absolute"
flexDir="column"
top={1}
insetInlineStart={1}
pointerEvents="none"
gap={2}
alignItems="flex-start"
>
{showHUD && <CanvasHUD />}
<CanvasAlertsSelectedEntityStatus />
<CanvasAlertsPreserveMask />
<CanvasAlertsSendingToGallery />
<CanvasAlertsInvocationProgress />
</Flex>
<Flex position="absolute" top={1} insetInlineEnd={1}>
<Menu>
<MenuButton as={IconButton} icon={<PiDotsThreeOutlineVerticalFill />} colorScheme="base" />
<MenuContent />
</Menu>
</Flex>
</CanvasManagerProviderGate>
</Flex>
)}
</ContextMenu>
<Flex position="absolute" bottom={4} gap={2} align="center" justify="center">
<FocusRegionWrapper region="canvas" sx={FOCUS_REGION_STYLES}>
<Flex
tabIndex={-1}
borderRadius="base"
position="relative"
flexDirection="column"
height="full"
width="full"
gap={2}
alignItems="center"
justifyContent="center"
overflow="hidden"
>
<CanvasManagerProviderGate>
<StagingAreaIsStagingGate>
<StagingAreaToolbar />
</StagingAreaIsStagingGate>
<CanvasToolbar />
</CanvasManagerProviderGate>
</Flex>
<Flex position="absolute" bottom={4}>
<ContextMenu<HTMLDivElement> renderMenu={renderMenu} withLongPress={false}>
{(ref) => (
<Flex
ref={ref}
position="relative"
w="full"
h="full"
bg={dynamicGrid ? 'base.850' : 'base.900'}
borderRadius="base"
overflow="hidden"
>
<InvokeCanvasComponent />
<CanvasManagerProviderGate>
<Flex
position="absolute"
flexDir="column"
top={1}
insetInlineStart={1}
pointerEvents="none"
gap={2}
alignItems="flex-start"
>
{showHUD && <CanvasHUD />}
<CanvasAlertsSelectedEntityStatus />
<CanvasAlertsPreserveMask />
<CanvasAlertsSendingToGallery />
<CanvasAlertsInvocationProgress />
</Flex>
<Flex position="absolute" top={1} insetInlineEnd={1}>
<Menu>
<MenuButton as={IconButton} icon={<PiDotsThreeOutlineVerticalFill />} colorScheme="base" />
<MenuContent />
</Menu>
</Flex>
</CanvasManagerProviderGate>
</Flex>
)}
</ContextMenu>
<Flex position="absolute" bottom={4} gap={2} align="center" justify="center">
<CanvasManagerProviderGate>
<StagingAreaIsStagingGate>
<StagingAreaToolbar />
</StagingAreaIsStagingGate>
</CanvasManagerProviderGate>
</Flex>
<Flex position="absolute" bottom={4}>
<CanvasManagerProviderGate>
<Filter />
<Transform />
<SelectObject />
</CanvasManagerProviderGate>
</Flex>
<CanvasManagerProviderGate>
<Filter />
<Transform />
<SelectObject />
<CanvasDropArea />
</CanvasManagerProviderGate>
<GatedImageViewer />
</Flex>
<CanvasManagerProviderGate>
<CanvasDropArea />
</CanvasManagerProviderGate>
<GatedImageViewer />
</Flex>
</FocusRegionWrapper>
);
});

View File

@@ -0,0 +1,61 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import type { CLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { isCLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';
// at this time, ViT-L is the only supported clip model for FLUX IP adapter
const FLUX_CLIP_VISION = 'ViT-L';
const CLIP_VISION_OPTIONS = [
{ label: 'ViT-H', value: 'ViT-H' },
{ label: 'ViT-G', value: 'ViT-G' },
{ label: FLUX_CLIP_VISION, value: FLUX_CLIP_VISION },
];
type Props = {
model: CLIPVisionModelV2;
onChange: (clipVisionModel: CLIPVisionModelV2) => void;
};
export const CLIPVisionModel = memo(({ model, onChange }: Props) => {
const { t } = useTranslation();
const _onChangeCLIPVisionModel = useCallback<ComboboxOnChange>(
(v) => {
assert(isCLIPVisionModelV2(v?.value));
onChange(v.value);
},
[onChange]
);
const isFLUX = useAppSelector(selectIsFLUX);
const clipVisionOptions = useMemo(() => {
return CLIP_VISION_OPTIONS.map((option) => ({
...option,
isDisabled: isFLUX && option.value !== FLUX_CLIP_VISION,
}));
}, [isFLUX]);
const clipVisionModelValue = useMemo(() => {
return CLIP_VISION_OPTIONS.find((o) => o.value === model);
}, [model]);
return (
<FormControl width="max-content" minWidth={28}>
<Combobox
options={clipVisionOptions}
placeholder={t('common.placeholderSelectAModel')}
value={clipVisionModelValue}
onChange={_onChangeCLIPVisionModel}
/>
</FormControl>
);
});
CLIPVisionModel.displayName = 'CLIPVisionModel';
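For reference, the FLUX gating in the new component boils down to disabling every CLIP Vision option except ViT-L when the main model is FLUX. A self-contained sketch of that mapping (the option shape and helper name are illustrative, not taken from this diff):

type ClipVisionValue = 'ViT-H' | 'ViT-G' | 'ViT-L';
type ClipVisionOption = { label: string; value: ClipVisionValue; isDisabled: boolean };

const FLUX_CLIP_VISION: ClipVisionValue = 'ViT-L';

// When the main model is FLUX, only ViT-L stays selectable; other bases keep all options enabled.
const getClipVisionOptions = (isFLUX: boolean): ClipVisionOption[] =>
  (['ViT-H', 'ViT-G', 'ViT-L'] as const).map((value) => ({
    label: value,
    value,
    isDisabled: isFLUX && value !== FLUX_CLIP_VISION,
  }));

// getClipVisionOptions(true) leaves only the ViT-L entry enabled.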

View File

@@ -1,40 +1,36 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { selectBase, selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import type { CLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { isCLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useIPAdapterModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
// At this time, ViT-L is the only supported CLIP Vision model for the FLUX IP Adapter
const FLUX_CLIP_VISION = 'ViT-L';
const CLIP_VISION_OPTIONS = [
{ label: 'ViT-H', value: 'ViT-H' },
{ label: 'ViT-G', value: 'ViT-G' },
{ label: FLUX_CLIP_VISION, value: FLUX_CLIP_VISION },
];
import { useIPAdapterOrFLUXReduxModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, FLUXReduxModelConfig, IPAdapterModelConfig } from 'services/api/types';
type Props = {
isRegionalGuidance: boolean;
modelKey: string | null;
onChangeModel: (modelConfig: IPAdapterModelConfig) => void;
clipVisionModel: CLIPVisionModelV2;
onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModelV2) => void;
onChangeModel: (modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig) => void;
};
export const IPAdapterModel = memo(({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => {
export const IPAdapterModel = memo(({ isRegionalGuidance, modelKey, onChangeModel }: Props) => {
const { t } = useTranslation();
const currentBaseModel = useAppSelector(selectBase);
const [modelConfigs, { isLoading }] = useIPAdapterModels();
const filter = useCallback(
(config: IPAdapterModelConfig | FLUXReduxModelConfig) => {
// FLUX supports regional guidance for FLUX Redux models only - not IP Adapter models.
if (isRegionalGuidance && config.base === 'flux' && config.type === 'ip_adapter') {
return false;
}
return true;
},
[isRegionalGuidance]
);
const [modelConfigs, { isLoading }] = useIPAdapterOrFLUXReduxModels(filter);
const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);
const _onChangeModel = useCallback(
(modelConfig: IPAdapterModelConfig | null) => {
(modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | null) => {
if (!modelConfig) {
return;
}
@@ -43,21 +39,11 @@ export const IPAdapterModel = memo(({ modelKey, onChangeModel, clipVisionModel,
[onChangeModel]
);
const _onChangeCLIPVisionModel = useCallback<ComboboxOnChange>(
(v) => {
assert(isCLIPVisionModelV2(v?.value));
onChangeCLIPVisionModel(v.value);
},
[onChangeCLIPVisionModel]
);
const isFLUX = useAppSelector(selectIsFLUX);
const getIsDisabled = useCallback(
(model: AnyModelConfig): boolean => {
const isCompatible = currentBaseModel === model.base;
const hasMainModel = Boolean(currentBaseModel);
return !hasMainModel || !isCompatible;
const hasSameBase = currentBaseModel === model.base;
return !hasMainModel || !hasSameBase;
},
[currentBaseModel]
);
@@ -70,41 +56,18 @@ export const IPAdapterModel = memo(({ modelKey, onChangeModel, clipVisionModel,
isLoading,
});
const clipVisionOptions = useMemo(() => {
return CLIP_VISION_OPTIONS.map((option) => ({
...option,
isDisabled: isFLUX && option.value !== FLUX_CLIP_VISION,
}));
}, [isFLUX]);
const clipVisionModelValue = useMemo(() => {
return CLIP_VISION_OPTIONS.find((o) => o.value === clipVisionModel);
}, [clipVisionModel]);
return (
<Flex gap={2}>
<Tooltip label={selectedModel?.description}>
<FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
<Combobox
options={options}
placeholder={t('common.placeholderSelectAModel')}
value={value}
onChange={onChange}
noOptionsMessage={noOptionsMessage}
/>
</FormControl>
</Tooltip>
{selectedModel?.format === 'checkpoint' && (
<FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} width="max-content" minWidth={28}>
<Combobox
options={clipVisionOptions}
placeholder={t('common.placeholderSelectAModel')}
value={clipVisionModelValue}
onChange={_onChangeCLIPVisionModel}
/>
</FormControl>
)}
</Flex>
<Tooltip label={selectedModel?.description}>
<FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
<Combobox
options={options}
placeholder={t('common.placeholderSelectAModel')}
value={value}
onChange={onChange}
noOptionsMessage={noOptionsMessage}
/>
</FormControl>
</Tooltip>
);
});
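The filter passed to useIPAdapterOrFLUXReduxModels above encodes one rule: for regional guidance, FLUX-base IP Adapter models are hidden, because FLUX regional guidance only works with FLUX Redux. A standalone sketch of that predicate with a simplified config type (illustrative, not the app's real types):

type RefImageModelConfig = {
  base: 'flux' | 'sd-1' | 'sdxl';
  type: 'ip_adapter' | 'flux_redux';
};

// Returns false only for the one unsupported combination.
const makeRefImageModelFilter =
  (isRegionalGuidance: boolean) =>
  (config: RefImageModelConfig): boolean =>
    !(isRegionalGuidance && config.base === 'flux' && config.type === 'ip_adapter');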

View File

@@ -1,9 +1,10 @@
import { Box, Flex, IconButton } from '@invoke-ai/ui-library';
import { Flex, IconButton } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { CLIPVisionModel } from 'features/controlLayers/components/IPAdapter/CLIPVisionModel';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterSettingsEmptyState } from 'features/controlLayers/components/IPAdapter/IPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -25,7 +26,7 @@ import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold } from 'react-icons/pi';
import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import type { FLUXReduxModelConfig, ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { IPAdapterImagePreview } from './IPAdapterImagePreview';
import { IPAdapterModel } from './IPAdapterModel';
@@ -65,7 +66,7 @@ const IPAdapterSettingsContent = memo(() => {
);
const onChangeModel = useCallback(
(modelConfig: IPAdapterModelConfig) => {
(modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig) => {
dispatch(referenceImageIPAdapterModelChanged({ entityIdentifier, modelConfig }));
},
[dispatch, entityIdentifier]
@@ -98,14 +99,14 @@ const IPAdapterSettingsContent = memo(() => {
<CanvasEntitySettingsWrapper>
<Flex flexDir="column" gap={2} position="relative" w="full">
<Flex gap={2} alignItems="center" w="full">
<Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
<IPAdapterModel
modelKey={ipAdapter.model?.key ?? null}
onChangeModel={onChangeModel}
clipVisionModel={ipAdapter.clipVisionModel}
onChangeCLIPVisionModel={onChangeCLIPVisionModel}
/>
</Box>
<IPAdapterModel
isRegionalGuidance={false}
modelKey={ipAdapter.model?.key ?? null}
onChangeModel={onChangeModel}
/>
{ipAdapter.type === 'ip_adapter' && (
<CLIPVisionModel model={ipAdapter.clipVisionModel} onChange={onChangeCLIPVisionModel} />
)}
<IconButton
onClick={pullBboxIntoIPAdapter}
isDisabled={isBusy}
@@ -116,12 +117,14 @@ const IPAdapterSettingsContent = memo(() => {
/>
</Flex>
<Flex gap={2} w="full" alignItems="center">
<Flex flexDir="column" gap={2} w="full">
{!isFLUX && <IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />}
<Weight weight={ipAdapter.weight} onChange={onChangeWeight} />
<BeginEndStepPct beginEndStepPct={ipAdapter.beginEndStepPct} onChange={onChangeBeginEndStepPct} />
</Flex>
<Flex alignItems="center" justifyContent="center" h={32} w={32} aspectRatio="1/1">
{ipAdapter.type === 'ip_adapter' && (
<Flex flexDir="column" gap={2} w="full">
{!isFLUX && <IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />}
<Weight weight={ipAdapter.weight} onChange={onChangeWeight} />
<BeginEndStepPct beginEndStepPct={ipAdapter.beginEndStepPct} onChange={onChangeBeginEndStepPct} />
</Flex>
)}
<Flex alignItems="center" justifyContent="center" h={32} w={32} aspectRatio="1/1" flexGrow={1}>
<IPAdapterImagePreview
image={ipAdapter.image}
onChangeImage={onChangeImage}

View File

@@ -1,8 +1,9 @@
import { Box, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
import { Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { CLIPVisionModel } from 'features/controlLayers/components/IPAdapter/CLIPVisionModel';
import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
@@ -26,7 +27,7 @@ import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import type { FLUXReduxModelConfig, ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
type Props = {
@@ -73,7 +74,7 @@ const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Pro
);
const onChangeModel = useCallback(
(modelConfig: IPAdapterModelConfig) => {
(modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig) => {
dispatch(rgIPAdapterModelChanged({ entityIdentifier, referenceImageId, modelConfig }));
},
[dispatch, entityIdentifier, referenceImageId]
@@ -125,14 +126,14 @@ const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Pro
</Flex>
<Flex flexDir="column" gap={2} position="relative" w="full">
<Flex gap={2} alignItems="center" w="full">
<Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
<IPAdapterModel
modelKey={ipAdapter.model?.key ?? null}
onChangeModel={onChangeModel}
clipVisionModel={ipAdapter.clipVisionModel}
onChangeCLIPVisionModel={onChangeCLIPVisionModel}
/>
</Box>
<IPAdapterModel
isRegionalGuidance={true}
modelKey={ipAdapter.model?.key ?? null}
onChangeModel={onChangeModel}
/>
{ipAdapter.type === 'ip_adapter' && (
<CLIPVisionModel model={ipAdapter.clipVisionModel} onChange={onChangeCLIPVisionModel} />
)}
<IconButton
onClick={pullBboxIntoIPAdapter}
isDisabled={isBusy}
@@ -143,12 +144,14 @@ const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Pro
/>
</Flex>
<Flex gap={2} w="full">
<Flex flexDir="column" gap={2} w="full">
<IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />
<Weight weight={ipAdapter.weight} onChange={onChangeWeight} />
<BeginEndStepPct beginEndStepPct={ipAdapter.beginEndStepPct} onChange={onChangeBeginEndStepPct} />
</Flex>
<Flex alignItems="center" justifyContent="center" h={32} w={32} aspectRatio="1/1">
{ipAdapter.type === 'ip_adapter' && (
<Flex flexDir="column" gap={2} w="full">
<IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />
<Weight weight={ipAdapter.weight} onChange={onChangeWeight} />
<BeginEndStepPct beginEndStepPct={ipAdapter.beginEndStepPct} onChange={onChangeBeginEndStepPct} />
</Flex>
)}
<Flex alignItems="center" justifyContent="center" h={32} w={32} aspectRatio="1/1" flexGrow={1}>
<IPAdapterImagePreview
image={ipAdapter.image}
onChangeImage={onChangeImage}

View File

@@ -38,6 +38,7 @@ import type { UndoableOptions } from 'redux-undo';
import type {
ControlLoRAModelConfig,
ControlNetModelConfig,
FLUXReduxModelConfig,
ImageDTO,
IPAdapterModelConfig,
T2IAdapterModelConfig,
@@ -76,6 +77,7 @@ import {
imageDTOToImageWithDims,
initialControlLoRA,
initialControlNet,
initialFLUXRedux,
initialIPAdapter,
initialT2IAdapter,
} from './util';
@@ -619,11 +621,16 @@ export const canvasSlice = createSlice({
if (!entity) {
return;
}
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
entity.ipAdapter.method = method;
},
referenceImageIPAdapterModelChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ modelConfig: IPAdapterModelConfig | null }, 'reference_image'>>
action: PayloadAction<
EntityIdentifierPayload<{ modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | null }, 'reference_image'>
>
) => {
const { entityIdentifier, modelConfig } = action.payload;
const entity = selectEntity(state, entityIdentifier);
@@ -631,12 +638,39 @@ export const canvasSlice = createSlice({
return;
}
entity.ipAdapter.model = modelConfig ? zModelIdentifierField.parse(modelConfig) : null;
// Ensure that the IP Adapter model is compatible with the CLIP Vision model
if (entity.ipAdapter.model?.base === 'flux') {
entity.ipAdapter.clipVisionModel = 'ViT-L';
} else if (entity.ipAdapter.clipVisionModel === 'ViT-L') {
// Fall back to ViT-H (ViT-G would also work)
entity.ipAdapter.clipVisionModel = 'ViT-H';
if (!entity.ipAdapter.model) {
return;
}
if (entity.ipAdapter.type === 'ip_adapter' && entity.ipAdapter.model.type === 'flux_redux') {
// Switching from ip_adapter to flux_redux
entity.ipAdapter = {
...initialFLUXRedux,
image: entity.ipAdapter.image,
model: entity.ipAdapter.model,
};
return;
}
if (entity.ipAdapter.type === 'flux_redux' && entity.ipAdapter.model.type === 'ip_adapter') {
// Switching from flux_redux to ip_adapter
entity.ipAdapter = {
...initialIPAdapter,
image: entity.ipAdapter.image,
model: entity.ipAdapter.model,
};
return;
}
if (entity.ipAdapter.type === 'ip_adapter') {
// Ensure that the IP Adapter model is compatible with the CLIP Vision model
if (entity.ipAdapter.model?.base === 'flux') {
entity.ipAdapter.clipVisionModel = 'ViT-L';
} else if (entity.ipAdapter.clipVisionModel === 'ViT-L') {
// Fall back to ViT-H (ViT-G would also work)
entity.ipAdapter.clipVisionModel = 'ViT-H';
}
}
},
referenceImageIPAdapterCLIPVisionModelChanged: (
@@ -648,6 +682,9 @@ export const canvasSlice = createSlice({
if (!entity) {
return;
}
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
entity.ipAdapter.clipVisionModel = clipVisionModel;
},
referenceImageIPAdapterWeightChanged: (
@@ -659,6 +696,9 @@ export const canvasSlice = createSlice({
if (!entity) {
return;
}
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
entity.ipAdapter.weight = weight;
},
referenceImageIPAdapterBeginEndStepPctChanged: (
@@ -670,6 +710,9 @@ export const canvasSlice = createSlice({
if (!entity) {
return;
}
if (entity.ipAdapter.type !== 'ip_adapter') {
return;
}
entity.ipAdapter.beginEndStepPct = beginEndStepPct;
},
//#region Regional Guidance
@@ -843,6 +886,10 @@ export const canvasSlice = createSlice({
if (!referenceImage) {
return;
}
if (referenceImage.ipAdapter.type !== 'ip_adapter') {
return;
}
referenceImage.ipAdapter.weight = weight;
},
rgIPAdapterBeginEndStepPctChanged: (
@@ -856,6 +903,10 @@ export const canvasSlice = createSlice({
if (!referenceImage) {
return;
}
if (referenceImage.ipAdapter.type !== 'ip_adapter') {
return;
}
referenceImage.ipAdapter.beginEndStepPct = beginEndStepPct;
},
rgIPAdapterMethodChanged: (
@@ -869,6 +920,10 @@ export const canvasSlice = createSlice({
if (!referenceImage) {
return;
}
if (referenceImage.ipAdapter.type !== 'ip_adapter') {
return;
}
referenceImage.ipAdapter.method = method;
},
rgIPAdapterModelChanged: (
@@ -877,7 +932,7 @@ export const canvasSlice = createSlice({
EntityIdentifierPayload<
{
referenceImageId: string;
modelConfig: IPAdapterModelConfig | null;
modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | null;
},
'regional_guidance'
>
@@ -889,12 +944,39 @@ export const canvasSlice = createSlice({
return;
}
referenceImage.ipAdapter.model = modelConfig ? zModelIdentifierField.parse(modelConfig) : null;
// Ensure that the IP Adapter model is compatible with the CLIP Vision model
if (referenceImage.ipAdapter.model?.base === 'flux') {
referenceImage.ipAdapter.clipVisionModel = 'ViT-L';
} else if (referenceImage.ipAdapter.clipVisionModel === 'ViT-L') {
// Fall back to ViT-H (ViT-G would also work)
referenceImage.ipAdapter.clipVisionModel = 'ViT-H';
if (!referenceImage.ipAdapter.model) {
return;
}
if (referenceImage.ipAdapter.type === 'ip_adapter' && referenceImage.ipAdapter.model.type === 'flux_redux') {
// Switching from ip_adapter to flux_redux
referenceImage.ipAdapter = {
...initialFLUXRedux,
image: referenceImage.ipAdapter.image,
model: referenceImage.ipAdapter.model,
};
return;
}
if (referenceImage.ipAdapter.type === 'flux_redux' && referenceImage.ipAdapter.model.type === 'ip_adapter') {
// Switching from flux_redux to ip_adapter
referenceImage.ipAdapter = {
...initialIPAdapter,
image: referenceImage.ipAdapter.image,
model: referenceImage.ipAdapter.model,
};
return;
}
if (referenceImage.ipAdapter.type === 'ip_adapter') {
// Ensure that the IP Adapter model is compatible with the CLIP Vision model
if (referenceImage.ipAdapter.model?.base === 'flux') {
referenceImage.ipAdapter.clipVisionModel = 'ViT-L';
} else if (referenceImage.ipAdapter.clipVisionModel === 'ViT-L') {
// Fall back to ViT-H (ViT-G would also work)
referenceImage.ipAdapter.clipVisionModel = 'ViT-H';
}
}
},
rgIPAdapterCLIPVisionModelChanged: (
@@ -908,6 +990,10 @@ export const canvasSlice = createSlice({
if (!referenceImage) {
return;
}
if (referenceImage.ipAdapter.type !== 'ip_adapter') {
return;
}
referenceImage.ipAdapter.clipVisionModel = clipVisionModel;
},
//#region Inpaint mask
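Both reference-image reducers above follow the same pattern when the newly selected model's type no longer matches the stored config: rebuild the config from the matching initial state, carry the image and model across, and only then apply the CLIP Vision compatibility fix-up for ip_adapter configs. A compact, self-contained sketch of that switching step (types and initial values are simplified stand-ins for the slice's own):

type ModelRef = { key: string; type: 'ip_adapter' | 'flux_redux' };
type ImageRef = { image_name: string } | null;

type IPAdapterCfg = {
  type: 'ip_adapter';
  image: ImageRef;
  model: ModelRef | null;
  clipVisionModel: 'ViT-H' | 'ViT-G' | 'ViT-L';
  weight: number;
};
type FLUXReduxCfg = { type: 'flux_redux'; image: ImageRef; model: ModelRef | null };

const initialIPAdapterSketch: IPAdapterCfg = {
  type: 'ip_adapter',
  image: null,
  model: null,
  clipVisionModel: 'ViT-H',
  weight: 1,
};
const initialFLUXReduxSketch: FLUXReduxCfg = { type: 'flux_redux', image: null, model: null };

// Returns the next config after a model change, switching union members when the model
// type and the config type disagree, and preserving the previously chosen image.
const applyModelChange = (cfg: IPAdapterCfg | FLUXReduxCfg, model: ModelRef): IPAdapterCfg | FLUXReduxCfg => {
  if (cfg.type === 'ip_adapter' && model.type === 'flux_redux') {
    return { ...initialFLUXReduxSketch, image: cfg.image, model };
  }
  if (cfg.type === 'flux_redux' && model.type === 'ip_adapter') {
    return { ...initialIPAdapterSketch, image: cfg.image, model };
  }
  return { ...cfg, model };
};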

View File

@@ -233,6 +233,13 @@ const zIPAdapterConfig = z.object({
});
export type IPAdapterConfig = z.infer<typeof zIPAdapterConfig>;
const zFLUXReduxConfig = z.object({
type: z.literal('flux_redux'),
image: zImageWithDims.nullable(),
model: zServerValidatedModelIdentifierField.nullable(),
});
export type FLUXReduxConfig = z.infer<typeof zFLUXReduxConfig>;
const zCanvasEntityBase = z.object({
id: zId,
name: zName,
@@ -242,10 +249,16 @@ const zCanvasEntityBase = z.object({
const zCanvasReferenceImageState = zCanvasEntityBase.extend({
type: z.literal('reference_image'),
ipAdapter: zIPAdapterConfig,
ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig]),
});
export type CanvasReferenceImageState = z.infer<typeof zCanvasReferenceImageState>;
export const isIPAdapterConfig = (config: IPAdapterConfig | FLUXReduxConfig): config is IPAdapterConfig =>
config.type === 'ip_adapter';
export const isFLUXReduxConfig = (config: IPAdapterConfig | FLUXReduxConfig): config is FLUXReduxConfig =>
config.type === 'flux_redux';
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
export type FillStyle = z.infer<typeof zFillStyle>;
export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success;
@@ -253,7 +266,7 @@ const zFill = z.object({ style: zFillStyle, color: zRgbColor });
const zRegionalGuidanceReferenceImageState = z.object({
id: zId,
ipAdapter: zIPAdapterConfig,
ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig]),
});
export type RegionalGuidanceReferenceImageState = z.infer<typeof zRegionalGuidanceReferenceImageState>;
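Making ipAdapter a z.discriminatedUnion('type', ...) is what lets the stored config be either variant while still parsing strictly. A minimal sketch of that zod pattern with simplified schemas (field names illustrative):

import { z } from 'zod';

const zIPAdapterSketch = z.object({ type: z.literal('ip_adapter'), weight: z.number() });
const zFLUXReduxSketch = z.object({ type: z.literal('flux_redux') });

// The 'type' field selects which member schema is used when parsing.
const zReferenceImageSketch = z.object({
  ipAdapter: z.discriminatedUnion('type', [zIPAdapterSketch, zFLUXReduxSketch]),
});

type ReferenceImageSketch = z.infer<typeof zReferenceImageSketch>;

// Narrowing helper mirroring the exported isIPAdapterConfig guard above.
const isIPAdapterSketch = (
  c: ReferenceImageSketch['ipAdapter']
): c is z.infer<typeof zIPAdapterSketch> => c.type === 'ip_adapter';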

View File

@@ -9,6 +9,7 @@ import type {
CanvasRegionalGuidanceState,
ControlLoRAConfig,
ControlNetConfig,
FLUXReduxConfig,
ImageWithDims,
IPAdapterConfig,
RgbColor,
@@ -70,6 +71,11 @@ export const initialIPAdapter: IPAdapterConfig = {
clipVisionModel: 'ViT-H',
weight: 1,
};
export const initialFLUXRedux: FLUXReduxConfig = {
type: 'flux_redux',
image: null,
model: null,
};
export const initialT2IAdapter: T2IAdapterConfig = {
type: 't2i_adapter',
model: null,

View File

@@ -44,33 +44,33 @@ export const getRegionalGuidanceWarnings = (
if (model.base === 'sd-3' || model.base === 'sd-2') {
// Unsupported model architecture
warnings.push(WARNINGS.UNSUPPORTED_MODEL);
} else if (model.base === 'flux') {
return warnings;
}
if (model.base === 'flux') {
// Some features are not supported for flux models
if (entity.negativePrompt !== null) {
warnings.push(WARNINGS.RG_NEGATIVE_PROMPT_NOT_SUPPORTED);
}
if (entity.referenceImages.length > 0) {
warnings.push(WARNINGS.RG_REFERENCE_IMAGES_NOT_SUPPORTED);
}
if (entity.autoNegative) {
warnings.push(WARNINGS.RG_AUTO_NEGATIVE_NOT_SUPPORTED);
}
} else {
entity.referenceImages.forEach(({ ipAdapter }) => {
if (!ipAdapter.model) {
// No model selected
warnings.push(WARNINGS.IP_ADAPTER_NO_MODEL_SELECTED);
} else if (ipAdapter.model.base !== model.base) {
// Supported model architecture but doesn't match
warnings.push(WARNINGS.IP_ADAPTER_INCOMPATIBLE_BASE_MODEL);
}
if (!ipAdapter.image) {
// No image selected
warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
}
});
}
entity.referenceImages.forEach(({ ipAdapter }) => {
if (!ipAdapter.model) {
// No model selected
warnings.push(WARNINGS.IP_ADAPTER_NO_MODEL_SELECTED);
} else if (ipAdapter.model.base !== model.base) {
// Supported model architecture but doesn't match
warnings.push(WARNINGS.IP_ADAPTER_INCOMPATIBLE_BASE_MODEL);
}
if (!ipAdapter.image) {
// No image selected
warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
}
});
}
return warnings;
@@ -82,22 +82,27 @@ export const getGlobalReferenceImageWarnings = (
): WarningTKey[] => {
const warnings: WarningTKey[] = [];
if (!entity.ipAdapter.model) {
// No model selected
warnings.push(WARNINGS.IP_ADAPTER_NO_MODEL_SELECTED);
} else if (model) {
if (model) {
if (model.base === 'sd-3' || model.base === 'sd-2') {
// Unsupported model architecture
warnings.push(WARNINGS.UNSUPPORTED_MODEL);
} else if (entity.ipAdapter.model.base !== model.base) {
return warnings;
}
const { ipAdapter } = entity;
if (!ipAdapter.model) {
// No model selected
warnings.push(WARNINGS.IP_ADAPTER_NO_MODEL_SELECTED);
} else if (ipAdapter.model.base !== model.base) {
// Supported model architecture but doesn't match
warnings.push(WARNINGS.IP_ADAPTER_INCOMPATIBLE_BASE_MODEL);
}
}
if (!entity.ipAdapter.image) {
// No image selected
warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
if (!entity.ipAdapter.image) {
// No image selected
warnings.push(WARNINGS.IP_ADAPTER_NO_IMAGE_SELECTED);
}
}
return warnings;

View File

@@ -7,6 +7,7 @@ import { Box, Flex, Heading } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { getStore } from 'app/store/nanostores/store';
import { useAppSelector } from 'app/store/storeHooks';
import { $focusedRegion } from 'common/hooks/focus';
import { setFileToPaste } from 'features/controlLayers/components/CanvasPasteModal';
import { DndDropOverlay } from 'features/dnd/DndDropOverlay';
import type { DndTargetState } from 'features/dnd/types';
@@ -99,10 +100,18 @@ export const FullscreenDropzone = memo(() => {
return;
}
const focusedRegion = $focusedRegion.get();
// While on the canvas tab and when pasting a single image, canvas may want to create a new layer. Let it handle
// the paste event.
const [firstImageFile] = files;
if (!isImageViewerOpen && activeTab === 'canvas' && files.length === 1 && firstImageFile) {
if (
focusedRegion === 'canvas' &&
!isImageViewerOpen &&
activeTab === 'canvas' &&
files.length === 1 &&
firstImageFile
) {
setFileToPaste(firstImageFile);
return;
}
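The new guard reads the focused region synchronously via $focusedRegion.get() before deciding whether the canvas should own the pasted file. The store itself is not part of these hunks; assuming it is a nanostores atom (which the .get() call suggests), a plausible sketch of how such a region store behaves:

import { atom } from 'nanostores';

type FocusRegionName = 'canvas' | 'gallery' | 'viewer' | 'workflows';

// Hypothetical sketch of the store: a nanostores atom holding the currently focused region.
export const $focusedRegionSketch = atom<FocusRegionName | null>(null);

// A focus handler would set it...
$focusedRegionSketch.set('canvas');

// ...and event handlers like the paste guard read it synchronously.
const canvasOwnsPaste = $focusedRegionSketch.get() === 'canvas';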

View File

@@ -1,6 +1,15 @@
import { Box, Button, Collapse, Divider, Flex, IconButton, useDisclosure } from '@invoke-ai/ui-library';
import {
Box,
Button,
Collapse,
Divider,
Flex,
IconButton,
type SystemStyleObject,
useDisclosure,
} from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { GalleryHeader } from 'features/gallery/components/GalleryHeader';
import { selectBoardSearchText } from 'features/gallery/store/gallerySelectors';
import { boardSearchTextChanged } from 'features/gallery/store/gallerySlice';
@@ -20,14 +29,20 @@ import { Gallery } from './Gallery';
const COLLAPSE_STYLES: CSSProperties = { flexShrink: 0, minHeight: 0 };
const FOCUS_REGION_STYLES: SystemStyleObject = {
width: 'full',
height: 'full',
position: 'relative',
flexDirection: 'column',
display: 'flex',
};
const GalleryPanelContent = () => {
const { t } = useTranslation();
const boardSearchText = useAppSelector(selectBoardSearchText);
const dispatch = useAppDispatch();
const boardSearchDisclosure = useDisclosure({ defaultIsOpen: !!boardSearchText.length });
const imperativePanelGroupRef = useRef<ImperativePanelGroupHandle>(null);
const galleryPanelFocusRef = useRef<HTMLDivElement>(null);
useFocusRegion('gallery', galleryPanelFocusRef);
const boardsListPanelOptions = useMemo<UsePanelOptions>(
() => ({
@@ -50,7 +65,7 @@ const GalleryPanelContent = () => {
}, [boardSearchText.length, boardSearchDisclosure, boardsListPanel, dispatch]);
return (
<Flex ref={galleryPanelFocusRef} position="relative" flexDirection="column" h="full" w="full" tabIndex={-1}>
<FocusRegionWrapper region="gallery" sx={FOCUS_REGION_STYLES}>
<Flex alignItems="center" justifyContent="space-between" w="full">
<Flex flexGrow={1} flexBasis={0}>
<Button
@@ -99,7 +114,7 @@ const GalleryPanelContent = () => {
<Gallery />
</Panel>
</PanelGroup>
</Flex>
</FocusRegionWrapper>
);
};
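FocusRegionWrapper replaces the hand-rolled ref + useFocusRegion + tabIndex={-1} pattern that this hunk removes. The wrapper's implementation is not shown in this compare, but based on the props it receives here (region, sx, focusOnMount), a plausible sketch looks roughly like this (treat it as an illustration, not the actual component):

import { Flex, type SystemStyleObject } from '@invoke-ai/ui-library';
import { useFocusRegion } from 'common/hooks/focus';
import type { PropsWithChildren } from 'react';
import { memo, useRef } from 'react';

type FocusRegionWrapperSketchProps = PropsWithChildren<{
  region: 'canvas' | 'gallery' | 'viewer' | 'workflows';
  sx?: SystemStyleObject;
  focusOnMount?: boolean;
}>;

// Hypothetical sketch: register the subtree as a focus region and render a focusable Flex,
// mirroring the ref/useFocusRegion/tabIndex boilerplate removed from the call sites.
const FocusRegionWrapperSketch = memo(({ region, sx, focusOnMount = false, children }: FocusRegionWrapperSketchProps) => {
  const ref = useRef<HTMLDivElement>(null);
  useFocusRegion(region, ref, { focusOnMount });
  return (
    <Flex ref={ref} tabIndex={-1} sx={sx}>
      {children}
    </Flex>
  );
});
FocusRegionWrapperSketch.displayName = 'FocusRegionWrapperSketch';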

View File

@@ -1,9 +1,8 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { SpinnerIcon } from 'features/gallery/components/ImageContextMenu/SpinnerIcon';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { useGetAndLoadEmbeddedWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadEmbeddedWorkflow';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFlowArrowBold } from 'react-icons/pi';
@@ -11,19 +10,15 @@ import { PiFlowArrowBold } from 'react-icons/pi';
export const ImageMenuItemLoadWorkflow = memo(() => {
const { t } = useTranslation();
const imageDTO = useImageDTOContext();
const [getAndLoadEmbeddedWorkflow, { isLoading }] = useGetAndLoadEmbeddedWorkflow();
const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
const hasTemplates = useStore($hasTemplates);
const onClick = useCallback(() => {
getAndLoadEmbeddedWorkflow(imageDTO.image_name);
}, [getAndLoadEmbeddedWorkflow, imageDTO.image_name]);
loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name });
}, [loadWorkflowWithDialog, imageDTO.image_name]);
return (
<MenuItem
icon={isLoading ? <SpinnerIcon /> : <PiFlowArrowBold />}
onClickCapture={onClick}
isDisabled={!imageDTO.has_workflow || !hasTemplates}
>
<MenuItem icon={<PiFlowArrowBold />} onClickCapture={onClick} isDisabled={!imageDTO.has_workflow || !hasTemplates}>
{t('nodes.loadWorkflow')}
</MenuItem>
);

View File

@@ -1,7 +0,0 @@
import { Flex, Spinner } from '@invoke-ai/ui-library';
export const SpinnerIcon = () => (
<Flex w="14px" alignItems="center" justifyContent="center">
<Spinner size="xs" />
</Flex>
);

View File

@@ -1,6 +1,6 @@
import { Box, Flex, IconButton } from '@invoke-ai/ui-library';
import { Box, IconButton, type SystemStyleObject } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion } from 'common/hooks/focus';
import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { CompareToolbar } from 'features/gallery/components/ImageViewer/CompareToolbar';
import CurrentImagePreview from 'features/gallery/components/ImageViewer/CurrentImagePreview';
@@ -9,7 +9,7 @@ import { ImageComparisonDroppable } from 'features/gallery/components/ImageViewe
import { ViewerToolbar } from 'features/gallery/components/ImageViewer/ViewerToolbar';
import { selectHasImageToCompare } from 'features/gallery/store/gallerySelectors';
import type { ReactNode } from 'react';
import { memo, useRef } from 'react';
import { memo } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';
@@ -25,29 +25,24 @@ const useFocusRegionOptions = {
focusOnMount: true,
};
const FOCUS_REGION_STYLES: SystemStyleObject = {
width: 'full',
height: 'full',
position: 'absolute',
flexDirection: 'column',
inset: 0,
alignItems: 'center',
justifyContent: 'center',
overflow: 'hidden',
};
export const ImageViewer = memo(({ closeButton }: Props) => {
useAssertSingleton('ImageViewer');
const hasImageToCompare = useAppSelector(selectHasImageToCompare);
const [containerRef, containerDims] = useMeasure<HTMLDivElement>();
const ref = useRef<HTMLDivElement>(null);
useFocusRegion('viewer', ref, useFocusRegionOptions);
return (
<Flex
ref={ref}
tabIndex={-1}
layerStyle="first"
borderRadius="base"
position="absolute"
flexDirection="column"
top={0}
right={0}
bottom={0}
left={0}
alignItems="center"
justifyContent="center"
overflow="hidden"
>
<FocusRegionWrapper region="viewer" sx={FOCUS_REGION_STYLES} layerStyle="first" {...useFocusRegionOptions}>
{hasImageToCompare && <CompareToolbar />}
{!hasImageToCompare && <ViewerToolbar closeButton={closeButton} />}
<Box ref={containerRef} w="full" h="full" p={2}>
@@ -55,7 +50,7 @@ export const ImageViewer = memo(({ closeButton }: Props) => {
{hasImageToCompare && <ImageComparison containerDims={containerDims} />}
</Box>
<ImageComparisonDroppable />
</Flex>
</FocusRegionWrapper>
);
});

View File

@@ -7,6 +7,7 @@ import { LOADING_SYMBOL, useHasImages } from 'features/gallery/hooks/useHasImage
import { $installModelsTab } from 'features/modelManagerV2/subpanels/InstallModels';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectIsLocal } from 'features/system/store/configSlice';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { setActiveTab } from 'features/ui/store/uiSlice';
import type { PropsWithChildren } from 'react';
import { memo, useCallback, useMemo } from 'react';
@@ -19,6 +20,7 @@ export const NoContentForViewer = memo(() => {
const [mainModels, { data }] = useMainModels();
const isLocal = useAppSelector(selectIsLocal);
const isEnabled = useFeatureStatus('starterModels');
const activeTab = useAppSelector(selectActiveTab);
const { t } = useTranslation();
const showStarterBundles = useMemo(() => {
@@ -38,10 +40,10 @@ export const NoContentForViewer = memo(() => {
}
return (
<Flex flexDir="column" gap={8} alignItems="center" textAlign="center" maxW="600px">
<Flex flexDir="column" gap={8} alignItems="center" textAlign="center" maxW="400px">
<InvokeLogoIcon w={32} h={32} />
<Flex flexDir="column" gap={4} alignItems="center" textAlign="center">
{isLocal ? <GetStartedLocal /> : <GetStartedCommercial />}
{isLocal ? <GetStartedLocal /> : activeTab === 'workflows' ? <GetStartedWorkflows /> : <GetStartedCommercial />}
{showStarterBundles && <StarterBundlesCallout />}
<Divider />
<GettingStartedVideosCallout />
@@ -103,6 +105,14 @@ const GetStartedCommercial = () => {
);
};
const GetStartedWorkflows = () => {
return (
<Text fontSize="md" color="base.200">
<Trans i18nKey="newUserExperience.toGetStartedWorkflow" components={{ StrongComponent }} />
</Text>
);
};
const GettingStartedVideosCallout = () => {
return (
<Text fontSize="md" color="base.200">

View File

@@ -17,7 +17,7 @@ import {
} from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { useGetAndLoadEmbeddedWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadEmbeddedWorkflow';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { useCallback, useEffect, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
@@ -147,14 +147,15 @@ export const useImageActions = (imageDTO: ImageDTO) => {
});
}, [metadata, imageDTO]);
const [getAndLoadEmbeddedWorkflow] = useGetAndLoadEmbeddedWorkflow();
const loadWorkflowWithDialog = useLoadWorkflowWithDialog();
const loadWorkflow = useCallback(() => {
const loadWorkflowFromImage = useCallback(() => {
if (!imageDTO.has_workflow || !hasTemplates) {
return;
}
getAndLoadEmbeddedWorkflow(imageDTO.image_name);
}, [getAndLoadEmbeddedWorkflow, hasTemplates, imageDTO.has_workflow, imageDTO.image_name]);
loadWorkflowWithDialog({ type: 'image', data: imageDTO.image_name });
}, [hasTemplates, imageDTO.has_workflow, imageDTO.image_name, loadWorkflowWithDialog]);
const recallSize = useCallback(() => {
if (isStaging) {
@@ -180,7 +181,7 @@ export const useImageActions = (imageDTO: ImageDTO) => {
recallSeed,
recallPrompts,
createAsPreset,
loadWorkflow,
loadWorkflow: loadWorkflowFromImage,
hasWorkflow: imageDTO.has_workflow,
recallSize,
upscale,

View File

@@ -1,10 +1,10 @@
import { Flex } from '@invoke-ai/ui-library';
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { useFocusRegion } from 'common/hooks/focus';
import { AddNodeCmdk } from 'features/nodes/components/flow/AddNodeCmdk/AddNodeCmdk';
import TopPanel from 'features/nodes/components/flow/panels/TopPanel/TopPanel';
import WorkflowEditorSettings from 'features/nodes/components/flow/panels/TopRightPanel/WorkflowEditorSettings';
import { memo, useRef } from 'react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFlowArrowBold } from 'react-icons/pi';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
@@ -13,24 +13,20 @@ import { Flow } from './flow/Flow';
import BottomLeftPanel from './flow/panels/BottomLeftPanel/BottomLeftPanel';
import MinimapPanel from './flow/panels/MinimapPanel/MinimapPanel';
const FOCUS_REGION_STYLES: SystemStyleObject = {
position: 'relative',
width: 'full',
height: 'full',
alignItems: 'center',
justifyContent: 'center',
};
const NodeEditor = () => {
const { data, isLoading } = useGetOpenAPISchemaQuery();
const { t } = useTranslation();
const ref = useRef<HTMLDivElement>(null);
useFocusRegion('workflows', ref);
return (
<Flex
tabIndex={-1}
ref={ref}
layerStyle="first"
position="relative"
width="full"
height="full"
borderRadius="base"
alignItems="center"
justifyContent="center"
>
<FocusRegionWrapper region="workflows" layerStyle="first" sx={FOCUS_REGION_STYLES}>
{data && (
<>
<Flow />
@@ -42,7 +38,7 @@ const NodeEditor = () => {
)}
<WorkflowEditorSettings />
{isLoading && <IAINoContentFallback label={t('nodes.loadingNodes')} icon={PiFlowArrowBold} />}
</Flex>
</FocusRegionWrapper>
);
};

View File

@@ -252,7 +252,7 @@ export const Flow = memo(() => {
id: 'pasteSelection',
category: 'workflows',
callback: pasteSelection,
options: { preventDefault: true },
options: { enabled: isWorkflowsFocused, preventDefault: true },
dependencies: [pasteSelection],
});
@@ -260,7 +260,7 @@ export const Flow = memo(() => {
id: 'pasteSelectionWithEdges',
category: 'workflows',
callback: pasteSelectionWithEdges,
options: { preventDefault: true },
options: { enabled: isWorkflowsFocused, preventDefault: true },
dependencies: [pasteSelectionWithEdges],
});
@@ -270,7 +270,7 @@ export const Flow = memo(() => {
callback: () => {
dispatch(undo());
},
options: { enabled: mayUndo, preventDefault: true },
options: { enabled: isWorkflowsFocused && mayUndo, preventDefault: true },
dependencies: [mayUndo],
});
@@ -280,7 +280,7 @@ export const Flow = memo(() => {
callback: () => {
dispatch(redo());
},
options: { enabled: mayRedo, preventDefault: true },
options: { enabled: isWorkflowsFocused && mayRedo, preventDefault: true },
dependencies: [mayRedo],
});
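These hunks gate the workflow editor's paste/undo/redo hotkeys on the workflows focus region via the enabled option. As a generic illustration of the same idea using react-hotkeys-hook (already used elsewhere in this compare) rather than the project's useRegisteredHotkeys wrapper:

import { useHotkeys } from 'react-hotkeys-hook';

// Sketch: undo/redo only fire while the workflows region is focused and the action is possible.
const useWorkflowUndoRedoHotkeysSketch = (
  isWorkflowsFocused: boolean,
  mayUndo: boolean,
  mayRedo: boolean,
  undo: () => void,
  redo: () => void
) => {
  useHotkeys(['ctrl+z', 'meta+z'], undo, { enabled: isWorkflowsFocused && mayUndo, preventDefault: true }, [
    undo,
    isWorkflowsFocused,
    mayUndo,
  ]);
  useHotkeys(['ctrl+shift+z', 'meta+shift+z'], redo, { enabled: isWorkflowsFocused && mayRedo, preventDefault: true }, [
    redo,
    isWorkflowsFocused,
    mayRedo,
  ]);
};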

View File

@@ -3,24 +3,35 @@ import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/f
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
export const FloatFieldInput = memo((props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate>) => {
const { defaultValue, onChange, value, min, max, step, fineStep } = useFloatField(props);
export const FloatFieldInput = memo(
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
return (
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
/>
);
});
return (
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
/>
);
}
);
FloatFieldInput.displayName = 'FloatFieldInput';

View File

@@ -3,18 +3,27 @@ import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/f
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
export const FloatFieldInputAndSlider = memo(
(props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate>) => {
const { defaultValue, onChange, value, min, max, step, fineStep } = useFloatField(props);
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
return (
<>
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={value}
value={field.value}
min={min}
max={max}
step={step}
@@ -27,7 +36,7 @@ export const FloatFieldInputAndSlider = memo(
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={value}
value={field.value}
min={min}
max={max}
step={step}

View File

@@ -3,26 +3,37 @@ import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/f
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
export const FloatFieldSlider = memo((props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate>) => {
const { defaultValue, onChange, value, min, max, step, fineStep } = useFloatField(props);
export const FloatFieldSlider = memo(
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
return (
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
);
});
return (
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
);
}
);
FloatFieldSlider.displayName = 'FloatFieldSlider';

View File

@@ -1,65 +1,89 @@
import { NUMPY_RAND_MAX } from 'app/constants';
import { useAppDispatch } from 'app/store/storeHooks';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple';
import { fieldFloatValueChanged } from 'features/nodes/store/nodesSlice';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { FloatFieldInputTemplate } from 'features/nodes/types/field';
import { constrainNumber } from 'features/nodes/util/constrainNumber';
import { isNil } from 'lodash-es';
import { useCallback, useMemo } from 'react';
export const useFloatField = (props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate>) => {
const { nodeId, field, fieldTemplate } = props;
export const useFloatField = (
nodeId: string,
fieldName: string,
fieldTemplate: FloatFieldInputTemplate,
overrides: { min?: number; max?: number; step?: number } = {}
) => {
const { min: overrideMin, max: overrideMax, step: overrideStep } = overrides;
const dispatch = useAppDispatch();
const onChange = useCallback(
(value: number) => {
dispatch(fieldFloatValueChanged({ nodeId, fieldName: field.name, value }));
},
[dispatch, field.name, nodeId]
);
const min = useMemo(() => {
let min = -NUMPY_RAND_MAX;
if (!isNil(fieldTemplate.minimum)) {
min = fieldTemplate.minimum;
}
if (!isNil(fieldTemplate.exclusiveMinimum)) {
min = fieldTemplate.exclusiveMinimum + 0.01;
}
return min;
}, [fieldTemplate.exclusiveMinimum, fieldTemplate.minimum]);
const max = useMemo(() => {
let max = NUMPY_RAND_MAX;
if (!isNil(fieldTemplate.maximum)) {
max = fieldTemplate.maximum;
}
if (!isNil(fieldTemplate.exclusiveMaximum)) {
max = fieldTemplate.exclusiveMaximum - 0.01;
}
return max;
}, [fieldTemplate.exclusiveMaximum, fieldTemplate.maximum]);
const step = useMemo(() => {
if (overrideStep !== undefined) {
return overrideStep;
}
if (isNil(fieldTemplate.multipleOf)) {
return 0.1;
}
return fieldTemplate.multipleOf;
}, [fieldTemplate.multipleOf]);
}, [fieldTemplate.multipleOf, overrideStep]);
const fineStep = useMemo(() => {
if (overrideStep !== undefined) {
return overrideStep;
}
if (isNil(fieldTemplate.multipleOf)) {
return 0.01;
}
return fieldTemplate.multipleOf;
}, [fieldTemplate.multipleOf]);
}, [fieldTemplate.multipleOf, overrideStep]);
const min = useMemo(() => {
let min = -NUMPY_RAND_MAX;
if (overrideMin !== undefined) {
min = overrideMin;
} else if (!isNil(fieldTemplate.minimum)) {
min = fieldTemplate.minimum;
} else if (!isNil(fieldTemplate.exclusiveMinimum)) {
min = fieldTemplate.exclusiveMinimum + 0.01;
}
return roundUpToMultiple(min, step);
}, [fieldTemplate.exclusiveMinimum, fieldTemplate.minimum, overrideMin, step]);
const max = useMemo(() => {
let max = NUMPY_RAND_MAX;
if (overrideMax !== undefined) {
max = overrideMax;
} else if (!isNil(fieldTemplate.maximum)) {
max = fieldTemplate.maximum;
} else if (!isNil(fieldTemplate.exclusiveMaximum)) {
max = fieldTemplate.exclusiveMaximum - 0.01;
}
return roundDownToMultiple(max, step);
}, [fieldTemplate.exclusiveMaximum, fieldTemplate.maximum, overrideMax, step]);
const constrainValue = useCallback(
(v: number) =>
constrainNumber(v, { min, max, step: step }, { min: overrideMin, max: overrideMax, step: overrideStep }),
[max, min, overrideMax, overrideMin, overrideStep, step]
);
const onChange = useCallback(
(value: number) => {
dispatch(fieldFloatValueChanged({ nodeId, fieldName, value }));
},
[dispatch, fieldName, nodeId]
);
return {
defaultValue: fieldTemplate.default,
onChange,
value: field.value,
min,
max,
step,
fineStep,
constrainValue,
};
};
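constrainNumber's implementation is not included in this compare; from the call site above, it takes the value, the template-derived constraints, and the per-field overrides. A plausible self-contained sketch, assuming the intent is to let overrides win, snap the value to the effective step, and clamp it into range (the exact rounding rules are an assumption):

type NumberConstraintsSketch = { min?: number; max?: number; step?: number };

// Hypothetical sketch of constrainNumber: overrides take precedence over the template
// constraints; the value is snapped to the step and clamped into [min, max].
const constrainNumberSketch = (
  v: number,
  constraints: NumberConstraintsSketch,
  overrides: NumberConstraintsSketch = {}
): number => {
  const min = overrides.min ?? constraints.min ?? Number.NEGATIVE_INFINITY;
  const max = overrides.max ?? constraints.max ?? Number.POSITIVE_INFINITY;
  const step = overrides.step ?? constraints.step ?? 1;
  const snapped = Math.round(v / step) * step;
  return Math.min(max, Math.max(min, snapped));
};

// constrainNumberSketch(7.3, { min: 0, max: 10, step: 0.5 }) === 7.5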

View File

@@ -95,6 +95,8 @@ import {
} from 'features/nodes/types/field';
import type { NodeFieldElement } from 'features/nodes/types/workflow';
import { memo } from 'react';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import BoardFieldInputComponent from './inputs/BoardFieldInputComponent';
import BooleanFieldInputComponent from './inputs/BooleanFieldInputComponent';
@@ -157,6 +159,8 @@ export const InputFieldRenderer = memo(({ nodeId, fieldName, settings }: Props)
return <StringFieldInput nodeId={nodeId} field={field} fieldTemplate={template} />;
} else if (settings.component === 'textarea') {
return <StringFieldTextarea nodeId={nodeId} field={field} fieldTemplate={template} />;
} else {
assert<Equals<never, typeof settings.component>>(false, 'Unexpected settings.component');
}
}
@@ -171,32 +175,47 @@ export const InputFieldRenderer = memo(({ nodeId, fieldName, settings }: Props)
if (!isIntegerFieldInputInstance(field)) {
return null;
}
if (settings?.type !== 'integer-field-config') {
if (!settings || settings.type !== 'integer-field-config') {
return <IntegerFieldInput nodeId={nodeId} field={field} fieldTemplate={template} />;
}
if (settings.component === 'number-input') {
return <IntegerFieldInput nodeId={nodeId} field={field} fieldTemplate={template} />;
} else if (settings.component === 'slider') {
return <IntegerFieldSlider nodeId={nodeId} field={field} fieldTemplate={template} />;
} else if (settings.component === 'number-input-and-slider') {
return <IntegerFieldInputAndSlider nodeId={nodeId} field={field} fieldTemplate={template} />;
return <IntegerFieldInput nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
if (settings.component === 'slider') {
return <IntegerFieldSlider nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
if (settings.component === 'number-input-and-slider') {
return <IntegerFieldInputAndSlider nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
assert<Equals<never, typeof settings.component>>(false, 'Unexpected settings.component');
}
if (isFloatFieldInputTemplate(template)) {
if (!isFloatFieldInputInstance(field)) {
return null;
}
if (settings?.type !== 'float-field-config') {
if (!settings || settings.type !== 'float-field-config') {
return <FloatFieldInput nodeId={nodeId} field={field} fieldTemplate={template} />;
}
if (settings.component === 'number-input') {
return <FloatFieldInput nodeId={nodeId} field={field} fieldTemplate={template} />;
} else if (settings.component === 'slider') {
return <FloatFieldSlider nodeId={nodeId} field={field} fieldTemplate={template} />;
} else if (settings.component === 'number-input-and-slider') {
return <FloatFieldInputAndSlider nodeId={nodeId} field={field} fieldTemplate={template} />;
return <FloatFieldInput nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
if (settings.component === 'slider') {
return <FloatFieldSlider nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
if (settings.component === 'number-input-and-slider') {
return <FloatFieldInputAndSlider nodeId={nodeId} field={field} fieldTemplate={template} settings={settings} />;
}
assert<Equals<never, typeof settings.component>>(false, 'Unexpected settings.component');
}
if (isIntegerFieldCollectionInputTemplate(template)) {

View File

@@ -1,4 +1,5 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { Flex, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { useInputFieldErrors } from 'features/nodes/hooks/useInputFieldErrors';
import { useInputFieldInstance } from 'features/nodes/hooks/useInputFieldInstance';
import { useInputFieldTemplate } from 'features/nodes/hooks/useInputFieldTemplate';
import { useFieldTypeName } from 'features/nodes/hooks/usePrettyFieldType';
@@ -17,6 +18,7 @@ export const InputFieldTooltipContent = memo(({ nodeId, fieldName }: Props) => {
const fieldInstance = useInputFieldInstance(nodeId, fieldName);
const fieldTemplate = useInputFieldTemplate(nodeId, fieldName);
const fieldTypeName = useFieldTypeName(fieldTemplate.type);
const fieldErrors = useInputFieldErrors(nodeId, fieldName);
const fieldTitle = useMemo(() => {
if (fieldInstance.label && fieldTemplate.title) {
@@ -42,6 +44,16 @@ export const InputFieldTooltipContent = memo(({ nodeId, fieldName }: Props) => {
<Text>
{t('common.input')}: {startCase(fieldTemplate.input)}
</Text>
{fieldErrors.length > 0 && (
<>
<Text color="error.500">{t('common.error_withCount', { count: fieldErrors.length })}:</Text>
<UnorderedList>
{fieldErrors.map(({ issue }) => (
<ListItem key={issue}>{issue}</ListItem>
))}
</UnorderedList>
</>
)}
</Flex>
);
});
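The renderer hunks earlier in this compare end each settings branch with assert<Equals<never, typeof settings.component>>(false, ...), which turns a forgotten component variant into a compile-time error rather than a silent fallthrough. A minimal standalone sketch of that tsafe exhaustiveness pattern (names illustrative):

import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

type NumberFieldComponent = 'number-input' | 'slider' | 'number-input-and-slider';

const describeComponent = (component: NumberFieldComponent) => {
  if (component === 'number-input') {
    return 'plain number input';
  }
  if (component === 'slider') {
    return 'slider only';
  }
  if (component === 'number-input-and-slider') {
    return 'input plus slider';
  }
  // If a new variant is added to NumberFieldComponent, `component` is no longer `never`
  // here, so Equals<never, typeof component> stops typechecking and this line fails to compile.
  assert<Equals<never, typeof component>>(false, 'Unexpected component');
};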

Some files were not shown because too many files have changed in this diff.