Compare commits

315 Commits

Author SHA1 Message Date
psychedelicious
b1b009f7b8 chore: bump version to v6.5.1 2025-08-28 22:57:14 +10:00
psychedelicious
3431e6385c chore: uv lock 2025-08-28 22:57:14 +10:00
psychedelicious
5db1027d32 Pin sentencepiece version in pyproject.toml
Pin sentencepiece version to 0.2.0 to avoid coredump issues.
2025-08-28 22:57:14 +10:00
Hosted Weblate
579f182fe9 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-08-28 22:51:40 +10:00
Riccardo Giovanetti
55bf41f63f translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2053 of 2082 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-28 22:51:40 +10:00
psychedelicious
fc32fd2d2e fix(ui): progress image renders at physical size 2025-08-28 22:47:52 +10:00
psychedelicious
a2b6536078 fix(ui): konva caching opt-out doesn't do what i thought it would 2025-08-28 22:45:03 +10:00
Mary Hipp Rogers
144c54a6c8 Revert "video_output support"
This reverts commit 453ef1a220.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ca40daeb97 Revert "rough rough POC of video tab"
This reverts commit e89266bfe3.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e600cdc826 Revert "split out RunwayVideoOutput from VideoOutput"
This reverts commit 97719b0aab.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b7c52f33dc Revert "update VideoField"
This reverts commit bd251f8cce.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e78157fcf0 Revert "push up updates for VideoField"
This reverts commit 94ba840948.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7d7b98249f Revert "build out adhoc video saving graph"
This reverts commit 07565d4015.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f5bf84f304 Revert "combine nodes that generate and save videos"
This reverts commit eff9c7b92f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c30d5bece2 Revert "add video models"
This reverts commit 295b5a20a8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
27845b2f1b Revert "add noop video router"
This reverts commit e9c4e12454.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
bad6eea077 Revert "integrating video into gallery - thinking maybe a new category of image would make more senes"
This reverts commit 5c93e53195.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9c26ac5ce3 Revert "add duration and aspect ratio to video settings"
This reverts commit 4d8bcad15b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b7306bb5c9 Revert "feat(ui): add dnd target for video start frame"
This reverts commit 530d20c1be.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0c115177b2 Revert "feat(nodes): update VideoField & VideoOutput"
This reverts commit 67de3f2d9b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5aae41b5bb Revert "chore: ruff"
This reverts commit 9380d8901c.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7ad09a2f79 Revert "feat(ui): fiddle w/ video stuff"
This reverts commit f98bbc32dd.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5a6d3639b7 Revert "feat(ui): fiddle w/ video stuff"
This reverts commit 79e8482b27.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
84617d3df2 Revert "feat(ui): more video stuff"
This reverts commit 963c2ec60c.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e05f30749e Revert "add readiness logic to video tab"
This reverts commit 288ac0a293.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
88a2e27338 Revert "hook up starring, unstarring, and deleting single videos (no multiselect yet), adapt context menus to work for both images and videos and start on video context menu"
This reverts commit a918198d4f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
15a6fd76c8 Revert "stubbing out change board functionality"
This reverts commit 67042e6dec.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6adb46a86c Revert "fix(ui): panel names on video tab"
This reverts commit 64dfa125d2.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e8a74eb79d Revert "feat(ui): gallery optimistic updates for video"
This reverts commit 0ec6d33086.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
dcd716c384 Revert "feat(ui): consolidated gallery (wip)"
This reverts commit 6ef1c2a5e1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
56697635dd Revert "add Veo3 model support to backend"
This reverts commit 49d569ec59.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5b5657e292 Revert "replace runway with veo, build out veo3 model support"
This reverts commit d95a698ebd.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ad3dfbe1ed Revert "add resolution as a generation setting"
This reverts commit b71829a827.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
59ddc4f7b0 Revert "add videos to change board modal"
This reverts commit 45b4432833.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4653b79f12 Revert "Revert "feat(ui): consolidated gallery (wip)""
This reverts commit 637d19c22b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
778d6f167f Revert "gallery"
This reverts commit aa4e3adadb.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
05c71f50f1 Revert "lint the dang thing"
This reverts commit 1b0d599dc2.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
406e0be39c Revert "tsc"
This reverts commit 7828102b67.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0d71234a12 Revert "lint"
This reverts commit b377b80446.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e38019bb70 Revert "update redux selection to have a list of images and/or videos, update image viewer to show either image or video depending on what is selected"
This reverts commit 8df3067599.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
a879880b42 Revert "add runway to backend"
This reverts commit f631b5178f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
71c8accbfe Revert "add runway back as a model and allow runway and veo3 to live together in peace and harmony"
This reverts commit b2026d9c00.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
154fb99daf Revert "fix video styling"
This reverts commit 3d9889e272.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0df476ce13 Revert "feat(ui): simpler layout for video player"
This reverts commit 3a1cedbced.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e7ad830fa9 Revert "tidy(ui): remove unused VideoAtPosition component"
This reverts commit e55d39a20b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e81e0a8286 Revert "fix(ui): fetching imageDTO for video"
This reverts commit fbf8aa17c8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
d0f7e72cbb Revert "fix(ui): missing tranlsation"
This reverts commit 89efe9c2b1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
fdead4fb8c Revert "fix(ui): iterations works for video models"
This reverts commit 24f22d539f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
31c9945b32 Revert "fix(ui): generate tab graph builder"
This reverts commit 84dc4e4ea9.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
22de8a4b12 Revert "feat(ui): video dnd"
This reverts commit f5fdba795a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
89cb3c3230 Revert "chore(ui): dpdm"
This reverts commit 6a7fe6668b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7bb99ece4e Revert "chore(ui): lint"
This reverts commit 55139bb169.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
28f040123f Revert "fix(ui): locate in gallery, galleryview when selecting image/video"
This reverts commit 26fe937d97.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
1be3a4db64 Revert "fix(ui): use correct placeholder for vidoes"
This reverts commit 7e031e9c01.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
cb44c995d2 Revert "docs(ui): add note about visual jank in gallery"
This reverts commit 2d9c82da85.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9b9b35c315 Revert "add Video as new model type"
This reverts commit fb0a924918.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f6edab6032 Revert "add UI support for new model type Video"
This reverts commit c6f2d127ef.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f79665b023 Revert "updates for new model type"
This reverts commit 23cde86bc4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6b1bc7a87d Revert "split out video aspect/ratio into its own components"
This reverts commit 6c375b228e.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c6f2994c84 Revert "video metadata support"
This reverts commit b16d1a943d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0cff67ff23 Revert "add video_count to boardDTO"
This reverts commit 1cc6893d0d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e957c11c9a Revert "add asset_count to BoardDTO and split it out from image count"
This reverts commit d4378d9f2a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4baa685c7a Revert "add video_count and asset_count to boards UI"
This reverts commit e36490c2ec.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
1bd5907a12 Revert "add save frame functionality"
This reverts commit 6a20271dba.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
2fd56e6029 Revert "use context to track video ref so that toolbar can also save current frame"
This reverts commit 1bf25fadb3.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b0548edc8c Revert "lint"
This reverts commit 378f33bc92.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
41d781176f Revert "lint again"
This reverts commit 41e1697e79.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
8709de0b33 Revert "fix(ui): rebase conflict"
This reverts commit bc6dd12083.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
af43fe2fd4 Revert "feat(ui): simplify and consolidate video capture logic"
This reverts commit c5a76806c1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ebbb11c3b1 Revert "feat(ui): add selector to get model config for current video model"
This reverts commit 5cabc37a87.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0fc8c08da3 Revert "feat(ui): use correct model config object in video graph builders"
This reverts commit 9fcba3b876.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
bfadcffe3c Revert "fix(ui): do not change canvas bbox on video model change"
This reverts commit 8eb3f40e1b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
49c2332c13 Revert "fix(ui): do not store whole model config in state"
This reverts commit b2ed3c99d4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
dacef158c4 Revert "feat(ui): metadata recall for videos"
This reverts commit 4c32b2a123.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0c34d8201e Revert "feat(ui): delete confirmation for videos"
This reverts commit 505c75a5ab.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
77132075ff Revert "hide video features if video is disabled"
This reverts commit 0de5097207.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f008d3b0b2 Revert "add option for video upsell, rearrange navigation bar and gallery tabs"
This reverts commit 4845d31857.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4e66ccefe8 Revert "rearrange image | video | asset for boards"
This reverts commit 8a60def51f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5d0ed45326 Revert "fix view on large screens, restore auth for screen capture"
This reverts commit 1f526a1c27.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
379d633ac6 Revert "launchpad cleanup"
This reverts commit ab41f71a36.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
93bba1b692 Revert "studio init action for video tab"
This reverts commit 431fd83a43.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
667e175ab7 Revert "chore: ruff"
This reverts commit 3ae99df091.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
de146aa4aa Revert "chore(ui): lint"
This reverts commit 36c16d2781.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ed9c2c8208 Revert "fix(ui): tab hotkeys for video"
This reverts commit 20813b5615.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9d984878f3 Revert "tweak(ui): nav bar divider not so bright"
This reverts commit 269d4fe670.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
585eb8c69d Revert "fix(ui): graph builder check for veo"
This reverts commit 239fb86a46.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c105bae127 Revert "feat(ui): add border around starting frame image"
This reverts commit 8642e8881d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c39f26266f Revert "feat(ui): tweak video settings padding"
This reverts commit 842d729ec8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
47dffd123a Revert "fix(ui): do not highlight starting frame image in red when it is not required"
This reverts commit 0b05b24e9a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b946ec3172 Revert "chore(ui): lint"
This reverts commit 8c2e6a3988.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
024c02329d Revert "fix(ui): metadata viewer translations"
This reverts commit 2a6cfde488.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4b43b59472 Revert "feat(ui): remove unimplemented context menu items for video"
This reverts commit a6b0581939.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
d11f115e1a Revert "fix(ui): make ctx menu download tooltip not refer to iamges"
This reverts commit e4f24c4dc4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
92253ce854 Revert "fix(ui): make ctx menu star label not refer to iamges"
This reverts commit ec793cb636.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0ebbfa90c9 Revert "fix(ui): more video translations"
This reverts commit 0d827d8306.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
fdfee11e37 Revert "chore(ui): lint"
This reverts commit 3971382a6d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6091bf4f60 Revert "fix(ui): hide unused queue actions menu item category"
This reverts commit 07271ca468.
2025-08-28 08:32:47 -04:00
psychedelicious
07271ca468 fix(ui): hide unused queue actions menu item category 2025-08-28 08:23:58 -04:00
psychedelicious
3971382a6d chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
0d827d8306 fix(ui): more video translations 2025-08-28 08:23:58 -04:00
psychedelicious
ec793cb636 fix(ui): make ctx menu star label not refer to iamges 2025-08-28 08:23:58 -04:00
psychedelicious
e4f24c4dc4 fix(ui): make ctx menu download tooltip not refer to iamges 2025-08-28 08:23:58 -04:00
psychedelicious
a6b0581939 feat(ui): remove unimplemented context menu items for video 2025-08-28 08:23:58 -04:00
psychedelicious
2a6cfde488 fix(ui): metadata viewer translations 2025-08-28 08:23:58 -04:00
psychedelicious
8c2e6a3988 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
0b05b24e9a fix(ui): do not highlight starting frame image in red when it is not required 2025-08-28 08:23:58 -04:00
psychedelicious
842d729ec8 feat(ui): tweak video settings padding 2025-08-28 08:23:58 -04:00
psychedelicious
8642e8881d feat(ui): add border around starting frame image 2025-08-28 08:23:58 -04:00
psychedelicious
239fb86a46 fix(ui): graph builder check for veo 2025-08-28 08:23:58 -04:00
psychedelicious
269d4fe670 tweak(ui): nav bar divider not so bright 2025-08-28 08:23:58 -04:00
psychedelicious
20813b5615 fix(ui): tab hotkeys for video 2025-08-28 08:23:58 -04:00
psychedelicious
36c16d2781 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
3ae99df091 chore: ruff 2025-08-28 08:23:58 -04:00
Mary Hipp
431fd83a43 studio init action for video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
ab41f71a36 launchpad cleanup 2025-08-28 08:23:58 -04:00
Mary Hipp
1f526a1c27 fix view on large screens, restore auth for screen capture 2025-08-28 08:23:58 -04:00
Mary Hipp
8a60def51f rearrange image | video | asset for boards 2025-08-28 08:23:58 -04:00
Mary Hipp
4845d31857 add option for video upsell, rearrange navigation bar and gallery tabs 2025-08-28 08:23:58 -04:00
Mary Hipp
0de5097207 hide video features if video is disabled 2025-08-28 08:23:58 -04:00
psychedelicious
505c75a5ab feat(ui): delete confirmation for videos 2025-08-28 08:23:58 -04:00
psychedelicious
4c32b2a123 feat(ui): metadata recall for videos 2025-08-28 08:23:58 -04:00
psychedelicious
b2ed3c99d4 fix(ui): do not store whole model config in state 2025-08-28 08:23:58 -04:00
psychedelicious
8eb3f40e1b fix(ui): do not change canvas bbox on video model change 2025-08-28 08:23:58 -04:00
psychedelicious
9fcba3b876 feat(ui): use correct model config object in video graph builders 2025-08-28 08:23:58 -04:00
psychedelicious
5cabc37a87 feat(ui): add selector to get model config for current video model 2025-08-28 08:23:58 -04:00
psychedelicious
c5a76806c1 feat(ui): simplify and consolidate video capture logic 2025-08-28 08:23:58 -04:00
psychedelicious
bc6dd12083 fix(ui): rebase conflict 2025-08-28 08:23:58 -04:00
Mary Hipp
41e1697e79 lint again 2025-08-28 08:23:58 -04:00
Mary Hipp
378f33bc92 lint 2025-08-28 08:23:58 -04:00
Mary Hipp
1bf25fadb3 use context to track video ref so that toolbar can also save current frame 2025-08-28 08:23:58 -04:00
Mary Hipp
6a20271dba add save frame functionality 2025-08-28 08:23:58 -04:00
Mary Hipp
e36490c2ec add video_count and asset_count to boards UI 2025-08-28 08:23:58 -04:00
Mary Hipp
d4378d9f2a add asset_count to BoardDTO and split it out from image count 2025-08-28 08:23:58 -04:00
Mary Hipp
1cc6893d0d add video_count to boardDTO 2025-08-28 08:23:58 -04:00
Mary Hipp
b16d1a943d video metadata support 2025-08-28 08:23:58 -04:00
Mary Hipp
6c375b228e split out video aspect/ratio into its own components 2025-08-28 08:23:58 -04:00
Mary Hipp
23cde86bc4 updates for new model type 2025-08-28 08:23:58 -04:00
Mary Hipp
c6f2d127ef add UI support for new model type Video 2025-08-28 08:23:58 -04:00
Mary Hipp
fb0a924918 add Video as new model type 2025-08-28 08:23:58 -04:00
psychedelicious
2d9c82da85 docs(ui): add note about visual jank in gallery 2025-08-28 08:23:58 -04:00
psychedelicious
7e031e9c01 fix(ui): use correct placeholder for vidoes 2025-08-28 08:23:58 -04:00
psychedelicious
26fe937d97 fix(ui): locate in gallery, galleryview when selecting image/video 2025-08-28 08:23:58 -04:00
psychedelicious
55139bb169 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
6a7fe6668b chore(ui): dpdm 2025-08-28 08:23:58 -04:00
psychedelicious
f5fdba795a feat(ui): video dnd 2025-08-28 08:23:58 -04:00
psychedelicious
84dc4e4ea9 fix(ui): generate tab graph builder 2025-08-28 08:23:58 -04:00
psychedelicious
24f22d539f fix(ui): iterations works for video models 2025-08-28 08:23:58 -04:00
psychedelicious
89efe9c2b1 fix(ui): missing tranlsation 2025-08-28 08:23:58 -04:00
psychedelicious
fbf8aa17c8 fix(ui): fetching imageDTO for video 2025-08-28 08:23:58 -04:00
psychedelicious
e55d39a20b tidy(ui): remove unused VideoAtPosition component 2025-08-28 08:23:58 -04:00
psychedelicious
3a1cedbced feat(ui): simpler layout for video player 2025-08-28 08:23:58 -04:00
Mary Hipp
3d9889e272 fix video styling 2025-08-28 08:23:58 -04:00
Mary Hipp
b2026d9c00 add runway back as a model and allow runway and veo3 to live together in peace and harmony 2025-08-28 08:23:58 -04:00
Mary Hipp
f631b5178f add runway to backend 2025-08-28 08:23:58 -04:00
Mary Hipp
8df3067599 update redux selection to have a list of images and/or videos, update image viewer to show either image or video depending on what is selected 2025-08-28 08:23:58 -04:00
Mary Hipp
b377b80446 lint 2025-08-28 08:23:58 -04:00
Mary Hipp
7828102b67 tsc 2025-08-28 08:23:58 -04:00
Mary Hipp
1b0d599dc2 lint the dang thing 2025-08-28 08:23:58 -04:00
psychedelicious
aa4e3adadb gallery 2025-08-28 08:23:58 -04:00
psychedelicious
637d19c22b Revert "feat(ui): consolidated gallery (wip)"
This reverts commit 12b70bca67.
2025-08-28 08:23:58 -04:00
Mary Hipp
45b4432833 add videos to change board modal 2025-08-28 08:23:58 -04:00
Mary Hipp
b71829a827 add resolution as a generation setting 2025-08-28 08:23:58 -04:00
Mary Hipp
d95a698ebd replace runway with veo, build out veo3 model support 2025-08-28 08:23:58 -04:00
Mary Hipp
49d569ec59 add Veo3 model support to backend 2025-08-28 08:23:58 -04:00
psychedelicious
6ef1c2a5e1 feat(ui): consolidated gallery (wip) 2025-08-28 08:23:58 -04:00
psychedelicious
0ec6d33086 feat(ui): gallery optimistic updates for video 2025-08-28 08:23:58 -04:00
psychedelicious
64dfa125d2 fix(ui): panel names on video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
67042e6dec stubbing out change board functionality 2025-08-28 08:23:58 -04:00
Mary Hipp
a918198d4f hook up starring, unstarring, and deleting single videos (no multiselect yet), adapt context menus to work for both images and videos and start on video context menu 2025-08-28 08:23:58 -04:00
Mary Hipp
288ac0a293 add readiness logic to video tab 2025-08-28 08:23:58 -04:00
psychedelicious
963c2ec60c feat(ui): more video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
79e8482b27 feat(ui): fiddle w/ video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
f98bbc32dd feat(ui): fiddle w/ video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
9380d8901c chore: ruff 2025-08-28 08:23:58 -04:00
psychedelicious
67de3f2d9b feat(nodes): update VideoField & VideoOutput 2025-08-28 08:23:58 -04:00
psychedelicious
530d20c1be feat(ui): add dnd target for video start frame 2025-08-28 08:23:58 -04:00
Mary Hipp
4d8bcad15b add duration and aspect ratio to video settings 2025-08-28 08:23:58 -04:00
Mary Hipp
5c93e53195 integrating video into gallery - thinking maybe a new category of image would make more senes 2025-08-28 08:23:58 -04:00
Mary Hipp
e9c4e12454 add noop video router 2025-08-28 08:23:58 -04:00
Mary Hipp
295b5a20a8 add video models 2025-08-28 08:23:58 -04:00
Mary Hipp
eff9c7b92f combine nodes that generate and save videos 2025-08-28 08:23:58 -04:00
Mary Hipp
07565d4015 build out adhoc video saving graph 2025-08-28 08:23:58 -04:00
Mary Hipp
94ba840948 push up updates for VideoField 2025-08-28 08:23:58 -04:00
Mary Hipp
bd251f8cce update VideoField 2025-08-28 08:23:58 -04:00
Mary Hipp
97719b0aab split out RunwayVideoOutput from VideoOutput 2025-08-28 08:23:58 -04:00
Mary Hipp
e89266bfe3 rough rough POC of video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
453ef1a220 video_output support 2025-08-28 08:23:58 -04:00
psychedelicious
faf8f0f291 chore: bump version to v6.5.0 2025-08-28 13:32:37 +10:00
psychedelicious
5d36499982 chore: update whatsnew 2025-08-28 13:32:37 +10:00
Linos
151d67a0cc translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2082 of 2082 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-28 13:02:16 +10:00
Riccardo Giovanetti
72431ff197 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2053 of 2082 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-28 13:02:16 +10:00
psychedelicious
0de1feed76 chore(ui): lint 2025-08-28 12:59:35 +10:00
psychedelicious
7ffb626dbe feat(ui): add image load errors to logging 2025-08-28 12:59:35 +10:00
psychedelicious
79753289b1 feat(ui): log image failed to load errors at error level 2025-08-28 12:59:35 +10:00
psychedelicious
bac4c05fd9 feat(ui): log "destroying module" at debug level 2025-08-28 12:59:35 +10:00
psychedelicious
8a3b5d2c6f fix(ui): do not cache canvas entities when they have no w/h 2025-08-28 12:59:35 +10:00
psychedelicious
309578c19a fix(ui): progress image gets stuck on viewer when generating on canvas 2025-08-28 12:55:36 +10:00
Mary Hipp
fd58e1d0f2 update copy for API models without w/h controls 2025-08-27 09:24:22 -04:00
psychedelicious
04ffb979ce fix(ui): deny the pull of the square 2025-08-27 08:56:15 -04:00
psychedelicious
35c00d5a83 chore(ui): lint 2025-08-27 08:56:15 -04:00
psychedelicious
c2b49d58f5 fix(ui): gemini 2.5 unsupported gen mode error message 2025-08-27 08:56:15 -04:00
psychedelicious
6ff6b40a35 feat(ui): support unknown output image dimensions on canvas
Gemini 2.5 Flash makes no guarantees about output image sizes. Our
existing logic always rendered staged images on Canvas at the bbox dims
- not the image's physical dimensions. When Gemini returns an image that
doesn't match the bbox, it would get squished.

To rectify this, the canvas staging area renderer is updated to render
its images using their physical dimensions, as opposed to their
configured dimensions (i.e. bbox).

A flag on CanvasObjectImage enables this rendering behaviour.

Then, when saving the image as a layer from staging area, we use the
physical dimensions.

When the bbox and physical dimensions do not match, the bbox is not
touched, so it won't exactly encompass the staged image. No point in
resizing the bbox if the dimensions don't match - the next image could
be a different size, and the sizes might not be valid (it's an external
resource, after all).
2025-08-27 08:56:15 -04:00
psychedelicious
1f1beda567 fix(ui): remove gemini aspect ratio checking in graph builder 2025-08-27 08:56:15 -04:00
psychedelicious
91d62eb242 fix(ui): update ref image type when switching to gemini 2025-08-27 08:56:15 -04:00
psychedelicious
013e02d08b feat(ui): show w/h, scaled bbox settings only when relevant 2025-08-27 08:56:15 -04:00
psychedelicious
115053972c feat(ui): handle api model determination in a clearer way w/ list of base models; use it in dimensions component 2025-08-27 08:56:15 -04:00
psychedelicious
bcab754ac2 docs(ui): add note about reactflow types 2025-08-27 08:56:15 -04:00
psychedelicious
f1a542aca2 docs(ui): add note about extraneous coordiantes in paramsSlice 2025-08-27 08:56:15 -04:00
psychedelicious
0701cc63a1 feat(ui): hide width/height sliders for api models
These models only support aspect ratio inputs; not pixel dimensions
2025-08-27 08:56:15 -04:00
psychedelicious
9337710b45 chore(ui): lint 2025-08-27 08:56:15 -04:00
psychedelicious
592ef5a9ee feat(ui): improved support model handling when switching models
- Disable LoRAs instead of deleting them when base model changes
- Update toast message to indicate that we may have _updated_ a model
(previously it just said cleared or disabled)
- Do not change ref image models if the new base model doesn't support
them. For example, changing from SDXL to Imagen does not update the ref
image model or alert the user, because Imagen does not support ref
images. Switching from Imagen to FLUX does update the ref image model
and alert the user. Just a bit less noisy.
2025-08-27 08:56:15 -04:00
psychedelicious
5fe39a3ae9 fix(ui): add gemini 2.5 to ref image supporting models 2025-08-27 08:56:15 -04:00
psychedelicious
1888c586ca feat(ui): do not prevent invoking when ref images are added but model does not support ref images 2025-08-27 08:56:15 -04:00
psychedelicious
88922a467e feat(ui): hide ref images UI when selected models does not support ref images 2025-08-27 08:56:15 -04:00
psychedelicious
84115e598c fix(ui): lock height slider when using api model 2025-08-27 08:56:15 -04:00
Mary Hipp
370fc67777 UI support for gemini 2.5 API model 2025-08-27 08:56:15 -04:00
Mary Hipp
fa810e1d02 add gemini 2.5 to base model 2025-08-27 08:56:15 -04:00
Attila Cseh
ec5043aa83 useNodeFieldElementExists turned private 2025-08-26 11:39:16 +10:00
Attila Cseh
9a2a0cef74 node field dnd logic updatedto prevent duplicates 2025-08-26 11:39:16 +10:00
Attila Cseh
c205c1d19e current board removed from options 2025-08-26 11:33:39 +10:00
Attila Cseh
ae1a815453 change board - sorting order of boards alphabetical 2025-08-26 11:33:39 +10:00
psychedelicious
687bc281e5 chore: prep for v6.5.0rc1 (#8479)
## Summary

Bump version

## Related Issues / Discussions

n/a

## QA Instructions

n/a

## Merge Plan

This is already released.

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2025-08-26 11:25:01 +10:00
psychedelicious
567316d753 chore: bump version to v6.5.0rc1 2025-08-25 18:10:18 +10:00
psychedelicious
53ac7c9d2c feat(ui): bbox aspect ratio lock is always inverted by shift 2025-08-25 17:59:20 +10:00
Riccardo Giovanetti
90be2a0cdf translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2050 of 2079 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-25 17:57:54 +10:00
Attila Cseh
c7fb8f69ae code review fixes 2025-08-25 17:53:59 +10:00
Attila Cseh
7fecb8e88b formatting fixed 2025-08-25 17:53:59 +10:00
Attila Cseh
ee6a2a6603 respect direction of selection in Gallery 2025-08-25 17:53:59 +10:00
Attila Cseh
2496ac19c4 remove input field from form 2025-08-25 16:33:09 +10:00
psychedelicious
e34ed199c9 feat(ui): respect aspect ratio when resizing bbox on canvas 2025-08-25 15:30:01 +10:00
psychedelicious
569533ef80 fix(ui): toggle bbox visiblity translation 2025-08-25 14:51:34 +10:00
psychedelicious
dfac73f9f0 fix(ui): disable color picker while middle-mouse panning canvas 2025-08-25 14:47:42 +10:00
psychedelicious
f4219d5db3 chore: uv lock 2025-08-23 14:17:56 +10:00
psychedelicious
04d1958e93 feat(app): vendor in invisible-watermark
Fixes errors like `AttributeError: module 'cv2.ximgproc' has no
attribute 'thinning'` which occur because there is a conflict between
our own `opencv-contrib-python` dependency and the `invisible-watermark`
library's `opencv-python`.
2025-08-23 14:17:56 +10:00
psychedelicious
47d7d93e78 fix(ui): float input precision
Determine the "base" step for floats. If no `multipleOf` is provided,
the "base" step is `undefined`, meaning the float can have any number of
decimal places.

The UI library applies its own step constraints though and is rounding to 3
decimal places. We probably need to update the logic in the UI library to
have truly arbitrary precision for float fields.
2025-08-22 13:35:59 +10:00
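
The "base"-step idea above generalizes; here is a minimal Python sketch of the intended behaviour (illustrative names only - the actual UI code is TypeScript and lives in the UI library):

    from decimal import Decimal

    def decimal_places(step: float | None) -> int | None:
        # None means no multipleOf constraint -> arbitrary precision.
        if step is None:
            return None
        # Count the decimal places of the step, e.g. 0.001 -> 3.
        exponent = Decimal(str(step)).normalize().as_tuple().exponent
        return max(0, -int(exponent))

    def constrain(value: float, step: float | None) -> float:
        # Round only when a base step exists; otherwise pass through unrounded.
        places = decimal_places(step)
        return value if places is None else round(value, places)

    print(constrain(0.12345, 0.001))  # 0.123
    print(constrain(0.12345, None))   # 0.12345 (unconstrained)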
psychedelicious
0e17950949 fix(ui): race condition when setting hf token and downloading model
I ran into a race condition where I set an HF token and it was valid, but
somehow this error toast still appeared. The conditional fell through to
an assertion that we never expected to reach, which crashed the UI.

Handled the unexpected case gracefully now.
2025-08-22 13:30:38 +10:00
psychedelicious
b0cfdc94b5 feat(ui): do not sample alpha in Canvas color picker
Closes #7897
2025-08-21 21:38:03 +10:00
psychedelicious
bb153b55d3 docs: update quick start 2025-08-21 21:26:09 +10:00
psychedelicious
93ef637d59 docs: update latest release links 2025-08-21 21:26:09 +10:00
Attila Cseh
c5689ca1a7 code review changes 2025-08-21 19:42:38 +10:00
Attila Cseh
008e421ad4 shuffle button on workflows 2025-08-21 19:42:38 +10:00
psychedelicious
28a77ab06c Revert "experiment: add non-lfs-tracked file to lfs-tracked dir"
This reverts commit 4f4b7ddfb0.
2025-08-21 15:49:20 +10:00
psychedelicious
be48d3c12d ci: give workflow perms to label/comment on pr 2025-08-21 15:49:20 +10:00
psychedelicious
518b21a49a experiment: add non-lfs-tracked file to lfs-tracked dir 2025-08-21 15:49:20 +10:00
psychedelicious
68825ca9eb ci: add workflow to catch incorrect usage of git-lfs 2025-08-21 15:49:20 +10:00
psychedelicious
73c5f0b479 chore: bump version to v6.4.0 2025-08-19 12:19:02 +10:00
psychedelicious
7b4e04cd7c git: move test LoRA to LFS 2025-08-19 11:56:59 +10:00
Linos
ae4368fabe translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2073 of 2073 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-19 10:28:35 +10:00
psychedelicious
df8e39a9e1 chore: bump version to v6.4.0rc2 2025-08-19 00:01:48 +10:00
psychedelicious
45b43de571 fix(ui): prevent node drag when editing title
Closes #8435
2025-08-18 23:20:28 +10:00
psychedelicious
6d18a72a05 fix(ui): fit to bbox when bbox is not aligned to 64px grid 2025-08-18 23:17:45 +10:00
Kent Keirsey
af58a75e97 Support PEFT Loras with Base_Model.model prefix (#8433)
* Support PEFT Loras with Base_Model.model prefix

* update tests

* ruff

* fix python complaints

* update kes

* format keys

* remove unneeded test
2025-08-18 09:14:46 -04:00
psychedelicious
fd4c3bd27a refactor: estimate working vae memory during encode/decode
- Move the estimation logic to utility functions
- Estimate memory _within_ the encode and decode methods, ensuring we
_always_ estimate working memory when running a VAE
2025-08-18 21:43:14 +10:00
psychedelicious
1f8a60ded2 fix(ui): export NumericalParameterConfig type 2025-08-18 21:38:17 +10:00
psychedelicious
b1b677997d chore: bump version to v6.4.0rc1 2025-08-18 21:34:09 +10:00
psychedelicious
f17b43d736 chore(ui): update whatsnew 2025-08-18 21:34:09 +10:00
psychedelicious
c009a50489 feat(ui): reduce storage persist debounce to 300ms
matches pre-server-backed-state-persistence value
2025-08-18 21:34:09 +10:00
psychedelicious
97a16c455c fix(ui): update board totals when generation completes 2025-08-18 21:34:09 +10:00
psychedelicious
a8a07598c8 chore: ruff 2025-08-18 21:14:00 +10:00
psychedelicious
23206e22e8 tests: skip excessively flaky MPS-specific tests in CI 2025-08-18 21:14:00 +10:00
psychedelicious
f4aba52b90 feat(ui): use flushSync for locateInGallery to ensure panel api calls finish before selecting image 2025-08-18 19:55:06 +10:00
psychedelicious
d17c273939 feat(ui): add locate in gallery button to current image buttons toolbar 2025-08-18 19:55:06 +10:00
psychedelicious
aeb5e7d50a feat(ui): hide locate in gallery from context when unable to actually locate
e.g. when on a tab that doesn't have a gallery, or the image is
intermediate
2025-08-18 19:55:06 +10:00
psychedelicious
580ad30832 feat(ui): use bold icon for locate in gallery 2025-08-18 19:55:06 +10:00
psychedelicious
6390f7d734 fix(ui): more reliable scrollIntoView/"Locate in Gallery"
Three changes needed to make scrollIntoView and "Locate in Gallery" work
reliably.

1. Use setTimeout to work around race condition with scrollIntoView in
gallery.

It was possible to call scrollIntoView before react-virtuoso was ready.
I think react-virtuoso was initialized but hadn't rendered/measured its
items yet, so when we scroll to e.g. index 742, the items have a zero
height, so it doesn't actually scroll down. Then the items render.

Setting a timeout here defers the scroll until after the next event loop
cycle, by which time we expect react-virtuoso to be ready.

2. Ensure the scrollIntoView effect in gallery triggers any time the
selection is touched by making its dependency the array of selected
images, not just the last selected image name.

The "locate in gallery" functionality works by selecting an image.
There's a reactive effect in the gallery that runs when the last
selected image changes and scrolls it into view.

But if you already have an image selected, selecting it again will not
change the image name bc it is a string primitive. The useEffect ignores
the selection.

So, if you clicked "locate in gallery" on an image that was already
selected, it wouldn't be scrolled into view - even if you had already
scrolled away from it.

To work around this, the effect now uses the whole selection array as
its dependency. Whenever the selection changes, we get a new array,
which triggers the effect.

3. Gallery slice had some checks to avoid creating a new array of
selected image names in state when the selected images didn't change.

For example, if image "abc" was selected, and we selected "abc" again,
instead of creating a new array with the same "abc" image, we bailed
early. IIRC this optimization addressed a rerender issue long ago.

This optimization needs to be removed in order for fix #2 above to work.
We now _want_ a new array whenever selection is set - even if it didn't
actually change.
2025-08-18 19:55:06 +10:00
psychedelicious
5ddbfefb6a feat(ui): add trace logging to scrollIntoView 2025-08-18 19:55:06 +10:00
psychedelicious
bbf5ed7956 fix(ui): use is_intermediate to determine if image is gallery image 2025-08-18 19:55:06 +10:00
Attila Cseh
19cd6eed08 locate in gallery image context menu 2025-08-18 19:55:06 +10:00
Attila Cseh
9c1eb263a8 new entity added above the currently selected one 2025-08-18 18:46:40 +10:00
Attila Cseh
75755189a7 prettier fixes 2025-08-18 18:46:40 +10:00
Attila Cseh
a9ab72d27d new layers created on the top of the existing layers 2025-08-18 18:46:40 +10:00
Attila Cseh
678eb34995 duplicate layer appear above original one 2025-08-18 18:46:40 +10:00
Attila Cseh
ef7050f560 merged layers order retained 2025-08-18 18:46:40 +10:00
Attila Cseh
9787d9de74 prettier fix 2025-08-18 18:30:08 +10:00
Attila Cseh
bb4a50bab2 confirmation before downloading starter bundle 2025-08-18 18:30:08 +10:00
Attila Cseh
f3554b4e1b prettier fixed 2025-08-14 21:10:21 +10:00
Attila Cseh
9dcb025241 build error fixed 2025-08-14 21:10:21 +10:00
Attila Cseh
ecf646066a CLIP skip value clamped 2025-08-14 21:10:21 +10:00
Attila Cseh
3fd10b68cd recall CLIP skip 2025-08-14 21:10:21 +10:00
Attila Cseh
6e32c7993c CLIP Skip zod schema created 2025-08-14 21:10:21 +10:00
Riccardo Giovanetti
8329533848 translationBot(ui): update translation (Italian)
Currently translated at 98.5% (2041 of 2071 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2039 of 2067 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-14 12:14:27 +10:00
psychedelicious
fc7157b029 fix(ui): do not add pos style prompt to metadata 2025-08-14 10:56:24 +10:00
psychedelicious
a1897f7490 chore(ui): lint 2025-08-14 10:56:24 +10:00
psychedelicious
a89b3efd14 feat(ui): remove SDXL style prompt from linear UI
This feature added a lot of unexpected complexity in graph building /
metadata recall and is an unintuitive user experience. 99% of the time, the
style prompt should be exactly the main prompt.

You can still use style prompts in workflows, but in an effort to reduce
complexity in the linear UI, we are removing this rarely-used feature.
2025-08-14 10:56:24 +10:00
jiangmencity
5259693ed1 chore: fix some comments
Signed-off-by: jiangmencity <jiangmen@52it.net>
2025-08-14 09:32:54 +10:00
Tikal
d77c24206d Update NODES.md 2025-08-14 09:18:47 +10:00
psychedelicious
c5069557f3 fix(mm): fail when model exists at path instead of finding unused new path
When installing a model, the previous, graceful logic would increment a
suffix on the destination path until found a free path for the model.

But because model file installation and record creation are not in a
transaction, we could end up moving the file successfully and fail to
create the record:
- User attempts to install an already-installed model
- Attempt to move the downloaded model from download tempdir to
destination path
- The path already exists
- Add `_1` or similar to the path until we find a path that is free
- Move the model
- Create the model record
- FK constraint violation bc we already have a model w/ that name, but
the model file has already been moved into the invokeai dir.

Closes #8416
2025-08-13 10:40:06 +10:00
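
A minimal sketch of the fail-fast behaviour described above (hypothetical helper names; the real logic lives in the model install service):

    from pathlib import Path

    def move_model_into_place(src: Path, dest: Path) -> Path:
        # Old behaviour: append _1, _2, ... to dest until a free path was
        # found. Because the file move and the DB record insert are not in a
        # transaction, that could orphan a moved file when the insert failed.
        if dest.exists():
            # New behaviour: fail before touching the filesystem, so the
            # downloaded file stays in its tempdir and nothing is orphaned.
            raise FileExistsError(f"A model already exists at {dest}")
        dest.parent.mkdir(parents=True, exist_ok=True)
        src.rename(dest)
        return dest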
psychedelicious
9b220f61bd translations(ui): add translation for gallery settings 2025-08-12 23:34:24 +10:00
psychedelicious
7fc3af12cc translations(ui): add translation for select your model in launchpad 2025-08-12 23:34:24 +10:00
psychedelicious
e2721b46b6 translations(ui): add atranslations for add/remove negative promtp 2025-08-12 23:34:24 +10:00
psychedelicious
17118a04bd feat(ui): dynamic dockview tab title translations
Requires a ui slice migration and reset of users' layout settings to
get the right titles into dockview params state, which is persisted.
2025-08-12 23:34:24 +10:00
psychedelicious
24788e3c83 fix(ui): input field error styling specificity 2025-08-12 23:30:34 +10:00
psychedelicious
056387c981 feat(ui): allow recall of prompt and seed on upscaling tab 2025-08-12 16:21:51 +10:00
psychedelicious
8a43d90273 fix(ui): positive prompt in upscale metadata 2025-08-12 16:21:51 +10:00
psychedelicious
4f9b9760db feat(ui): debounce persistence instead of throttle 2025-08-12 16:16:11 +10:00
psychedelicious
fdaddafa56 fix(mm): only add suffix to model paths when path is file 2025-08-12 15:31:43 +10:00
psychedelicious
23d59abbd7 chore: ruff 2025-08-12 10:51:05 +10:00
psychedelicious
cf7fa5bce8 perf(backend): clear torch cache after encoding each image in kontext extension
Slightly reduces VRAM allocations.
2025-08-12 10:51:05 +10:00
psychedelicious
39e41998bb feat(ui): use latent-space kontext ref image concat in flux graph
Prevents a large spike in VRAM when preparing to denoise w/ multiple ref
images.

There doesn't appear to be any difference in image quality / ref
adherence when concatenating in latent space vs image space, though
images _are_ different.
2025-08-12 10:51:05 +10:00
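
Roughly, the saving comes from concatenating already-encoded ref-image token sequences rather than compositing images before a single encode. A toy torch sketch (shapes are assumptions, not FLUX's actual packing):

    import torch

    # Two ref images, each already VAE-encoded and packed into a token
    # sequence of shape (batch, seq_len, channels). Shapes are illustrative.
    ref_a = torch.randn(1, 1024, 64)
    ref_b = torch.randn(1, 4096, 64)

    # Latent-space concat: join along the token axis. Peak memory is just the
    # two small latents plus the result - no full-resolution composite image
    # ever has to be materialized and encoded in one go.
    img_cond_seq = torch.cat([ref_a, ref_b], dim=1)
    print(img_cond_seq.shape)  # torch.Size([1, 5120, 64])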
psychedelicious
c6eff71b74 fix(backend): bug in kontext canvas dimension tracking when concating in latent space
We weren't tracking the canvas dimensions properly, which could result in
FLUX not properly "seeing" ref images after the first one.
2025-08-12 10:51:05 +10:00
psychedelicious
6ea4c47757 chore: ruff 2025-08-12 10:51:05 +10:00
psychedelicious
91f91aa835 feat(mm): prepare kontext latents before loading transformer
If the transformer fills up VRAM, then when we VAE encode kontext
latents, we'll need to first offload the transformer (partially, if
partial loading is enabled).

No need to do this - we can encode kontext latents before loading the
transformer to reduce model thrashing.
2025-08-12 10:51:05 +10:00
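
In sketch form, the reordering looks like this (hypothetical model-manager API, for illustration only):

    def run_denoise(model_manager, vae_key, transformer_key, ref_images):
        # 1) Encode the Kontext ref images first, while only the small VAE
        #    needs to be resident on the GPU.
        with model_manager.load(vae_key) as vae:
            kontext_latents = [vae.encode(img) for img in ref_images]
        # 2) Only now load the large transformer. Since encoding is already
        #    done, the transformer never has to be (partially) offloaded to
        #    make room for the VAE mid-denoise.
        with model_manager.load(transformer_key) as transformer:
            return transformer.denoise(kontext_latents)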
psychedelicious
ea7868d076 Revert "experiment(mm): investigate vae working memory calculations"
This reverts commit bc9ed57d5cd134dc7c9117395e91d22a3c4aa6de.
2025-08-12 10:51:05 +10:00
psychedelicious
7d86f00d82 feat(mm): implement working memory estimation for VAE encode for all models
Tell the model manager that we need some extra working memory for VAE
encoding operations to prevent OOMs.

See previous commit for investigation and determination of the magic
numbers used.

This safety measure is especially relevant now that we have FLUX Kontext
and may be encoding rather large ref images. Without the working memory
estimation we can OOM as we prepare for denoising.

See #8405 for an example of this issue on a very low VRAM system. It's
possible we can have the same issue on any GPU, though - just a matter
of hitting the right combination of models loaded.
2025-08-12 10:51:05 +10:00
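
The pattern behind those magic numbers appears in the diffs further down; a condensed Python sketch (the real helpers live in invokeai.backend.util.vae_working_memory):

    import torch

    LATENT_SCALE_FACTOR = 8  # latent-to-pixel upscale factor for these VAEs

    def estimate_vae_working_memory(latents: torch.Tensor, vae: torch.nn.Module) -> int:
        """Estimate VAE decode working memory in bytes."""
        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
        element_size = next(vae.parameters()).element_size()  # 2 (fp16) or 4 (fp32)
        scaling_constant = 2200  # determined experimentally (decode)
        return int(out_h * out_w * element_size * scaling_constant)

    # Per the investigation commit below, encode needs roughly 45% of the
    # decode amount, so an encode estimate can reuse the same formula with a
    # smaller constant.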
psychedelicious
7785061e7d experiment(mm): investigate vae working memory calculations
This commit includes a task delegated to Claude to investigate our VAE
working memory calculations and investigation results.

See VAE_INVESTIGATION.md for motivation and detail. Everything else is
its output.

Result data includes empirical measurements for all supported model
architectures at a variety of resolutions and fp16/fp32 precision.
Testing conducted on a 4090.

The summarized conclusion is that our working memory estimations for
decoding are spot-on, but encoding also needs some extra working memory.
Empirical measurements suggest ~45% of the amount needed for decoding.

A followup commit will implement working memory estimations for VAE
encoding with the goal of preventing unexpected OOMs during encode.
2025-08-12 10:51:05 +10:00
psychedelicious
3370052e54 fix(ui): restore deduping logic in node field element selectors
This is required for some publishing functionality
2025-08-11 22:50:05 +10:00
Attila Cseh
325dacd29c same field cannot be added to form multiple times in workflow editor 2025-08-11 22:50:05 +10:00
psychedelicious
f4981a6ba9 tidy(ui): minor cleanup 2025-08-11 22:37:46 +10:00
Attila Cseh
8c159942eb add to form icon included 2025-08-11 22:37:46 +10:00
Attila Cseh
deb4dc64af error nodes outlined in red 2025-08-11 22:37:46 +10:00
psychedelicious
1a11437b6f feat(ui): add hidden bbox hotkey to alert
If you accidentally hit the hotkey and hide the bbox it could be
difficult to figure out how to un-hide it without the hotkey called out
in the alert.
2025-08-11 22:30:45 +10:00
Attila Cseh
04572c94ad setting bbox visibility moved into render method 2025-08-11 22:30:45 +10:00
Attila Cseh
1e9e78089e Add toggle for bbox with hotkey 2025-08-11 22:30:45 +10:00
Heathen711
e65f93663d bugfix(container-builder) Use the mnt space instead of root space for docker images 2025-08-06 12:36:07 -04:00
psychedelicious
2a796fe25e chore: bump version to v6.3.0 2025-08-05 10:35:22 +10:00
174 changed files with 5007 additions and 2755 deletions

@@ -45,6 +45,9 @@ jobs:
     steps:
       - name: Free up more disk space on the runner
         # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
+        # the /mnt dir has 70GBs of free space
+        # /dev/sda1        74G   28K   70G   1% /mnt
+        # According to some online posts the /mnt is not always there, so checking before setting docker to use it
         run: |
           echo "----- Free space before cleanup"
           df -h
@@ -52,6 +55,11 @@ jobs:
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
           sudo swapoff /mnt/swapfile
           sudo rm -rf /mnt/swapfile
+          if [ -d /mnt ]; then
+            sudo chmod -R 777 /mnt
+            echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
+            sudo systemctl restart docker
+          fi
           echo "----- Free space after cleanup"
           df -h

.github/workflows/lfs-checks.yml (vendored normal file, +30 lines)
@@ -0,0 +1,30 @@
+# Checks that large files and LFS-tracked files are properly checked in with pointer format.
+# Uses https://github.com/ppremk/lfs-warning to detect LFS issues.
+name: 'lfs checks'
+
+on:
+  push:
+    branches:
+      - 'main'
+  pull_request:
+    types:
+      - 'ready_for_review'
+      - 'opened'
+      - 'synchronize'
+  merge_group:
+  workflow_dispatch:
+
+jobs:
+  lfs-check:
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    permissions:
+      # Required to label and comment on the PRs
+      pull-requests: write
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+      - name: check lfs files
+        uses: ppremk/lfs-warning@v3.3

@@ -265,7 +265,7 @@ If the key is unrecognized, this call raises an
 #### exists(key) -> AnyModelConfig
-Returns True if a model with the given key exists in the databsae.
+Returns True if a model with the given key exists in the database.
 #### search_by_path(path) -> AnyModelConfig
@@ -718,7 +718,7 @@ When downloading remote models is implemented, additional
 configuration information, such as list of trigger terms, will be
 retrieved from the HuggingFace and Civitai model repositories.
-The probed values can be overriden by providing a dictionary in the
+The probed values can be overridden by providing a dictionary in the
 optional `config` argument passed to `import_model()`. You may provide
 overriding values for any of the model's configuration
 attributes. Here is an example of setting the
@@ -841,7 +841,7 @@ variable.
 #### installer.start(invoker)
-The `start` method is called by the API intialization routines when
+The `start` method is called by the API initialization routines when
 the API starts up. Its effect is to call `sync_to_config()` to
 synchronize the model record store database with what's currently on
 disk.

@@ -16,7 +16,7 @@ We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contrib
 - @psychedelicious (Spencer Mabrito) - Web Team Leader
 - @joshistoast (Josh Corbett) - Web Development
 - @cheerio (Mary Rogers) - Lead Engineer & Web App Development
-- @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
+- @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
 - @sunija - Standalone version
 - @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
 - @ryanjdick (Ryan Dick) - Machine Learning & Training

@@ -33,30 +33,45 @@ Hardware requirements vary significantly depending on model and image output siz
 More detail on system requirements can be found [here](./requirements.md).
-## Step 2: Download
+## Step 2: Download and Set Up the Launcher
-Download the most recent launcher for your operating system:
+The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher.
-- [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
-- [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)
-- [Download for Linux](https://download.invoke.ai/Invoke%20Community%20Edition.AppImage)
+!!! info "Instructions for each OS"
-## Step 3: Install or Update
+    === "Windows"
-Run the launcher you just downloaded, click **Install** and follow the instructions to get set up.
+        - [Download for Windows](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe)
+        - Run the `EXE` to install the Launcher and start it.
+        - A desktop shortcut will be created; use this to run the Launcher in the future.
+        - You can delete the `EXE` file you downloaded.
+    === "macOS"
+        - [Download for macOS](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg)
+        - Open the `DMG` and drag the app into `Applications`.
+        - Run the Launcher using its entry in `Applications`.
+        - You can delete the `DMG` file you downloaded.
+    === "Linux"
+        - [Download for Linux](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage)
+        - You may need to edit the `AppImage` file properties and make it executable.
+        - Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it.
+        - Run the Launcher by double-clicking the `AppImage` or the shortcut you made.
+## Step 3: Install Invoke
+Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke.
 If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.
-!!! warning "Problem running the launcher on macOS"
+!!! tip "Updating"
-    macOS may not allow you to run the launcher. We are working to resolve this by signing the launcher executable. Until that is done, you can manually flag the launcher as safe:
+    The Launcher will check for updates for itself _and_ Invoke.
-    - Open the **Invoke Community Edition.dmg** file.
-    - Drag the launcher to **Applications**.
-    - Open a terminal.
-    - Run `xattr -d 'com.apple.quarantine' /Applications/Invoke\ Community\ Edition.app`.
-    You should now be able to run the launcher.
+    - When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself.
+    - When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke.
 ## Step 4: Launch

@@ -41,7 +41,7 @@ Nodes have a "Use Cache" option in their footer. This allows for performance imp
 There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
-### Noise
+### Create Latent Noise
 An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.

@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
 # TODO(ryand): This is effectively a copy of SD3ImageToLatentsInvocation and a subset of ImageToLatentsInvocation. We
 # should refactor to avoid this duplication.
@@ -38,7 +39,11 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
     @staticmethod
     def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, AutoencoderKL)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoencoderKL)
             vae.disable_tiling()
@@ -62,6 +67,8 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
         image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, AutoencoderKL)
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
         latents = latents.to("cpu")

@@ -6,7 +6,6 @@ from einops import rearrange
 from PIL import Image
 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
 # TODO(ryand): This is effectively a copy of SD3LatentsToImageInvocation and a subset of LatentsToImageInvocation. We
 # should refactor to avoid this duplication.
@@ -39,22 +39,15 @@ class CogView4LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
     vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)
         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL))
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),

@@ -64,6 +64,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
     Imagen3Model = "Imagen3ModelField"
     Imagen4Model = "Imagen4ModelField"
     ChatGPT4oModel = "ChatGPT4oModelField"
+    Gemini2_5Model = "Gemini2_5ModelField"
     FluxKontextModel = "FluxKontextModelField"
     # endregion

@@ -328,6 +328,21 @@ class FluxDenoiseInvocation(BaseInvocation):
             cfg_scale_end_step=self.cfg_scale_end_step,
         )
+        kontext_extension = None
+        if self.kontext_conditioning:
+            if not self.controlnet_vae:
+                raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
+            kontext_extension = KontextExtension(
+                context=context,
+                kontext_conditioning=self.kontext_conditioning
+                if isinstance(self.kontext_conditioning, list)
+                else [self.kontext_conditioning],
+                vae_field=self.controlnet_vae,
+                device=TorchDevice.choose_torch_device(),
+                dtype=inference_dtype,
+            )
         with ExitStack() as exit_stack:
             # Prepare ControlNet extensions.
             # Note: We do this before loading the transformer model to minimize peak memory (see implementation).
@@ -385,21 +400,6 @@ class FluxDenoiseInvocation(BaseInvocation):
                 dtype=inference_dtype,
             )
-            kontext_extension = None
-            if self.kontext_conditioning:
-                if not self.controlnet_vae:
-                    raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
-                kontext_extension = KontextExtension(
-                    context=context,
-                    kontext_conditioning=self.kontext_conditioning
-                    if isinstance(self.kontext_conditioning, list)
-                    else [self.kontext_conditioning],
-                    vae_field=self.controlnet_vae,
-                    device=TorchDevice.choose_torch_device(),
-                    dtype=inference_dtype,
-                )
             # Prepare Kontext conditioning if provided
             img_cond_seq = None
             img_cond_seq_ids = None

@@ -3,7 +3,6 @@ from einops import rearrange
 from PIL import Image
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -18,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux
 @invocation(
@@ -39,17 +39,11 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )
-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
     def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        assert isinstance(vae_info.model, AutoEncoder)
+        estimated_working_memory = estimate_vae_working_memory_flux(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype


@@ -15,6 +15,7 @@ from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux
@invocation(
@@ -41,8 +42,12 @@ class FluxVaeEncodeInvocation(BaseInvocation):
# TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
# There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
# should be used for VAE encode sampling.
assert isinstance(vae_info.model, AutoEncoder)
estimated_working_memory = estimate_vae_working_memory_flux(
operation="encode", image_tensor=image_tensor, vae=vae_info.model
)
generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
with vae_info as vae:
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoEncoder)
vae_dtype = next(iter(vae.parameters())).dtype
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)


@@ -27,6 +27,7 @@ from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl
@invocation(
@@ -52,11 +53,24 @@ class ImageToLatentsInvocation(BaseInvocation):
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
@staticmethod
@classmethod
def vae_encode(
vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor, tile_size: int = 0
cls,
vae_info: LoadedModel,
upcast: bool,
tiled: bool,
image_tensor: torch.Tensor,
tile_size: int = 0,
) -> torch.Tensor:
with vae_info as vae:
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
operation="encode",
image_tensor=image_tensor,
vae=vae_info.model,
tile_size=tile_size if tiled else None,
fp32=upcast,
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
orig_dtype = vae.dtype
if upcast:
@@ -113,6 +127,7 @@ class ImageToLatentsInvocation(BaseInvocation):
image = context.images.get_pil(self.image.image_name)
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
@@ -120,7 +135,11 @@ class ImageToLatentsInvocation(BaseInvocation):
context.util.signal_progress("Running VAE encoder")
latents = self.vae_encode(
vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
vae_info=vae_info,
upcast=self.fp32,
tiled=self.tiled or context.config.get().force_tiled_decode,
image_tensor=image_tensor,
tile_size=self.tile_size,
)
latents = latents.to("cpu")


@@ -27,6 +27,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl
@invocation(
@@ -53,39 +54,6 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
def _estimate_working_memory(
self, latents: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision). This estimate is accurate for both SD1 and SDXL.
element_size = 4 if self.fp32 else 2
scaling_constant = 2200 # Determined experimentally.
if use_tiling:
tile_size = self.tile_size
if tile_size == 0:
tile_size = vae.tile_sample_min_size
assert isinstance(tile_size, int)
out_h = tile_size
out_w = tile_size
working_memory = out_h * out_w * element_size * scaling_constant
# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
# and number of tiles. We could make this more precise in the future, but this should be good enough for
# most use cases.
working_memory = working_memory * 1.25
else:
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
working_memory = out_h * out_w * element_size * scaling_constant
if self.fp32:
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20
return int(working_memory)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
@@ -94,8 +62,13 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
estimated_working_memory = self._estimate_working_memory(latents, use_tiling, vae_info.model)
estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
operation="decode",
image_tensor=latents,
vae=vae_info.model,
tile_size=self.tile_size if use_tiling else None,
fp32=self.fp32,
)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),


@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3
@invocation(
@@ -34,7 +35,11 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
@staticmethod
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
with vae_info as vae:
assert isinstance(vae_info.model, AutoencoderKL)
estimated_working_memory = estimate_vae_working_memory_sd3(
operation="encode", image_tensor=image_tensor, vae=vae_info.model
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoencoderKL)
vae.disable_tiling()
@@ -58,6 +63,8 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, AutoencoderKL)
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")


@@ -6,7 +6,6 @@ from einops import rearrange
from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3
@invocation(
@@ -41,22 +41,15 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
input=Input.Connection,
)
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
return int(working_memory)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL))
estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
estimated_working_memory = estimate_vae_working_memory_sd3(
operation="decode", image_tensor=latents, vae=vae_info.model
)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),


@@ -186,8 +186,9 @@ class ModelInstallService(ModelInstallServiceBase):
info: AnyModelConfig = self._probe(Path(model_path), config) # type: ignore
if preferred_name := config.name:
# Careful! Don't use pathlib.Path(...).with_suffix - it will strip everything after the first dot.
preferred_name = f"{preferred_name}{model_path.suffix}"
if Path(model_path).is_file():
# Careful! Don't use pathlib.Path(...).with_suffix - it will strip everything after the first dot.
preferred_name = f"{preferred_name}{model_path.suffix}"
dest_path = (
self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
@@ -622,16 +623,13 @@ class ModelInstallService(ModelInstallServiceBase):
if old_path == new_path:
return old_path
if new_path.exists():
raise FileExistsError(f"Cannot move {old_path} to {new_path}: destination already exists")
new_path.parent.mkdir(parents=True, exist_ok=True)
# if path already exists then we jigger the name to make it unique
counter: int = 1
while new_path.exists():
path = new_path.with_stem(new_path.stem + f"_{counter:02d}")
if not path.exists():
new_path = path
counter += 1
move(old_path, new_path)
return new_path
def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):


@@ -106,8 +106,8 @@ class KontextExtension:
# Track cumulative dimensions for spatial tiling
# These track the running extent of the virtual canvas in latent space
h = 0 # Running height extent
w = 0 # Running width extent
canvas_h = 0 # Running canvas height
canvas_w = 0 # Running canvas width
vae_info = self._context.models.load(self._vae_field.vae)
@@ -131,12 +131,20 @@ class KontextExtension:
# Continue with VAE encoding
# Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
with vae_info as vae:
# Estimate working memory for encode operation (50% of decode memory requirements)
img_h = image_tensor.shape[-2]
img_w = image_tensor.shape[-1]
element_size = next(vae_info.model.parameters()).element_size()
scaling_constant = 1100 # 50% of decode scaling constant (2200)
estimated_working_memory = int(img_h * img_w * element_size * scaling_constant)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoEncoder)
vae_dtype = next(iter(vae.parameters())).dtype
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
# Use sample=False to get the distribution mean without noise
kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
TorchDevice.empty_cache()
# Extract tensor dimensions
batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
@@ -154,21 +162,33 @@ class KontextExtension:
kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
# Determine spatial offsets for this reference image
# - Compare the potential new canvas dimensions if we add the image vertically vs horizontally
# - Choose the placement that results in a more square-like canvas
h_offset = 0
w_offset = 0
if idx > 0: # First image starts at (0, 0)
# Check which placement would result in better canvas dimensions
# If adding to height would make the canvas taller than wide, tile horizontally
# Otherwise, tile vertically
if latent_height + h > latent_width + w:
# Calculate potential canvas dimensions for each tiling option
# Option 1: Tile vertically (below existing content)
potential_h_vertical = canvas_h + latent_height
# Option 2: Tile horizontally (to the right of existing content)
potential_w_horizontal = canvas_w + latent_width
# Choose arrangement that minimizes the maximum dimension
# This keeps the canvas closer to square, optimizing attention computation
if potential_h_vertical > potential_w_horizontal:
# Tile horizontally (to the right of existing images)
w_offset = w
w_offset = canvas_w
canvas_w = canvas_w + latent_width
canvas_h = max(canvas_h, latent_height)
else:
# Tile vertically (below existing images)
h_offset = h
h_offset = canvas_h
canvas_h = canvas_h + latent_height
canvas_w = max(canvas_w, latent_width)
else:
# First image - just set canvas dimensions
canvas_h = latent_height
canvas_w = latent_width
# Generate IDs with both index offset and spatial offsets
kontext_ids = generate_img_ids_with_offset(
@@ -182,11 +202,6 @@ class KontextExtension:
w_offset=w_offset,
)
# Update cumulative dimensions
# Track the maximum extent of the virtual canvas after placing this image
h = max(h, latent_height + h_offset)
w = max(w, latent_width + w_offset)
all_latents.append(kontext_latents_packed)
all_ids.append(kontext_ids)

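The placement heuristic above is easier to follow in isolation. Below is a minimal sketch of the same greedy rule, assuming each reference image is reduced to a (latent_height, latent_width) tuple; the helper name place_on_canvas and the toy sizes are illustrative, not part of the changeset.

def place_on_canvas(sizes):
    """Greedy placement: grow the virtual canvas toward the squarer option."""
    canvas_h, canvas_w = 0, 0
    placements = []  # (h_offset, w_offset) per image, in latent space
    for idx, (h, w) in enumerate(sizes):
        h_offset, w_offset = 0, 0
        if idx == 0:
            # First image anchors the canvas at (0, 0).
            canvas_h, canvas_w = h, w
        elif canvas_h + h > canvas_w + w:
            # Growing downward would exceed growing rightward: tile horizontally.
            w_offset = canvas_w
            canvas_w += w
            canvas_h = max(canvas_h, h)
        else:
            # Otherwise tile vertically, below the existing content.
            h_offset = canvas_h
            canvas_h += h
            canvas_w = max(canvas_w, w)
        placements.append((h_offset, w_offset))
    return placements, (canvas_h, canvas_w)

# Three 64x64 latents form an L-shaped 128x128 canvas instead of a 192-tall strip:
print(place_on_canvas([(64, 64), (64, 64), (64, 64)]))
# -> ([(0, 0), (64, 0), (0, 64)], (128, 128))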

@@ -0,0 +1,304 @@
# This file is vendored from https://github.com/ShieldMnt/invisible-watermark
#
# `invisible-watermark` is MIT licensed as of August 23, 2025, when the code was copied into this repo.
#
# Why we vendored it in:
# `invisible-watermark` has a dependency on `opencv-python`, which conflicts with Invoke's dependency on
# `opencv-contrib-python`. It's easier to copy the code over than complicate the installation process by
# requiring an extra post-install step of removing `opencv-python` and installing `opencv-contrib-python`.
import struct
import uuid
import base64
import cv2
import numpy as np
import pywt
class WatermarkEncoder(object):
def __init__(self, content=b""):
seq = np.array([n for n in content], dtype=np.uint8)
self._watermarks = list(np.unpackbits(seq))
self._wmLen = len(self._watermarks)
self._wmType = "bytes"
def set_by_ipv4(self, addr):
bits = []
ips = addr.split(".")
for ip in ips:
bits += list(np.unpackbits(np.array([int(ip) % 256], dtype=np.uint8)))
self._watermarks = bits
self._wmLen = len(self._watermarks)
self._wmType = "ipv4"
assert self._wmLen == 32
def set_by_uuid(self, uid):
u = uuid.UUID(uid)
self._wmType = "uuid"
seq = np.array([n for n in u.bytes], dtype=np.uint8)
self._watermarks = list(np.unpackbits(seq))
self._wmLen = len(self._watermarks)
def set_by_bytes(self, content):
self._wmType = "bytes"
seq = np.array([n for n in content], dtype=np.uint8)
self._watermarks = list(np.unpackbits(seq))
self._wmLen = len(self._watermarks)
def set_by_b16(self, b16):
content = base64.b16decode(b16)
self.set_by_bytes(content)
self._wmType = "b16"
def set_by_bits(self, bits=[]):
self._watermarks = [int(bit) % 2 for bit in bits]
self._wmLen = len(self._watermarks)
self._wmType = "bits"
def set_watermark(self, wmType="bytes", content=""):
if wmType == "ipv4":
self.set_by_ipv4(content)
elif wmType == "uuid":
self.set_by_uuid(content)
elif wmType == "bits":
self.set_by_bits(content)
elif wmType == "bytes":
self.set_by_bytes(content)
elif wmType == "b16":
self.set_by_b16(content)
else:
raise NameError("%s is not supported" % wmType)
def get_length(self):
return self._wmLen
# @classmethod
# def loadModel(cls):
# RivaWatermark.loadModel()
def encode(self, cv2Image, method="dwtDct", **configs):
(r, c, channels) = cv2Image.shape
if r * c < 256 * 256:
raise RuntimeError("image too small, should be larger than 256x256")
if method == "dwtDct":
embed = EmbedMaxDct(self._watermarks, wmLen=self._wmLen, **configs)
return embed.encode(cv2Image)
# elif method == 'dwtDctSvd':
# embed = EmbedDwtDctSvd(self._watermarks, wmLen=self._wmLen, **configs)
# return embed.encode(cv2Image)
# elif method == 'rivaGan':
# embed = RivaWatermark(self._watermarks, self._wmLen)
# return embed.encode(cv2Image)
else:
raise NameError("%s is not supported" % method)
class WatermarkDecoder(object):
def __init__(self, wm_type="bytes", length=0):
self._wmType = wm_type
if wm_type == "ipv4":
self._wmLen = 32
elif wm_type == "uuid":
self._wmLen = 128
elif wm_type == "bytes":
self._wmLen = length
elif wm_type == "bits":
self._wmLen = length
elif wm_type == "b16":
self._wmLen = length
else:
raise NameError("%s is unsupported" % wm_type)
def reconstruct_ipv4(self, bits):
ips = [str(ip) for ip in list(np.packbits(bits))]
return ".".join(ips)
def reconstruct_uuid(self, bits):
nums = np.packbits(bits)
bstr = b""
for i in range(16):
bstr += struct.pack(">B", nums[i])
return str(uuid.UUID(bytes=bstr))
def reconstruct_bits(self, bits):
# return ''.join([str(b) for b in bits])
return bits
def reconstruct_b16(self, bits):
bstr = self.reconstruct_bytes(bits)
return base64.b16encode(bstr)
def reconstruct_bytes(self, bits):
nums = np.packbits(bits)
bstr = b""
for i in range(self._wmLen // 8):
bstr += struct.pack(">B", nums[i])
return bstr
def reconstruct(self, bits):
if len(bits) != self._wmLen:
raise RuntimeError("bits are not matched with watermark length")
if self._wmType == "ipv4":
return self.reconstruct_ipv4(bits)
elif self._wmType == "uuid":
return self.reconstruct_uuid(bits)
elif self._wmType == "bits":
return self.reconstruct_bits(bits)
elif self._wmType == "b16":
return self.reconstruct_b16(bits)
else:
return self.reconstruct_bytes(bits)
def decode(self, cv2Image, method="dwtDct", **configs):
(r, c, channels) = cv2Image.shape
if r * c < 256 * 256:
raise RuntimeError("image too small, should be larger than 256x256")
bits = []
if method == "dwtDct":
embed = EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
bits = embed.decode(cv2Image)
# elif method == 'dwtDctSvd':
# embed = EmbedDwtDctSvd(watermarks=[], wmLen=self._wmLen, **configs)
# bits = embed.decode(cv2Image)
# elif method == 'rivaGan':
# embed = RivaWatermark(watermarks=[], wmLen=self._wmLen, **configs)
# bits = embed.decode(cv2Image)
else:
raise NameError("%s is not supported" % method)
return self.reconstruct(bits)
# @classmethod
# def loadModel(cls):
# RivaWatermark.loadModel()
class EmbedMaxDct(object):
def __init__(self, watermarks=[], wmLen=8, scales=[0, 36, 36], block=4):
self._watermarks = watermarks
self._wmLen = wmLen
self._scales = scales
self._block = block
def encode(self, bgr):
(row, col, channels) = bgr.shape
yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
for channel in range(2):
if self._scales[channel] <= 0:
continue
ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")
self.encode_frame(ca1, self._scales[channel])
yuv[: row // 4 * 4, : col // 4 * 4, channel] = pywt.idwt2((ca1, (v1, h1, d1)), "haar")
bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
return bgr_encoded
def decode(self, bgr):
(row, col, channels) = bgr.shape
yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
scores = [[] for i in range(self._wmLen)]
for channel in range(2):
if self._scales[channel] <= 0:
continue
ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")
scores = self.decode_frame(ca1, self._scales[channel], scores)
avgScores = list(map(lambda l: np.array(l).mean(), scores))
bits = np.array(avgScores) * 255 > 127
return bits
def decode_frame(self, frame, scale, scores):
(row, col) = frame.shape
num = 0
for i in range(row // self._block):
for j in range(col // self._block):
block = frame[
i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
]
score = self.infer_dct_matrix(block, scale)
# score = self.infer_dct_svd(block, scale)
wmBit = num % self._wmLen
scores[wmBit].append(score)
num = num + 1
return scores
def diffuse_dct_svd(self, block, wmBit, scale):
u, s, v = np.linalg.svd(cv2.dct(block))
s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))
def infer_dct_svd(self, block, scale):
u, s, v = np.linalg.svd(cv2.dct(block))
score = int((s[0] % scale) > scale * 0.5)
return score
def diffuse_dct_matrix(self, block, wmBit, scale):
pos = np.argmax(abs(block.flatten()[1:])) + 1
i, j = pos // self._block, pos % self._block
val = block[i][j]
if val >= 0.0:
block[i][j] = (val // scale + 0.25 + 0.5 * wmBit) * scale
else:
val = abs(val)
block[i][j] = -1.0 * (val // scale + 0.25 + 0.5 * wmBit) * scale
return block
def infer_dct_matrix(self, block, scale):
pos = np.argmax(abs(block.flatten()[1:])) + 1
i, j = pos // self._block, pos % self._block
val = block[i][j]
if val < 0:
val = abs(val)
if (val % scale) > 0.5 * scale:
return 1
else:
return 0
def encode_frame(self, frame, scale):
"""
frame is a matrix (M, N)
we get K (watermark bits size) blocks (self._block x self._block)
For i-th block, we encode watermark[i] bit into it
"""
(row, col) = frame.shape
num = 0
for i in range(row // self._block):
for j in range(col // self._block):
block = frame[
i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
]
wmBit = self._watermarks[(num % self._wmLen)]
diffusedBlock = self.diffuse_dct_matrix(block, wmBit, scale)
# diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
frame[
i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
] = diffusedBlock
num = num + 1

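For orientation, here is a hedged usage sketch of the vendored classes: a round trip that embeds and recovers a short byte string. Only the "dwtDct" method survives in this vendored copy, and inputs must be BGR arrays of at least 256x256; the mid-gray test image and the payload b"invoke" are assumptions for illustration, not from the changeset.

import numpy as np

bgr = np.full((512, 512, 3), 128, dtype=np.uint8)  # synthetic BGR test image

encoder = WatermarkEncoder()
encoder.set_watermark("bytes", b"invoke")
bgr_encoded = encoder.encode(bgr, method="dwtDct")

# The decoder needs the watermark length in bits: 6 bytes * 8 = 48.
decoder = WatermarkDecoder(wm_type="bytes", length=48)
recovered = decoder.decode(bgr_encoded, method="dwtDct")
print(recovered)  # b'invoke', when the embedding survives the color-space round trip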

@@ -6,13 +6,10 @@ configuration variable that allows the watermarking to be suppressed.
import cv2
import numpy as np
from imwatermark import WatermarkEncoder
from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
config = get_config()
from invokeai.backend.image_util.imwatermark.vendor import WatermarkEncoder
class InvisibleWatermark:


@@ -28,6 +28,7 @@ class BaseModelType(str, Enum):
CogView4 = "cogview4"
Imagen3 = "imagen3"
Imagen4 = "imagen4"
Gemini2_5 = "gemini-2.5"
ChatGPT4o = "chatgpt-4o"
FluxKontext = "flux-kontext"


@@ -18,16 +18,25 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
# First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())
# Next, check that this is likely a FLUX model by spot-checking a few keys.
expected_keys = [
# Check if keys use transformer prefix
transformer_prefix_keys = [
"transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
"transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
]
all_expected_keys_present = all(k in state_dict for k in expected_keys)
transformer_keys_present = all(k in state_dict for k in transformer_prefix_keys)
return all_keys_in_peft_format and all_expected_keys_present
# Check if keys use base_model.model prefix
base_model_prefix_keys = [
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight",
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight",
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
]
base_model_keys_present = all(k in state_dict for k in base_model_prefix_keys)
return all_keys_in_peft_format and (transformer_keys_present or base_model_keys_present)
def lora_model_from_flux_diffusers_state_dict(
@@ -49,8 +58,16 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
"""
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
# Determine which prefix is used and remove it from all keys.
# Check if any key starts with "base_model.model." prefix
has_base_model_prefix = any(k.startswith("base_model.model.") for k in grouped_state_dict.keys())
if has_base_model_prefix:
# Remove the "base_model.model." prefix from all keys.
grouped_state_dict = {k.replace("base_model.model.", ""): v for k, v in grouped_state_dict.items()}
else:
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
# Constants for FLUX.1
num_double_layers = 19

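As a quick illustration of the prefix handling above, the sketch below normalizes a PEFT-wrapped state dict the same way. The synthetic 1x1 tensors and truncated key set are illustrative assumptions only; the real detector spot-checks four specific keys before this step.

import torch

peft_wrapped = {
    "base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight": torch.zeros(1, 1),
    "base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight": torch.zeros(1, 1),
}

# Mirror of the logic above: strip whichever prefix is present.
if any(k.startswith("base_model.model.") for k in peft_wrapped):
    normalized = {k.replace("base_model.model.", ""): v for k, v in peft_wrapped.items()}
else:
    normalized = {k.replace("transformer.", ""): v for k, v in peft_wrapped.items()}

print(sorted(normalized))
# ['single_transformer_blocks.0.attn.to_q.lora_A.weight',
#  'single_transformer_blocks.0.attn.to_q.lora_B.weight']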

@@ -20,7 +20,7 @@ def main():
"/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
)
with log_time("Intialize FLUX transformer on meta device"):
with log_time("Initialize FLUX transformer on meta device"):
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
p = params["flux-schnell"]


@@ -33,7 +33,7 @@ def main():
)
# inference_dtype = torch.bfloat16
with log_time("Intialize FLUX transformer on meta device"):
with log_time("Initialize FLUX transformer on meta device"):
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
p = params["flux-schnell"]


@@ -27,7 +27,7 @@ def main():
"""
model_path = Path("/data/misc/text_encoder_2")
with log_time("Intialize T5 on meta device"):
with log_time("Initialize T5 on meta device"):
model_config = AutoConfig.from_pretrained(model_path)
with accelerate.init_empty_weights():
model = AutoModelForTextEncoding.from_config(model_config)


@@ -0,0 +1,117 @@
from typing import Literal
import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
def estimate_vae_working_memory_sd15_sdxl(
operation: Literal["encode", "decode"],
image_tensor: torch.Tensor,
vae: AutoencoderKL | AutoencoderTiny,
tile_size: int | None,
fp32: bool,
) -> int:
"""Estimate the working memory required to encode or decode the given tensor."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision). This estimate is accurate for both SD1 and SDXL.
element_size = 4 if fp32 else 2
# This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
# Encoding uses ~45% as much working memory as decoding.
scaling_constant = 2200 if operation == "decode" else 1100
latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
if tile_size is not None:
if tile_size == 0:
tile_size = vae.tile_sample_min_size
assert isinstance(tile_size, int)
h = tile_size
w = tile_size
working_memory = h * w * element_size * scaling_constant
# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
# and number of tiles. We could make this more precise in the future, but this should be good enough for
# most use cases.
working_memory = working_memory * 1.25
else:
h = latent_scale_factor_for_operation * image_tensor.shape[-2]
w = latent_scale_factor_for_operation * image_tensor.shape[-1]
working_memory = h * w * element_size * scaling_constant
if fp32:
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20
print(f"estimate_vae_working_memory_sd15_sdxl: {int(working_memory)}")
return int(working_memory)
def estimate_vae_working_memory_cogview4(
operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
h = latent_scale_factor_for_operation * image_tensor.shape[-2]
w = latent_scale_factor_for_operation * image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
# This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
# Encoding uses ~45% as much working memory as decoding.
scaling_constant = 2200 if operation == "decode" else 1100
working_memory = h * w * element_size * scaling_constant
print(f"estimate_vae_working_memory_cogview4: {int(working_memory)}")
return int(working_memory)
def estimate_vae_working_memory_flux(
operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoEncoder
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
out_h = latent_scale_factor_for_operation * image_tensor.shape[-2]
out_w = latent_scale_factor_for_operation * image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
# This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
# Encoding uses ~45% as much working memory as decoding.
scaling_constant = 2200 if operation == "decode" else 1100
working_memory = out_h * out_w * element_size * scaling_constant
print(f"estimate_vae_working_memory_flux: {int(working_memory)}")
return int(working_memory)
def estimate_vae_working_memory_sd3(
operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# Encode operations use approximately 50% of the memory required for decode operations
latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
h = latent_scale_factor_for_operation * image_tensor.shape[-2]
w = latent_scale_factor_for_operation * image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
# This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
# Encoding uses ~45% as much working memory as decoding.
scaling_constant = 2200 if operation == "decode" else 1100
working_memory = h * w * element_size * scaling_constant
print(f"estimate_vae_working_memory_sd3: {int(working_memory)}")
return int(working_memory)

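To make the formula concrete, here is a worked example with assumed values (not from the changeset): an untiled SDXL decode of a 1024x1024 image in fp16.

LATENT_SCALE_FACTOR = 8  # SD latents are 1/8 the pixel resolution
latent_h = latent_w = 128  # latents for a 1024x1024 image
element_size = 2  # fp16
scaling_constant = 2200  # decode

h = LATENT_SCALE_FACTOR * latent_h  # 1024
w = LATENT_SCALE_FACTOR * latent_w  # 1024
working_memory = h * w * element_size * scaling_constant
print(f"{working_memory / 2**30:.1f} GiB")  # ~4.3 GiB reserved before decoding

With tiling enabled, the same product is computed over the tile size instead and padded by 25% for tile overlap.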

@@ -38,6 +38,7 @@
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
"hideBoards": "Hide Boards",
"loading": "Loading...",
"locateInGalery": "Locate in Gallery",
"menuItemAutoAdd": "Auto-add to this Board",
"move": "Move",
"movingImagesToBoard_one": "Moving {{count}} image to board:",
@@ -114,6 +115,9 @@
"t2iAdapter": "T2I Adapter",
"positivePrompt": "Positive Prompt",
"negativePrompt": "Negative Prompt",
"removeNegativePrompt": "Remove Negative Prompt",
"addNegativePrompt": "Add Negative Prompt",
"selectYourModel": "Select Your Model",
"discordLabel": "Discord",
"dontAskMeAgain": "Don't ask me again",
"dontShowMeThese": "Don't show me these",
@@ -618,6 +622,10 @@
"title": "Fit Bbox To Masks",
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
},
"toggleBbox": {
"title": "Toggle Bbox Visibility",
"desc": "Hide or show the generation bounding box"
},
"applySegmentAnything": {
"title": "Apply Segment Anything",
"desc": "Apply the current Segment Anything mask.",
@@ -767,6 +775,7 @@
"allPrompts": "All Prompts",
"cfgScale": "CFG scale",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"clipSkip": "$t(parameters.clipSkip)",
"createdBy": "Created By",
"generationMode": "Generation Mode",
"guidance": "Guidance",
@@ -869,6 +878,9 @@
"install": "Install",
"installAll": "Install All",
"installRepo": "Install Repo",
"installBundle": "Install Bundle",
"installBundleMsg1": "Are you sure you want to install the {{bundleName}} bundle?",
"installBundleMsg2": "This bundle will install the following {{count}} models:",
"ipAdapters": "IP Adapters",
"learnMoreAboutSupportedModels": "Learn more about the models we support",
"load": "Load",
@@ -1287,6 +1299,7 @@
"remixImage": "Remix Image",
"usePrompt": "Use Prompt",
"useSeed": "Use Seed",
"useClipSkip": "Use CLIP Skip",
"width": "Width",
"gaussianBlur": "Gaussian Blur",
"boxBlur": "Box Blur",
@@ -1368,8 +1381,8 @@
"addedToBoard": "Added to board {{name}}'s assets",
"addedToUncategorized": "Added to board $t(boards.uncategorized)'s assets",
"baseModelChanged": "Base Model Changed",
"baseModelChangedCleared_one": "Cleared or disabled {{count}} incompatible submodel",
"baseModelChangedCleared_other": "Cleared or disabled {{count}} incompatible submodels",
"baseModelChangedCleared_one": "Updated, cleared or disabled {{count}} incompatible submodel",
"baseModelChangedCleared_other": "Updated, cleared or disabled {{count}} incompatible submodels",
"canceled": "Processing Canceled",
"connected": "Connected to Server",
"imageCopied": "Image Copied",
@@ -1937,8 +1950,11 @@
"zoomToNode": "Zoom to Node",
"nodeFieldTooltip": "To add a node field, click the small plus sign button on the field in the Workflow Editor, or drag the field by its name into the form.",
"addToForm": "Add to Form",
"removeFromForm": "Remove from Form",
"label": "Label",
"showDescription": "Show Description",
"showShuffle": "Show Shuffle",
"shuffle": "Shuffle",
"component": "Component",
"numberInput": "Number Input",
"singleLine": "Single Line",
@@ -2180,7 +2196,8 @@
"rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
"rgNoRegion": "no region drawn",
"fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill"
"fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill",
"bboxHidden": "Bounding box is hidden (shift+o to toggle)"
},
"errors": {
"unableToFindImage": "Unable to find image",
@@ -2672,8 +2689,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Studio state is saved to the server, allowing you to continue your work on any device.",
"Support for multiple reference images for FLUX Kontext (local model only)."
"Canvas: Color Picker does not sample alpha, bbox respects aspect ratio lock when resizing shuffle button for number fields in Workflow Builder, hide pixel dimension sliders when using a model that doesn't support them",
"Workflows: Add a Shuffle button to number input fields"
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",


@@ -128,7 +128,10 @@
"search": "Cerca",
"clear": "Cancella",
"compactView": "Vista compatta",
"fullView": "Vista completa"
"fullView": "Vista completa",
"removeNegativePrompt": "Rimuovi prompt negativo",
"addNegativePrompt": "Aggiungi prompt negativo",
"selectYourModel": "Seleziona il modello"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -410,6 +413,14 @@
"cancelSegmentAnything": {
"title": "Annulla Segment Anything",
"desc": "Annulla l'operazione Segment Anything corrente."
},
"fitBboxToLayers": {
"title": "Adatta il riquadro di delimitazione ai livelli",
"desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo ai livelli visibili"
},
"toggleBbox": {
"title": "Attiva/disattiva la visibilità del riquadro di delimitazione",
"desc": "Nascondi o mostra il riquadro di delimitazione della generazione"
}
},
"workflows": {
@@ -711,7 +722,10 @@
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
"browseAll": "Oppure scopri tutti i modelli disponibili:"
},
"launchpadTab": "Rampa di lancio"
"launchpadTab": "Rampa di lancio",
"installBundle": "Installa pacchetto",
"installBundleMsg1": "Vuoi davvero installare il pacchetto {{bundleName}}?",
"installBundleMsg2": "Questo pacchetto installerà i seguenti {{count}} modelli:"
},
"parameters": {
"images": "Immagini",
@@ -798,7 +812,6 @@
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
"fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con FLUX Kontext tramite BFL API",
"promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
"promptExpansionPending": "Espansione del prompt in corso"
},
@@ -828,7 +841,8 @@
"coherenceMinDenoise": "Min rid. rumore",
"recallMetadata": "Richiama i metadati",
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)",
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Visita le <LinkComponent>impostazioni account</LinkComponent> per effettuare l'upgrade."
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Visita le <LinkComponent>impostazioni account</LinkComponent> per effettuare l'upgrade.",
"useClipSkip": "Usa CLIP Skip"
},
"settings": {
"models": "Modelli",
@@ -881,8 +895,8 @@
"parameterSet": "Parametro richiamato",
"parameterNotSet": "Parametro non richiamato",
"problemCopyingImage": "Impossibile copiare l'immagine",
"baseModelChangedCleared_one": "Cancellato o disabilitato {{count}} sottomodello incompatibile",
"baseModelChangedCleared_many": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
"baseModelChangedCleared_one": "Aggiornato, cancellato o disabilitato {{count}} sottomodello incompatibile",
"baseModelChangedCleared_many": "Aggiornati, cancellati o disabilitati {{count}} sottomodelli incompatibili",
"baseModelChangedCleared_other": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
"loadedWithWarnings": "Flusso di lavoro caricato con avvisi",
"imageUploaded": "Immagine caricata",
@@ -1227,7 +1241,8 @@
"updateBoardError": "Errore durante l'aggiornamento della bacheca",
"uncategorizedImages": "Immagini non categorizzate",
"deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate.",
"locateInGalery": "Trova nella Galleria"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -1974,7 +1989,10 @@
"publishInProgress": "Pubblicazione in corso",
"selectingOutputNode": "Selezione del nodo di uscita",
"selectingOutputNodeDesc": "Fare clic su un nodo per selezionarlo come nodo di uscita del flusso di lavoro.",
"errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati"
"errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati",
"showShuffle": "Mostra Mescola",
"shuffle": "Mescola",
"removeFromForm": "Rimuovi dal modulo"
},
"loadMore": "Carica altro",
"searchPlaceholder": "Cerca per nome, descrizione o etichetta",
@@ -2455,7 +2473,8 @@
"ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
"ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
"rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato",
"fluxFillIncompatibleWithControlLoRA": "Il controllo LoRA non è compatibile con FLUX Fill"
"fluxFillIncompatibleWithControlLoRA": "Il controllo LoRA non è compatibile con FLUX Fill",
"bboxHidden": "Il riquadro di delimitazione è nascosto (Shift+o per attivarlo)"
},
"pasteTo": "Incolla su",
"pasteToBboxDesc": "Nuovo livello (nel riquadro di delimitazione)",
@@ -2685,8 +2704,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Lo stato dello studio viene salvato sul server, consentendoti di continuare a lavorare su qualsiasi dispositivo.",
"Supporto per più immagini di riferimento per FLUX Kontext (solo modello locale)."
"Tela: Color Picker non campiona l'alfa, il riquadro di delimitazione rispetta il blocco delle proporzioni quando si ridimensiona il pulsante Mescola per i campi numerici nel generatore di flusso di lavoro, nasconde i cursori delle dimensioni dei pixel quando si utilizza un modello che non li supporta",
"Flussi di lavoro: aggiunto un pulsante Mescola ai campi di input numerici"
]
},
"system": {


@@ -755,7 +755,6 @@
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
"promptExpansionPending": "プロンプト拡張が進行中",
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
},


@@ -55,7 +55,8 @@
"assetsWithCount_other": "{{count}} tài nguyên",
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại.",
"locateInGalery": "Vị Trí Ở Thư Viện Ảnh"
},
"gallery": {
"swapImages": "Đổi Hình Ảnh",
@@ -252,7 +253,10 @@
"clear": "Dọn Dẹp",
"compactView": "Chế Độ Xem Gọn",
"fullView": "Chế Độ Xem Đầy Đủ",
"options_withCount_other": "{{count}} thiết lập"
"options_withCount_other": "{{count}} thiết lập",
"removeNegativePrompt": "Xóa Lệnh Tiêu Cực",
"addNegativePrompt": "Thêm Lệnh Tiêu Cực",
"selectYourModel": "Chọn Model"
},
"prompt": {
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
@@ -492,6 +496,14 @@
"title": "Huỷ Segment Anything",
"desc": "Huỷ hoạt động Segment Anything hiện tại.",
"key": "esc"
},
"fitBboxToLayers": {
"title": "Xếp Vừa Hộp Giới Hạn Vào Layer",
"desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào layer nhìn thấy được"
},
"toggleBbox": {
"title": "Bật/Tắt Hiển Thị Hộp Giới Hạn",
"desc": "Ẩn hoặc hiện hộp giới hạn tạo sinh"
}
},
"workflows": {
@@ -865,7 +877,10 @@
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"installBundle": "Tải Xuống Gói",
"installBundleMsg1": "Bạn có chắc chắn muốn tải xuống gói {{bundleName}}?",
"installBundleMsg2": "Gói này sẽ tải xuống {{count}} model sau đây:"
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -898,7 +913,8 @@
"recallParameters": "Gợi Nhớ Tham Số",
"scheduler": "Scheduler",
"noMetaData": "Không tìm thấy metadata",
"imageDimensions": "Kích Thước Ảnh"
"imageDimensions": "Kích Thước Ảnh",
"clipSkip": "$t(parameters.clipSkip)"
},
"accordions": {
"generation": {
@@ -1641,7 +1657,6 @@
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với LUX Kontext thông qua BFL API",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
},
@@ -1707,7 +1722,8 @@
"upscaling": "Upscale",
"tileSize": "Kích Thước Khối",
"disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp."
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp.",
"useClipSkip": "Dùng CLIP Skip"
},
"dynamicPrompts": {
"seedBehaviour": {
@@ -2198,7 +2214,8 @@
"rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
"rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
"rgNoRegion": "không có khu vực được vẽ",
"fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill"
"fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill",
"bboxHidden": "Hộp giới hạn đang ẩn (shift+o để bật/tắt)"
},
"pasteTo": "Dán Vào",
"pasteToAssets": "Tài Nguyên",
@@ -2622,7 +2639,10 @@
"publishingValidationRunInProgress": "Quá trình kiểm tra tính hợp lệ đang diễn ra.",
"selectingOutputNodeDesc": "Bấm vào node để biến nó thành node đầu ra của workflow.",
"selectingOutputNode": "Chọn node đầu ra",
"errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata"
"errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata",
"removeFromForm": "Xóa Khỏi Vùng Nhập",
"showShuffle": "Hiện Xáo Trộn",
"shuffle": "Xáo Trộn"
},
"yourWorkflows": "Workflow Của Bạn",
"browseWorkflows": "Khám Phá Workflow",
@@ -2679,8 +2699,8 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
"Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
"Misc QoL: Bật/Tắt hiển thị hộp giới hạn, đánh dấu node bị lỗi, chặn lỗi thêm node vào vùng nhập nhiều lần, khả năng đọc lại metadata của CLIP Skip",
"Giảm lượng tiêu thụ VRAM cho các ảnh mẫu Kontext và mã hóa VAE"
]
},
"upsell": {


@@ -71,7 +71,7 @@ interface Props extends PropsWithChildren {
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistThrottle?: number;
storagePersistDebounce?: number;
}
const InvokeAIUI = ({
@@ -98,7 +98,7 @@ const InvokeAIUI = ({
loggingOverrides,
onClickGoToModelManager,
whatsNew,
storagePersistThrottle = 2000,
storagePersistDebounce = 300,
}: Props) => {
const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
const [didRehydrate, setDidRehydrate] = useState(false);
@@ -318,7 +318,7 @@ const InvokeAIUI = ({
const onRehydrated = () => {
setDidRehydrate(true);
};
const store = createStore({ persist: true, persistThrottle: storagePersistThrottle, onRehydrated });
const store = createStore({ persist: true, persistDebounce: storagePersistDebounce, onRehydrated });
setStore(store);
$store.set(store);
if (import.meta.env.MODE === 'development') {
@@ -333,7 +333,7 @@ const InvokeAIUI = ({
window.$store = undefined;
}
};
}, [storagePersistThrottle]);
}, [storagePersistDebounce]);
if (!store || !didRehydrate) {
return <Loading />;


@@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/store';
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { loraIsEnabledChanged } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
import {
@@ -12,6 +12,7 @@ import {
} from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { modelSelected } from 'features/parameters/store/actions';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
@@ -22,6 +23,7 @@ import {
isFluxKontextApiModelConfig,
isFluxKontextModelConfig,
isFluxReduxModelConfig,
isGemini2_5ModelConfig,
} from 'services/api/types';
const log = logger('models');
@@ -44,13 +46,13 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
if (didBaseModelChange) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;
let modelsUpdatedDisabledOrCleared = 0;
// handle incompatible loras
state.loras.loras.forEach((lora) => {
if (lora.model.base !== newBase) {
dispatch(loraDeleted({ id: lora.id }));
modelsCleared += 1;
dispatch(loraIsEnabledChanged({ id: lora.id, isEnabled: false }));
modelsUpdatedDisabledOrCleared += 1;
}
});
@@ -58,52 +60,57 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
const { vae } = state.params;
if (vae && vae.base !== newBase) {
dispatch(vaeSelected(null));
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
if (SUPPORTS_REF_IMAGES_BASE_MODELS.includes(newModel.base)) {
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
let newGlobalRefImageModel = null;
let newGlobalRefImageModel = null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'gemini-2.5') {
const gemini2_5Models = allRefImageModels.filter(isGemini2_5ModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(gemini2_5Models);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}
// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);
// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);
if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsCleared += 1;
if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsUpdatedDisabledOrCleared += 1;
}
}
}
@@ -128,17 +135,17 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
modelConfig: newRegionalRefImageModel,
})
);
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}
}
}
if (modelsCleared > 0) {
if (modelsUpdatedDisabledOrCleared > 0) {
toast({
id: 'BASE_MODEL_CHANGED',
title: t('toast.baseModelChanged'),
description: t('toast.baseModelChangedCleared', {
count: modelsCleared,
count: modelsUpdatedDisabledOrCleared,
}),
status: 'warning',
});

View File

@@ -184,7 +184,7 @@ const PERSISTED_KEYS = Object.values(SLICE_CONFIGS)
.filter((sliceConfig) => !!sliceConfig.persistConfig)
.map((sliceConfig) => sliceConfig.slice.reducerPath);
export const createStore = (options?: { persist?: boolean; persistThrottle?: number; onRehydrated?: () => void }) => {
export const createStore = (options?: { persist?: boolean; persistDebounce?: number; onRehydrated?: () => void }) => {
const store = configureStore({
reducer: rememberedRootReducer,
middleware: (getDefaultMiddleware) =>
@@ -204,7 +204,7 @@ export const createStore = (options?: { persist?: boolean; persistThrottle?: num
if (options?.persist) {
return enhancers.prepend(
rememberEnhancer(reduxRememberDriver, PERSISTED_KEYS, {
persistThrottle: options?.persistThrottle ?? 2000,
persistDebounce: options?.persistDebounce ?? 2000,
serialize,
unserialize,
prefix: '',
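
The persistThrottle → persistDebounce rename also changes when writes happen: a throttle saves at most once per interval while changes keep arriving, whereas a debounce waits until changes have stopped for the full interval. A minimal debounce sketch, assuming nothing about redux-remember's internals:

const debounce = <A extends unknown[]>(fn: (...args: A) => void, wait: number) => {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return (...args: A) => {
    clearTimeout(timer);
    timer = setTimeout(() => fn(...args), wait);
  };
};

// With persistDebounce: 2000, a burst of rapid state changes keeps resetting
// the timer, so the store is serialized once, 2s after the burst ends, rather
// than every 2s during it.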

View File

@@ -58,6 +58,7 @@ const zNumericalParameterConfig = z.object({
fineStep: z.number().default(8),
coarseStep: z.number().default(64),
});
export type NumericalParameterConfig = z.infer<typeof zNumericalParameterConfig>;
/**
* Configuration options for the InvokeAI UI.

View File

@@ -0,0 +1,5 @@
const randomFloat = (min: number, max: number): number => {
return Math.random() * (max - min + Number.EPSILON) + min;
};
export default randomFloat;
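
The Number.EPSILON nudge is presumably there because Math.random() returns values in [0, 1), which would make max unreachable; widening the range by epsilon puts results in [min, max + ε), so max is effectively attainable. Hypothetical usage:

const jitter = randomFloat(0.5, 1.5); // e.g. 1.2183379...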

View File

@@ -8,6 +8,7 @@ import {
isModalOpenChanged,
selectChangeBoardModalSlice,
} from 'features/changeBoardModal/store/slice';
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useListAllBoardsQuery } from 'services/api/endpoints/boards';
@@ -26,7 +27,8 @@ const selectIsModalOpen = createSelector(
const ChangeBoardModal = () => {
useAssertSingleton('ChangeBoardModal');
const dispatch = useAppDispatch();
const [selectedBoard, setSelectedBoard] = useState<string | null>();
const currentBoardId = useAppSelector(selectSelectedBoardId);
const [selectedBoardId, setSelectedBoardId] = useState<string | null>();
const { data: boards, isFetching } = useListAllBoardsQuery({ include_archived: true });
const isModalOpen = useAppSelector(selectIsModalOpen);
const imagesToChange = useAppSelector(selectImagesToChange);
@@ -35,15 +37,19 @@ const ChangeBoardModal = () => {
const { t } = useTranslation();
const options = useMemo<ComboboxOption[]>(() => {
return [{ label: t('boards.uncategorized'), value: 'none' }].concat(
(boards ?? []).map((board) => ({
label: board.board_name,
value: board.board_id,
}))
);
}, [boards, t]);
return [{ label: t('boards.uncategorized'), value: 'none' }]
.concat(
(boards ?? [])
.map((board) => ({
label: board.board_name,
value: board.board_id,
}))
.sort((a, b) => a.label.localeCompare(b.label))
)
.filter((board) => board.value !== currentBoardId);
}, [boards, currentBoardId, t]);
const value = useMemo(() => options.find((o) => o.value === selectedBoard), [options, selectedBoard]);
const value = useMemo(() => options.find((o) => o.value === selectedBoardId), [options, selectedBoardId]);
const handleClose = useCallback(() => {
dispatch(changeBoardReset());
@@ -51,27 +57,26 @@ const ChangeBoardModal = () => {
}, [dispatch]);
const handleChangeBoard = useCallback(() => {
if (!imagesToChange.length || !selectedBoard) {
if (!imagesToChange.length || !selectedBoardId) {
return;
}
if (selectedBoard === 'none') {
if (selectedBoardId === 'none') {
removeImagesFromBoard({ image_names: imagesToChange });
} else {
addImagesToBoard({
image_names: imagesToChange,
board_id: selectedBoard,
board_id: selectedBoardId,
});
}
setSelectedBoard(null);
dispatch(changeBoardReset());
}, [addImagesToBoard, dispatch, imagesToChange, removeImagesFromBoard, selectedBoard]);
}, [addImagesToBoard, dispatch, imagesToChange, removeImagesFromBoard, selectedBoardId]);
const onChange = useCallback<ComboboxOnChange>((v) => {
if (!v) {
return;
}
setSelectedBoard(v.value);
setSelectedBoardId(v.value);
}, []);
return (
@@ -89,7 +94,6 @@ const ChangeBoardModal = () => {
{t('boards.movingImagesToBoard', {
count: imagesToChange.length,
})}
:
</Text>
<FormControl isDisabled={isFetching}>
<Combobox

View File

@@ -0,0 +1,24 @@
import { Alert, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
export const CanvasAlertsBboxVisibility = memo(() => {
const { t } = useTranslation();
const canvasManager = useCanvasManager();
const isBboxHidden = useStore(canvasManager.tool.tools.bbox.$isBboxHidden);
if (!isBboxHidden) {
return null;
}
return (
<Alert status="warning" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
<AlertIcon />
<AlertTitle>{t('controlLayers.warnings.bboxHidden')}</AlertTitle>
</Alert>
);
});
CanvasAlertsBboxVisibility.displayName = 'CanvasAlertsBboxVisibility';

View File

@@ -10,13 +10,19 @@ import type {
ChatGPT4oModelConfig,
FLUXKontextModelConfig,
FLUXReduxModelConfig,
Gemini2_5ModelConfig,
IPAdapterModelConfig,
} from 'services/api/types';
type Props = {
modelKey: string | null;
onChangeModel: (
modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig
modelConfig:
| IPAdapterModelConfig
| FLUXReduxModelConfig
| ChatGPT4oModelConfig
| FLUXKontextModelConfig
| Gemini2_5ModelConfig
) => void;
};
@@ -28,7 +34,13 @@ export const RefImageModel = memo(({ modelKey, onChangeModel }: Props) => {
const _onChangeModel = useCallback(
(
modelConfig: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig | null
modelConfig:
| IPAdapterModelConfig
| FLUXReduxModelConfig
| ChatGPT4oModelConfig
| FLUXKontextModelConfig
| Gemini2_5ModelConfig
| null
) => {
if (!modelConfig) {
return;
@@ -39,7 +51,14 @@ export const RefImageModel = memo(({ modelKey, onChangeModel }: Props) => {
);
const getIsDisabled = useCallback(
(model: IPAdapterModelConfig | FLUXReduxModelConfig | ChatGPT4oModelConfig | FLUXKontextModelConfig): boolean => {
(
model:
| IPAdapterModelConfig
| FLUXReduxModelConfig
| ChatGPT4oModelConfig
| FLUXKontextModelConfig
| Gemini2_5ModelConfig
): boolean => {
return !areBasesCompatibleForRefImage(mainModelConfig, model);
},
[mainModelConfig]

View File

@@ -12,7 +12,7 @@ import {
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { selectBboxRect, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageNameToImageObject } from 'features/controlLayers/store/util';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import type { PropsWithChildren } from 'react';
import { createContext, memo, useContext, useEffect, useMemo, useState } from 'react';
import { getImageDTOSafe } from 'services/api/endpoints/images';
@@ -71,8 +71,8 @@ export const StagingAreaContextProvider = memo(({ children, sessionId }: PropsWi
},
onAccept: (item, imageDTO) => {
const bboxRect = selectBboxRect(store.getState());
const { x, y, width, height } = bboxRect;
const imageObject = imageNameToImageObject(imageDTO.image_name, { width, height });
const { x, y } = bboxRect;
const imageObject = imageDTOToImageObject(imageDTO);
const selectedEntityIdentifier = selectSelectedEntityIdentifier(store.getState());
const overrides: Partial<CanvasRasterLayerState> = {
position: { x, y },

View File

@@ -15,6 +15,7 @@ import { useCanvasEntityQuickSwitchHotkey } from 'features/controlLayers/hooks/u
import { useCanvasFilterHotkey } from 'features/controlLayers/hooks/useCanvasFilterHotkey';
import { useCanvasInvertMaskHotkey } from 'features/controlLayers/hooks/useCanvasInvertMaskHotkey';
import { useCanvasResetLayerHotkey } from 'features/controlLayers/hooks/useCanvasResetLayerHotkey';
import { useCanvasToggleBboxHotkey } from 'features/controlLayers/hooks/useCanvasToggleBboxHotkey';
import { useCanvasToggleNonRasterLayersHotkey } from 'features/controlLayers/hooks/useCanvasToggleNonRasterLayersHotkey';
import { useCanvasTransformHotkey } from 'features/controlLayers/hooks/useCanvasTransformHotkey';
import { useCanvasUndoRedoHotkeys } from 'features/controlLayers/hooks/useCanvasUndoRedoHotkeys';
@@ -31,6 +32,7 @@ export const CanvasToolbar = memo(() => {
useCanvasFilterHotkey();
useCanvasInvertMaskHotkey();
useCanvasToggleNonRasterLayersHotkey();
useCanvasToggleBboxHotkey();
return (
<Flex w="full" gap={2} alignItems="center" px={2}>

View File

@@ -1,7 +1,7 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import type { AppGetState } from 'app/store/store';
import { useAppDispatch, useAppStore } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
import { deepClone } from 'common/util/deepClone';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import {
@@ -16,7 +16,11 @@ import {
rgRefImageAdded,
} from 'features/controlLayers/store/canvasSlice';
import { selectBase, selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import {
selectCanvasSlice,
selectEntity,
selectSelectedEntityIdentifier,
} from 'features/controlLayers/store/selectors';
import type {
CanvasEntityIdentifier,
CanvasRegionalGuidanceState,
@@ -24,6 +28,7 @@ import type {
ControlLoRAConfig,
ControlNetConfig,
FluxKontextReferenceImageConfig,
Gemini2_5ReferenceImageConfig,
IPAdapterConfig,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
@@ -31,6 +36,7 @@ import {
initialChatGPT4oReferenceImage,
initialControlNet,
initialFluxKontextReferenceImage,
initialGemini2_5ReferenceImage,
initialIPAdapter,
initialT2IAdapter,
} from 'features/controlLayers/store/util';
@@ -72,7 +78,11 @@ export const selectDefaultControlAdapter = createSelector(
export const getDefaultRefImageConfig = (
getState: AppGetState
): IPAdapterConfig | ChatGPT4oReferenceImageConfig | FluxKontextReferenceImageConfig => {
):
| IPAdapterConfig
| ChatGPT4oReferenceImageConfig
| FluxKontextReferenceImageConfig
| Gemini2_5ReferenceImageConfig => {
const state = getState();
const mainModelConfig = selectMainModelConfig(state);
@@ -93,6 +103,12 @@ export const getDefaultRefImageConfig = (
return config;
}
if (base === 'gemini-2.5') {
const config = deepClone(initialGemini2_5ReferenceImage);
config.model = zModelIdentifierField.parse(mainModelConfig);
return config;
}
// Otherwise, find the first compatible IP Adapter model.
const modelConfig = ipAdapterModelConfigs.find((m) => m.base === base);
@@ -136,37 +152,49 @@ export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState):
export const useAddControlLayer = () => {
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const selectedControlLayer =
selectedEntityIdentifier?.type === 'control_layer' ? selectedEntityIdentifier.id : undefined;
const func = useCallback(() => {
const overrides = { controlAdapter: deepClone(initialControlNet) };
dispatch(controlLayerAdded({ isSelected: true, overrides }));
}, [dispatch]);
dispatch(controlLayerAdded({ isSelected: true, overrides, addAfter: selectedControlLayer }));
}, [dispatch, selectedControlLayer]);
return func;
};
export const useAddRasterLayer = () => {
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const selectedRasterLayer =
selectedEntityIdentifier?.type === 'raster_layer' ? selectedEntityIdentifier.id : undefined;
const func = useCallback(() => {
dispatch(rasterLayerAdded({ isSelected: true }));
}, [dispatch]);
dispatch(rasterLayerAdded({ isSelected: true, addAfter: selectedRasterLayer }));
}, [dispatch, selectedRasterLayer]);
return func;
};
export const useAddInpaintMask = () => {
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const selectedInpaintMask =
selectedEntityIdentifier?.type === 'inpaint_mask' ? selectedEntityIdentifier.id : undefined;
const func = useCallback(() => {
dispatch(inpaintMaskAdded({ isSelected: true }));
}, [dispatch]);
dispatch(inpaintMaskAdded({ isSelected: true, addAfter: selectedInpaintMask }));
}, [dispatch, selectedInpaintMask]);
return func;
};
export const useAddRegionalGuidance = () => {
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const selectedRegionalGuidance =
selectedEntityIdentifier?.type === 'regional_guidance' ? selectedEntityIdentifier.id : undefined;
const func = useCallback(() => {
dispatch(rgAdded({ isSelected: true }));
}, [dispatch]);
dispatch(rgAdded({ isSelected: true, addAfter: selectedRegionalGuidance }));
}, [dispatch, selectedRegionalGuidance]);
return func;
};

View File

@@ -0,0 +1,18 @@
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { useCallback } from 'react';
export const useCanvasToggleBboxHotkey = () => {
const canvasManager = useCanvasManager();
const handleToggleBboxVisibility = useCallback(() => {
canvasManager.tool.tools.bbox.toggleBboxVisibility();
}, [canvasManager]);
useRegisteredHotkeys({
id: 'toggleBbox',
category: 'canvas',
callback: handleToggleBboxVisibility,
dependencies: [handleToggleBboxVisibility],
});
};

View File

@@ -3,6 +3,7 @@ import {
selectIsChatGPT4o,
selectIsCogView4,
selectIsFluxKontext,
selectIsGemini2_5,
selectIsImagen3,
selectIsImagen4,
selectIsSD3,
@@ -19,21 +20,22 @@ export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
const isImagen4 = useAppSelector(selectIsImagen4);
const isFluxKontext = useAppSelector(selectIsFluxKontext);
const isChatGPT4o = useAppSelector(selectIsChatGPT4o);
const isGemini2_5 = useAppSelector(selectIsGemini2_5);
const isEntityTypeEnabled = useMemo<boolean>(() => {
switch (entityType) {
case 'regional_guidance':
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
case 'control_layer':
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
case 'inpaint_mask':
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
case 'raster_layer':
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o && !isGemini2_5;
default:
assert<Equals<typeof entityType, never>>(false);
}
}, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o]);
}, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, isGemini2_5]);
return isEntityTypeEnabled;
};

View File

@@ -372,6 +372,7 @@ export class CanvasCompositorModule extends CanvasModuleBase {
position: { x: Math.floor(rect.x), y: Math.floor(rect.y) },
},
mergedEntitiesToDelete: deleteMergedEntities ? entityIdentifiers.map(mapId) : [],
addAfter: entityIdentifiers.map(mapId).at(-1),
};
switch (type) {

View File

@@ -214,6 +214,9 @@ export class CanvasEntityObjectRenderer extends CanvasModuleBase {
const isVisible = this.parent.konva.layer.visible();
const isCached = this.konva.objectGroup.isCached();
// We should also never cache if the entity has no dimensions. Konva will log an error to console like this:
// Konva error: Can not cache the node. Width or height of the node equals 0. Caching is skipped.
if (isVisible && (force || !isCached)) {
this.log.trace('Caching object group');
this.konva.objectGroup.clearCache();

View File

@@ -482,13 +482,24 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
// "contain" means that the entity should be scaled to fit within the bbox, but it should not exceed the bbox.
const scale = Math.min(scaleX, scaleY);
// Center the shape within the bounding box
const offsetX = (rect.width - width * scale) / 2;
const offsetY = (rect.height - height * scale) / 2;
// Calculate the scaled dimensions
const scaledWidth = width * scale;
const scaledHeight = height * scale;
// Calculate centered position
const centerX = rect.x + (rect.width - scaledWidth) / 2;
const centerY = rect.y + (rect.height - scaledHeight) / 2;
// Round to grid and clamp to valid bounds
const roundedX = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
const roundedY = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
const x = clamp(roundedX, rect.x, rect.x + rect.width - scaledWidth);
const y = clamp(roundedY, rect.y, rect.y + rect.height - scaledHeight);
this.konva.proxyRect.setAttrs({
x: clamp(roundToMultiple(rect.x + offsetX, gridSize), rect.x, rect.x + rect.width),
y: clamp(roundToMultiple(rect.y + offsetY, gridSize), rect.y, rect.y + rect.height),
x,
y,
scaleX: scale,
scaleY: scale,
rotation: 0,
@@ -513,16 +524,32 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
const scaleX = rect.width / width;
const scaleY = rect.height / height;
// "cover" is the same as "contain", but we choose the larger scale to cover the shape
// "cover" means the entity should cover the entire bbox, potentially overflowing
const scale = Math.max(scaleX, scaleY);
// Center the shape within the bounding box
const offsetX = (rect.width - width * scale) / 2;
const offsetY = (rect.height - height * scale) / 2;
// Calculate the scaled dimensions
const scaledWidth = width * scale;
const scaledHeight = height * scale;
// Calculate position - center only if entity exceeds bbox
let x = rect.x;
let y = rect.y;
// If scaled width exceeds bbox width, center horizontally
if (scaledWidth > rect.width) {
const centerX = rect.x + (rect.width - scaledWidth) / 2;
x = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
}
// If scaled height exceeds bbox height, center vertically
if (scaledHeight > rect.height) {
const centerY = rect.y + (rect.height - scaledHeight) / 2;
y = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
}
this.konva.proxyRect.setAttrs({
x: roundToMultiple(rect.x + offsetX, gridSize),
y: roundToMultiple(rect.y + offsetY, gridSize),
x,
y,
scaleX: scale,
scaleY: scale,
rotation: 0,
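
Both placements share the same skeleton: scale, center, snap to grid, then constrain. A condensed sketch of the "contain" math above (clamp and roundToMultiple are stand-ins for the module's helpers):

const clamp = (v: number, lo: number, hi: number) => Math.min(Math.max(v, lo), hi);
const roundToMultiple = (v: number, m: number) => Math.round(v / m) * m;

const containPlacement = (
  rect: { x: number; y: number; width: number; height: number },
  width: number,
  height: number,
  gridSize: number
) => {
  // "contain": pick the smaller scale so the entity fits entirely inside the bbox.
  const scale = Math.min(rect.width / width, rect.height / height);
  const scaledWidth = width * scale;
  const scaledHeight = height * scale;
  const centerX = rect.x + (rect.width - scaledWidth) / 2;
  const centerY = rect.y + (rect.height - scaledHeight) / 2;
  // Snap to the grid, then clamp so the scaled entity cannot escape the bbox --
  // the old code clamped to rect.x + rect.width, which let the right/bottom
  // edges overshoot by up to the scaled size.
  const x = clamp(gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX, rect.x, rect.x + rect.width - scaledWidth);
  const y = clamp(gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY, rect.y, rect.y + rect.height - scaledHeight);
  return { x, y, scale };
};

// "cover" differs in two ways: scale = Math.max(...), and centering is applied
// only on the axis where the scaled entity overflows the bbox.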

View File

@@ -115,7 +115,7 @@ export abstract class CanvasModuleBase {
* ```
*/
destroy: () => void = () => {
this.log('Destroying module');
this.log.debug('Destroying module');
};
/**

View File

@@ -2,6 +2,7 @@ import { objectEquals } from '@observ33r/object-equals';
import { Mutex } from 'async-mutex';
import { deepClone } from 'common/util/deepClone';
import { withResultAsync } from 'common/util/result';
import { parseify } from 'common/util/serialize';
import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
@@ -10,12 +11,21 @@ import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase'
import type { CanvasSegmentAnythingModule } from 'features/controlLayers/konva/CanvasSegmentAnythingModule';
import type { CanvasStagingAreaModule } from 'features/controlLayers/konva/CanvasStagingAreaModule';
import { getKonvaNodeDebugAttrs, loadImage } from 'features/controlLayers/konva/util';
import type { CanvasImageState } from 'features/controlLayers/store/types';
import type { CanvasImageState, Dimensions } from 'features/controlLayers/store/types';
import { t } from 'i18next';
import Konva from 'konva';
import type { Logger } from 'roarr';
import type { JsonObject } from 'roarr/dist/types';
import { getImageDTOSafe } from 'services/api/endpoints/images';
type CanvasObjectImageConfig = {
usePhysicalDimensions: boolean;
};
const DEFAULT_CONFIG: CanvasObjectImageConfig = {
usePhysicalDimensions: false,
};
export class CanvasObjectImage extends CanvasModuleBase {
readonly type = 'object_image';
readonly id: string;
@@ -30,6 +40,9 @@ export class CanvasObjectImage extends CanvasModuleBase {
readonly log: Logger;
state: CanvasImageState;
config: CanvasObjectImageConfig;
konva: {
group: Konva.Group;
placeholder: { group: Konva.Group; rect: Konva.Rect; text: Konva.Text };
@@ -47,7 +60,8 @@ export class CanvasObjectImage extends CanvasModuleBase {
| CanvasEntityBufferObjectRenderer
| CanvasStagingAreaModule
| CanvasSegmentAnythingModule
| CanvasEntityFilterer
| CanvasEntityFilterer,
config = DEFAULT_CONFIG
) {
super();
this.id = state.id;
@@ -55,6 +69,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
this.manager = parent.manager;
this.path = this.manager.buildPath(this);
this.log = this.manager.buildLogger(this);
this.config = config;
this.log.debug({ state }, 'Creating module');
@@ -116,7 +131,10 @@ export class CanvasObjectImage extends CanvasModuleBase {
const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url, true));
if (imageElementResult.isErr()) {
// Image loading failed (e.g. the URL to the "physical" image is invalid)
this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));
this.onFailedToLoadImage(
t('controlLayers.unableToLoadImage', 'Unable to load image'),
parseify(imageElementResult.error)
);
return;
}
@@ -139,7 +157,10 @@ export class CanvasObjectImage extends CanvasModuleBase {
const imageElementResult = await withResultAsync(() => loadImage(dataURL, false));
if (imageElementResult.isErr()) {
// Image loading failed (e.g. the URL to the "physical" image is invalid)
this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));
this.onFailedToLoadImage(
t('controlLayers.unableToLoadImage', 'Unable to load image'),
parseify(imageElementResult.error)
);
return;
}
@@ -148,8 +169,8 @@ export class CanvasObjectImage extends CanvasModuleBase {
this.updateImageElement();
};
onFailedToLoadImage = (message: string) => {
this.log({ image: this.state.image }, message);
onFailedToLoadImage = (message: string, error?: JsonObject) => {
this.log.error({ image: this.state.image, error }, message);
this.konva.image?.visible(false);
this.isLoading = false;
this.isError = true;
@@ -157,9 +178,22 @@ export class CanvasObjectImage extends CanvasModuleBase {
this.konva.placeholder.group.visible(true);
};
getDimensions = (): Dimensions => {
if (this.config.usePhysicalDimensions && this.imageElement) {
return {
width: this.imageElement.width,
height: this.imageElement.height,
};
}
return {
width: this.state.image.width,
height: this.state.image.height,
};
};
updateImageElement = () => {
if (this.imageElement) {
const { width, height } = this.state.image;
const { width, height } = this.getDimensions();
if (this.konva.image) {
this.log.trace('Updating Konva image attrs');
@@ -196,7 +230,6 @@ export class CanvasObjectImage extends CanvasModuleBase {
this.log.trace({ state }, 'Updating image');
const { image } = state;
const { width, height } = image;
if (force || (!objectEquals(this.state, state) && !this.isLoading)) {
const release = await this.mutex.acquire();
@@ -212,7 +245,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
}
}
this.konva.image?.setAttrs({ width, height });
this.konva.image?.setAttrs(this.getDimensions());
this.state = state;
return true;
}

View File

@@ -230,7 +230,16 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {
if (imageSrc) {
const image = this._getImageFromSrc(imageSrc, width, height);
if (!this.image) {
this.image = new CanvasObjectImage({ id: 'staging-area-image', type: 'image', image }, this);
this.image = new CanvasObjectImage({ id: 'staging-area-image', type: 'image', image }, this, {
// Some models do not make guarantees about their output dimensions. This flag allows the staged images to
// render at their real dimensions, instead of the bbox size.
//
// When the image source is an image name, it is a final output image. In that case, we should use its
// physical dimensions. Otherwise, if it is a dataURL, that means it is a progress image. These come in at
// a smaller resolution and need to be stretched to fill the bbox, so we do not use the physical
// dimensions in that case.
usePhysicalDimensions: imageSrc.type === 'imageName',
});
await this.image.update(this.image.state, true);
this.konva.group.add(this.image.konva.group);
} else if (this.image.isLoading || this.image.isError) {

View File

@@ -231,7 +231,7 @@ export class CanvasStateApiModule extends CanvasModuleBase {
/**
* Sets the drawing color, pushing state to redux.
*/
setColor = (color: RgbaColor) => {
setColor = (color: Partial<RgbaColor>) => {
return this.store.dispatch(settingsColorChanged(color));
};

View File

@@ -30,7 +30,6 @@ const ALL_ANCHORS: string[] = [
'bottom-center',
'bottom-right',
];
const CORNER_ANCHORS: string[] = ['top-left', 'top-right', 'bottom-left', 'bottom-right'];
const NO_ANCHORS: string[] = [];
/**
@@ -66,6 +65,11 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
*/
$aspectRatioBuffer = atom(1);
/**
* Buffer to store the visibility of the bbox.
*/
$isBboxHidden = atom(false);
constructor(parent: CanvasToolModule) {
super();
this.id = getPrefixedId(this.type);
@@ -191,6 +195,9 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
// Update on busy state changes
this.subscriptions.add(this.manager.$isBusy.listen(this.render));
// Re-render when the bbox's visibility is toggled
this.subscriptions.add(this.$isBboxHidden.listen(this.render));
}
// This is a noop. The cursor is changed when the cursor enters or leaves the bbox.
@@ -206,13 +213,15 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
};
/**
* Renders the bbox. The bbox is only visible when the tool is set to 'bbox'.
* Renders the bbox.
*/
render = () => {
const tool = this.manager.tool.$tool.get();
const { x, y, width, height } = this.manager.stateApi.runSelector(selectBbox).rect;
this.konva.group.visible(!this.$isBboxHidden.get());
// We need to reach up to the preview layer to enable/disable listening so that the bbox can be interacted with.
// If the manager is busy, we disable listening so the bbox cannot be interacted with.
this.konva.group.listening(tool === 'bbox' && !this.manager.$isBusy.get());
@@ -334,9 +343,23 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
let width = roundToMultipleMin(this.konva.proxyRect.width() * this.konva.proxyRect.scaleX(), gridSize);
let height = roundToMultipleMin(this.konva.proxyRect.height() * this.konva.proxyRect.scaleY(), gridSize);
// If shift is held and we are resizing from a corner, retain aspect ratio - needs special handling. We skip this
// if alt/opt is held - this requires math too big for my brain.
if (shift && CORNER_ANCHORS.includes(anchor) && !alt) {
// When resizing the bbox using the transformer, we may need to do some extra math to maintain the current aspect
// ratio. Need to check a few things to determine if we should be maintaining the aspect ratio or not.
let shouldMaintainAspectRatio = false;
if (alt) {
// If alt is held, we are doing center-anchored transforming. In this case, maintaining aspect ratio is rather
// complicated.
shouldMaintainAspectRatio = false;
} else if (this.manager.stateApi.getBbox().aspectRatio.isLocked) {
// When the aspect ratio is locked, holding shift means we SHOULD NOT maintain the aspect ratio
shouldMaintainAspectRatio = !shift;
} else {
// When the aspect ratio is not locked, holding shift means we SHOULD maintain aspect ratio
shouldMaintainAspectRatio = shift;
}
if (shouldMaintainAspectRatio) {
// Fit the bbox to the last aspect ratio
let fittedWidth = Math.sqrt(width * height * this.$aspectRatioBuffer.get());
let fittedHeight = fittedWidth / this.$aspectRatioBuffer.get();
@@ -377,7 +400,7 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
// Update the aspect ratio buffer whenever the shift key is not held - this allows for a nice UX where you can start
// a transform, get the right aspect ratio, then hold shift to lock it in.
if (!shift) {
if (!shouldMaintainAspectRatio) {
this.$aspectRatioBuffer.set(bboxRect.width / bboxRect.height);
}
};
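
The three-way decision reduces to a small truth table: alt never maintains the ratio, and shift inverts whatever the lock state implies. As a one-liner (assumed signature, not the module's API):

const shouldMaintainAspectRatio = (alt: boolean, shift: boolean, isLocked: boolean): boolean =>
  alt ? false : isLocked ? !shift : shift;
// equivalently: !alt && (isLocked !== shift)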
@@ -478,4 +501,8 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
this.subscriptions.clear();
this.konva.group.destroy();
};
toggleBboxVisibility = () => {
this.$isBboxHidden.set(!this.$isBboxHidden.get());
};
}

View File

@@ -289,6 +289,14 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
this.manager.stage.setCursor('none');
};
getCanPick = () => {
if (this.manager.stage.getIsDragging()) {
return false;
}
return true;
};
/**
* Renders the color picker tool preview on the canvas.
*/
@@ -298,6 +306,11 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
return;
}
if (!this.getCanPick()) {
this.setVisibility(false);
return;
}
const cursorPos = this.parent.$cursorPos.get();
if (!cursorPos) {
@@ -406,11 +419,21 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
};
onStagePointerUp = (_e: KonvaEventObject<PointerEvent>) => {
const color = this.$colorUnderCursor.get();
this.manager.stateApi.setColor({ ...color, a: color.a / 255 });
if (!this.getCanPick()) {
this.setVisibility(false);
return;
}
const { a: _, ...color } = this.$colorUnderCursor.get();
this.manager.stateApi.setColor(color);
};
onStagePointerMove = (_e: KonvaEventObject<PointerEvent>) => {
if (!this.getCanPick()) {
this.setVisibility(false);
return;
}
this.syncColorUnderCursor();
};

View File

@@ -164,7 +164,7 @@ export class CanvasToolModule extends CanvasModuleBase {
const selectedEntityAdapter = this.manager.stateApi.getSelectedEntityAdapter();
if (this.manager.stage.getIsDragging()) {
this.tools.view.syncCursorStyle();
stage.setCursor('grabbing');
} else if (tool === 'view') {
this.tools.view.syncCursorStyle();
} else if (segmentingAdapter) {

View File

@@ -134,8 +134,8 @@ const slice = createSlice({
settingsEraserWidthChanged: (state, action: PayloadAction<CanvasSettingsState['eraserWidth']>) => {
state.eraserWidth = Math.round(action.payload);
},
settingsColorChanged: (state, action: PayloadAction<CanvasSettingsState['color']>) => {
state.color = action.payload;
settingsColorChanged: (state, action: PayloadAction<Partial<CanvasSettingsState['color']>>) => {
state.color = { ...state.color, ...action.payload };
},
settingsInvertScrollForToolWidthChanged: (
state,
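
Accepting Partial<RgbaColor> lets callers update individual channels without reading the current color first; the reducer spreads the payload over the existing value. A minimal sketch of the merge semantics (standalone, not the slice itself):

type RgbaColor = { r: number; g: number; b: number; a: number };
const mergeColor = (state: RgbaColor, payload: Partial<RgbaColor>): RgbaColor => ({ ...state, ...payload });

mergeColor({ r: 0, g: 0, b: 0, a: 1 }, { r: 255 }); // => { r: 255, g: 0, b: 0, a: 1 }

This is what lets the color picker above drop the alpha channel and dispatch only the RGB components.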

View File

@@ -72,12 +72,14 @@ import {
CHATGPT_ASPECT_RATIOS,
DEFAULT_ASPECT_RATIO_CONFIG,
FLUX_KONTEXT_ASPECT_RATIOS,
GEMINI_2_5_ASPECT_RATIOS,
getEntityIdentifier,
getInitialCanvasState,
IMAGEN_ASPECT_RATIOS,
isChatGPT4oAspectRatioID,
isFluxKontextAspectRatioID,
isFLUXReduxConfig,
isGemini2_5AspectRatioID,
isImagenAspectRatioID,
isIPAdapterConfig,
zCanvasState,
@@ -111,12 +113,16 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
const entityState = getRasterLayerState(id, overrides);
state.rasterLayers.entities.push(entityState);
const index = addAfter
? state.rasterLayers.entities.findIndex((e) => e.id === addAfter) + 1
: state.rasterLayers.entities.length;
state.rasterLayers.entities.splice(index, 0, entityState);
if (mergedEntitiesToDelete.length > 0) {
state.rasterLayers.entities = state.rasterLayers.entities.filter(
@@ -139,6 +145,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('raster_layer') },
}),
@@ -272,13 +279,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
const entityState = getControlLayerState(id, overrides);
state.controlLayers.entities.push(entityState);
const index = addAfter
? state.controlLayers.entities.findIndex((e) => e.id === addAfter) + 1
: state.controlLayers.entities.length;
state.controlLayers.entities.splice(index, 0, entityState);
if (mergedEntitiesToDelete.length > 0) {
state.controlLayers.entities = state.controlLayers.entities.filter(
@@ -300,6 +311,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('control_layer') },
}),
@@ -570,13 +582,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
const entityState = getRegionalGuidanceState(id, overrides);
state.regionalGuidance.entities.push(entityState);
const index = addAfter
? state.regionalGuidance.entities.findIndex((e) => e.id === addAfter) + 1
: state.regionalGuidance.entities.length;
state.regionalGuidance.entities.splice(index, 0, entityState);
if (mergedEntitiesToDelete.length > 0) {
state.regionalGuidance.entities = state.regionalGuidance.entities.filter(
@@ -598,6 +614,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('regional_guidance') },
}),
@@ -874,13 +891,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
const entityState = getInpaintMaskState(id, overrides);
state.inpaintMasks.entities.push(entityState);
const index = addAfter
? state.inpaintMasks.entities.findIndex((e) => e.id === addAfter) + 1
: state.inpaintMasks.entities.length;
state.inpaintMasks.entities.splice(index, 0, entityState);
if (mergedEntitiesToDelete.length > 0) {
state.inpaintMasks.entities = state.inpaintMasks.entities.filter(
@@ -902,6 +923,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('inpaint_mask') },
}),
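
All four entity reducers now share the same insert-after-selected pattern. A sketch of the shared logic (a hypothetical helper, not one extracted in the diff); note the edge case: if addAfter is set but not found, findIndex returns -1, so the new entity lands at index 0 rather than at the end:

const insertEntity = <T extends { id: string }>(entities: T[], entity: T, addAfter?: string) => {
  const index = addAfter ? entities.findIndex((e) => e.id === addAfter) + 1 : entities.length;
  entities.splice(index, 0, entity);
};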
@@ -1124,6 +1146,12 @@ const slice = createSlice({
state.bbox.rect.height = height;
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
state.bbox.aspectRatio.isLocked = true;
} else if (state.bbox.modelBase === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
state.bbox.rect.width = width;
state.bbox.rect.height = height;
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
state.bbox.aspectRatio.isLocked = true;
} else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
state.bbox.rect.width = width;
@@ -1249,25 +1277,33 @@ const slice = createSlice({
newEntity.name = `${newEntity.name} (Copy)`;
}
switch (newEntity.type) {
case 'raster_layer':
case 'raster_layer': {
newEntity.id = getPrefixedId('raster_layer');
state.rasterLayers.entities.push(newEntity);
const newEntityIndex = state.rasterLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.rasterLayers.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'control_layer':
}
case 'control_layer': {
newEntity.id = getPrefixedId('control_layer');
state.controlLayers.entities.push(newEntity);
const newEntityIndex = state.controlLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.controlLayers.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'regional_guidance':
}
case 'regional_guidance': {
newEntity.id = getPrefixedId('regional_guidance');
for (const refImage of newEntity.referenceImages) {
refImage.id = getPrefixedId('regional_guidance_ip_adapter');
}
state.regionalGuidance.entities.push(newEntity);
const newEntityIndex = state.regionalGuidance.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.regionalGuidance.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'inpaint_mask':
}
case 'inpaint_mask': {
newEntity.id = getPrefixedId('inpaint_mask');
state.inpaintMasks.entities.push(newEntity);
const newEntityIndex = state.inpaintMasks.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.inpaintMasks.entities.splice(newEntityIndex, 0, newEntity);
break;
}
}
state.selectedEntityIdentifier = getEntityIdentifier(newEntity);

View File

@@ -11,15 +11,26 @@ import {
CHATGPT_ASPECT_RATIOS,
DEFAULT_ASPECT_RATIO_CONFIG,
FLUX_KONTEXT_ASPECT_RATIOS,
GEMINI_2_5_ASPECT_RATIOS,
getInitialParamsState,
IMAGEN_ASPECT_RATIOS,
isChatGPT4oAspectRatioID,
isFluxKontextAspectRatioID,
isGemini2_5AspectRatioID,
isImagenAspectRatioID,
zParamsState,
} from 'features/controlLayers/store/types';
import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
import {
API_BASE_MODELS,
CLIP_SKIP_MAP,
SUPPORTS_ASPECT_RATIO_BASE_MODELS,
SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS,
SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS,
SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS,
SUPPORTS_REF_IMAGES_BASE_MODELS,
SUPPORTS_SEED_BASE_MODELS,
} from 'features/parameters/types/constants';
import type {
ParameterCanvasCoherenceMode,
ParameterCFGRescaleMultiplier,
@@ -107,14 +118,15 @@ const slice = createSlice({
return;
}
// Clamp CLIP skip layer count to the bounds of the new model
if (model.base === 'sdxl') {
// We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
state.clipSkip = 0;
} else {
const { maxClip } = CLIP_SKIP_MAP[model.base];
state.clipSkip = clamp(state.clipSkip, 0, maxClip);
if (API_BASE_MODELS.includes(model.base)) {
state.dimensions.aspectRatio.isLocked = true;
state.dimensions.aspectRatio.value = 1;
state.dimensions.aspectRatio.id = '1:1';
state.dimensions.rect.width = 1024;
state.dimensions.rect.height = 1024;
}
applyClipSkip(state, model, state.clipSkip);
},
vaeSelected: (state, action: PayloadAction<ParameterVAEModel | null>) => {
// null is a valid VAE!
@@ -170,7 +182,7 @@ const slice = createSlice({
state.vaePrecision = action.payload;
},
setClipSkip: (state, action: PayloadAction<number>) => {
state.clipSkip = action.payload;
applyClipSkip(state, state.model, action.payload);
},
shouldUseCpuNoiseChanged: (state, action: PayloadAction<boolean>) => {
state.shouldUseCpuNoise = action.payload;
@@ -181,15 +193,6 @@ const slice = createSlice({
negativePromptChanged: (state, action: PayloadAction<ParameterNegativePrompt>) => {
state.negativePrompt = action.payload;
},
positivePrompt2Changed: (state, action: PayloadAction<string>) => {
state.positivePrompt2 = action.payload;
},
negativePrompt2Changed: (state, action: PayloadAction<string>) => {
state.negativePrompt2 = action.payload;
},
shouldConcatPromptsChanged: (state, action: PayloadAction<boolean>) => {
state.shouldConcatPrompts = action.payload;
},
refinerModelChanged: (state, action: PayloadAction<ParameterSDXLRefinerModel | null>) => {
const result = zParamsState.shape.refinerModel.safeParse(action.payload);
if (!result.success) {
@@ -306,6 +309,12 @@ const slice = createSlice({
state.dimensions.rect.height = height;
state.dimensions.aspectRatio.value = state.dimensions.rect.width / state.dimensions.rect.height;
state.dimensions.aspectRatio.isLocked = true;
} else if (state.model?.base === 'gemini-2.5' && isGemini2_5AspectRatioID(id)) {
const { width, height } = GEMINI_2_5_ASPECT_RATIOS[id];
state.dimensions.rect.width = width;
state.dimensions.rect.height = height;
state.dimensions.aspectRatio.value = state.dimensions.rect.width / state.dimensions.rect.height;
state.dimensions.aspectRatio.isLocked = true;
} else if (state.model?.base === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
const { width, height } = FLUX_KONTEXT_ASPECT_RATIOS[id];
state.dimensions.rect.width = width;
@@ -375,6 +384,33 @@ const slice = createSlice({
},
});
const applyClipSkip = (state: { clipSkip: number }, model: ParameterModel | null, clipSkip: number) => {
if (model === null) {
return;
}
const maxClip = getModelMaxClipSkip(model);
state.clipSkip = clamp(clipSkip, 0, maxClip);
};
const hasModelClipSkip = (model: ParameterModel | null) => {
if (model === null) {
return false;
}
return getModelMaxClipSkip(model) > 0;
};
const getModelMaxClipSkip = (model: ParameterModel) => {
if (model.base === 'sdxl') {
// We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
return 0;
}
return CLIP_SKIP_MAP[model.base].maxClip;
};
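
// With setClipSkip now routed through applyClipSkip, out-of-range values are
// clamped against the active model rather than stored verbatim. Illustrative
// behaviour (a sketch; the hypothetical models stand in for real configs):
//   applyClipSkip(state, sd1Model, 99)  -> state.clipSkip === CLIP_SKIP_MAP['sd-1'].maxClip
//   applyClipSkip(state, sdxlModel, 5)  -> state.clipSkip === 0 (SDXL max is 0)
//   applyClipSkip(state, null, 5)       -> no-op; state.clipSkip unchanged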
const resetState = (state: ParamsState): ParamsState => {
// When a new session is requested, we need to keep the current model selections, plus dependent state
// like VAE precision. Everything else gets reset to default.
@@ -425,9 +461,6 @@ export const {
shouldUseCpuNoiseChanged,
positivePromptChanged,
negativePromptChanged,
positivePrompt2Changed,
negativePrompt2Changed,
shouldConcatPromptsChanged,
refinerModelChanged,
setRefinerSteps,
setRefinerCFGScale,
@@ -460,8 +493,7 @@ export const paramsSliceConfig: SliceConfig<typeof slice> = {
};
export const selectParamsSlice = (state: RootState) => state.params;
export const createParamsSelector = <T>(selector: Selector<ParamsState, T>) =>
createSelector(selectParamsSlice, selector);
const createParamsSelector = <T>(selector: Selector<ParamsState, T>) => createSelector(selectParamsSlice, selector);
export const selectBase = createParamsSelector((params) => params.model?.base);
export const selectIsSDXL = createParamsSelector((params) => params.model?.base === 'sdxl');
@@ -470,7 +502,6 @@ export const selectIsSD3 = createParamsSelector((params) => params.model?.base =
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
export const selectIsFluxKontextApi = createParamsSelector((params) => params.model?.base === 'flux-kontext');
export const selectIsFluxKontext = createParamsSelector((params) => {
if (params.model?.base === 'flux-kontext') {
return true;
@@ -481,6 +512,7 @@ export const selectIsFluxKontext = createParamsSelector((params) => {
return false;
});
export const selectIsChatGPT4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
export const selectIsGemini2_5 = createParamsSelector((params) => params.model?.base === 'gemini-2.5');
export const selectModel = createParamsSelector((params) => params.model);
export const selectModelKey = createParamsSelector((params) => params.model?.key);
@@ -497,7 +529,8 @@ export const selectCFGScale = createParamsSelector((params) => params.cfgScale);
export const selectGuidance = createParamsSelector((params) => params.guidance);
export const selectSteps = createParamsSelector((params) => params.steps);
export const selectCFGRescaleMultiplier = createParamsSelector((params) => params.cfgRescaleMultiplier);
export const selectCLIPSKip = createParamsSelector((params) => params.clipSkip);
export const selectCLIPSkip = createParamsSelector((params) => params.clipSkip);
export const selectHasModelCLIPSkip = createParamsSelector((params) => hasModelClipSkip(params.model));
export const selectCanvasCoherenceEdgeSize = createParamsSelector((params) => params.canvasCoherenceEdgeSize);
export const selectCanvasCoherenceMinDenoise = createParamsSelector((params) => params.canvasCoherenceMinDenoise);
export const selectCanvasCoherenceMode = createParamsSelector((params) => params.canvasCoherenceMode);
@@ -515,12 +548,33 @@ export const selectNegativePrompt = createParamsSelector((params) => params.nega
export const selectNegativePromptWithFallback = createParamsSelector((params) => params.negativePrompt ?? '');
export const selectHasNegativePrompt = createParamsSelector((params) => params.negativePrompt !== null);
export const selectModelSupportsNegativePrompt = createSelector(
[selectIsFLUX, selectIsChatGPT4o, selectIsFluxKontext],
(isFLUX, isChatGPT4o, isFluxKontext) => !isFLUX && !isChatGPT4o && !isFluxKontext
selectModel,
(model) => !!model && SUPPORTS_NEGATIVE_PROMPT_BASE_MODELS.includes(model.base)
);
export const selectModelSupportsSeed = createSelector(
selectModel,
(model) => !!model && SUPPORTS_SEED_BASE_MODELS.includes(model.base)
);
export const selectModelSupportsRefImages = createSelector(
selectModel,
(model) => !!model && SUPPORTS_REF_IMAGES_BASE_MODELS.includes(model.base)
);
export const selectModelSupportsAspectRatio = createSelector(
selectModel,
(model) => !!model && SUPPORTS_ASPECT_RATIO_BASE_MODELS.includes(model.base)
);
export const selectModelSupportsPixelDimensions = createSelector(
selectModel,
(model) => !!model && SUPPORTS_PIXEL_DIMENSIONS_BASE_MODELS.includes(model.base)
);
export const selectIsApiBaseModel = createSelector(
selectModel,
(model) => !!model && API_BASE_MODELS.includes(model.base)
);
export const selectModelSupportsOptimizedDenoising = createSelector(
selectModel,
(model) => !!model && SUPPORTS_OPTIMIZED_DENOISING_BASE_MODELS.includes(model.base)
);
export const selectPositivePrompt2 = createParamsSelector((params) => params.positivePrompt2);
export const selectNegativePrompt2 = createParamsSelector((params) => params.negativePrompt2);
export const selectShouldConcatPrompts = createParamsSelector((params) => params.shouldConcatPrompts);
export const selectScheduler = createParamsSelector((params) => params.scheduler);
export const selectSeamlessXAxis = createParamsSelector((params) => params.seamlessXAxis);
export const selectSeamlessYAxis = createParamsSelector((params) => params.seamlessYAxis);

View File

@@ -26,6 +26,7 @@ import {
initialChatGPT4oReferenceImage,
initialFluxKontextReferenceImage,
initialFLUXRedux,
initialGemini2_5ReferenceImage,
initialIPAdapter,
} from './util';
@@ -136,6 +137,16 @@ const slice = createSlice({
return;
}
if (entity.config.model.base === 'gemini-2.5') {
// Switching to Gemini 2.5 Flash Preview (nano banana) ref image
entity.config = {
...initialGemini2_5ReferenceImage,
image: entity.config.image,
model: entity.config.model,
};
return;
}
if (
entity.config.model.base === 'flux-kontext' ||
(entity.config.model.base === 'flux' && entity.config.model.name?.toLowerCase().includes('kontext'))

View File

@@ -14,9 +14,7 @@ import {
zParameterMaskBlurMethod,
zParameterModel,
zParameterNegativePrompt,
zParameterNegativeStylePromptSDXL,
zParameterPositivePrompt,
zParameterPositiveStylePromptSDXL,
zParameterPrecision,
zParameterScheduler,
zParameterSDXLRefinerModel,
@@ -266,6 +264,13 @@ const zChatGPT4oReferenceImageConfig = z.object({
});
export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceImageConfig>;
const zGemini2_5ReferenceImageConfig = z.object({
type: z.literal('gemini_2_5_reference_image'),
image: zImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
});
export type Gemini2_5ReferenceImageConfig = z.infer<typeof zGemini2_5ReferenceImageConfig>;
const zFluxKontextReferenceImageConfig = z.object({
type: z.literal('flux_kontext_reference_image'),
image: zImageWithDims.nullable(),
@@ -288,6 +293,7 @@ export const zRefImageState = z.object({
zFLUXReduxConfig,
zChatGPT4oReferenceImageConfig,
zFluxKontextReferenceImageConfig,
zGemini2_5ReferenceImageConfig,
]),
});
export type RefImageState = z.infer<typeof zRefImageState>;
@@ -300,10 +306,15 @@ export const isFLUXReduxConfig = (config: RefImageState['config']): config is FL
export const isChatGPT4oReferenceImageConfig = (
config: RefImageState['config']
): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
export const isFluxKontextReferenceImageConfig = (
config: RefImageState['config']
): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
export const isGemini2_5ReferenceImageConfig = (
config: RefImageState['config']
): config is Gemini2_5ReferenceImageConfig => config.type === 'gemini_2_5_reference_image';
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
export type FillStyle = z.infer<typeof zFillStyle>;
export const isFillStyle = (v: unknown): v is FillStyle => zFillStyle.safeParse(v).success;
@@ -449,6 +460,14 @@ export const CHATGPT_ASPECT_RATIOS: Record<ChatGPT4oAspectRatio, Dimensions> = {
'2:3': { width: 1024, height: 1536 },
} as const;
export const zGemini2_5AspectRatioID = z.enum(['1:1']);
type Gemini2_5AspectRatio = z.infer<typeof zGemini2_5AspectRatioID>;
export const isGemini2_5AspectRatioID = (v: unknown): v is Gemini2_5AspectRatio =>
zGemini2_5AspectRatioID.safeParse(v).success;
export const GEMINI_2_5_ASPECT_RATIOS: Record<Gemini2_5AspectRatio, Dimensions> = {
'1:1': { width: 1024, height: 1024 },
} as const;
export const zFluxKontextAspectRatioID = z.enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']);
type FluxKontextAspectRatio = z.infer<typeof zFluxKontextAspectRatioID>;
export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer<typeof zFluxKontextAspectRatioID> =>
@@ -493,6 +512,8 @@ const zBboxState = z.object({
});
const zDimensionsState = z.object({
// TODO(psyche): There is no concept of x/y coords for the dimensions state here... It's just width and height.
// Remove the extraneous data.
rect: z.object({
x: z.number().int(),
y: z.number().int(),
@@ -534,9 +555,6 @@ export const zParamsState = z.object({
shouldUseCpuNoise: z.boolean(),
positivePrompt: zParameterPositivePrompt,
negativePrompt: zParameterNegativePrompt,
positivePrompt2: zParameterPositiveStylePromptSDXL,
negativePrompt2: zParameterNegativeStylePromptSDXL,
shouldConcatPrompts: z.boolean(),
refinerModel: zParameterSDXLRefinerModel.nullable(),
refinerSteps: z.number(),
refinerCFGScale: z.number(),
@@ -584,9 +602,6 @@ export const getInitialParamsState = (): ParamsState => ({
shouldUseCpuNoise: true,
positivePrompt: '',
negativePrompt: null,
positivePrompt2: '',
negativePrompt2: '',
shouldConcatPrompts: true,
refinerModel: null,
refinerSteps: 20,
refinerCFGScale: 7.5,
@@ -663,7 +678,12 @@ export const getInitialRefImagesState = (): RefImagesState => ({
export const zCanvasReferenceImageState_OLD = zCanvasEntityBase.extend({
type: z.literal('reference_image'),
ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zChatGPT4oReferenceImageConfig]),
ipAdapter: z.discriminatedUnion('type', [
zIPAdapterConfig,
zFLUXReduxConfig,
zChatGPT4oReferenceImageConfig,
zGemini2_5ReferenceImageConfig,
]),
});
export const zCanvasMetadata = z.object({

View File

@@ -10,9 +10,9 @@ import type {
ChatGPT4oReferenceImageConfig,
ControlLoRAConfig,
ControlNetConfig,
Dimensions,
FluxKontextReferenceImageConfig,
FLUXReduxConfig,
Gemini2_5ReferenceImageConfig,
ImageWithDims,
IPAdapterConfig,
RefImageState,
@@ -38,22 +38,6 @@ export const imageDTOToImageObject = (imageDTO: ImageDTO, overrides?: Partial<Ca
};
};
export const imageNameToImageObject = (
imageName: string,
dimensions: Dimensions,
overrides?: Partial<CanvasImageState>
): CanvasImageState => {
return {
id: getPrefixedId('image'),
type: 'image',
image: {
image_name: imageName,
...dimensions,
},
...overrides,
};
};
export const imageDTOToImageWithDims = ({ image_name, width, height }: ImageDTO): ImageWithDims => ({
image_name,
width,
@@ -105,6 +89,11 @@ export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
image: null,
model: null,
};
export const initialGemini2_5ReferenceImage: Gemini2_5ReferenceImageConfig = {
type: 'gemini_2_5_reference_image',
image: null,
model: null,
};
export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
type: 'flux_kontext_reference_image',
image: null,

View File

@@ -7,13 +7,7 @@ import { useGallerySearchTerm } from 'features/gallery/components/ImageGrid/useG
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { galleryViewChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice';
import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context';
import {
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_ID,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
} from 'features/ui/layouts/shared';
import { useCollapsibleGridviewPanel } from 'features/ui/layouts/use-collapsible-gridview-panel';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import type { CSSProperties } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -34,16 +28,8 @@ export const GalleryPanel = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { tab } = useAutoLayoutContext();
const collapsibleApi = useCollapsibleGridviewPanel(
tab,
GALLERY_PANEL_ID,
'vertical',
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX
);
const isCollapsed = useStore(collapsibleApi.$isCollapsed);
const galleryPanel = useGalleryPanel(tab);
const isCollapsed = useStore(galleryPanel.$isCollapsed);
const galleryView = useAppSelector(selectGalleryView);
const initialSearchTerm = useAppSelector(selectSearchTerm);
const searchDisclosure = useDisclosure(!!initialSearchTerm);
@@ -58,11 +44,11 @@ export const GalleryPanel = memo(() => {
const handleClickSearch = useCallback(() => {
onResetSearchTerm();
if (!searchDisclosure.isOpen && collapsibleApi.$isCollapsed.get()) {
collapsibleApi.expand();
if (!searchDisclosure.isOpen && galleryPanel.$isCollapsed.get()) {
galleryPanel.expand();
}
searchDisclosure.toggle();
}, [collapsibleApi, onResetSearchTerm, searchDisclosure]);
}, [galleryPanel, onResetSearchTerm, searchDisclosure]);
const selectedBoardId = useAppSelector(selectSelectedBoardId);
const boardName = useBoardName(selectedBoardId);
@@ -73,7 +59,7 @@ export const GalleryPanel = memo(() => {
<Button
size="sm"
variant="ghost"
onClick={collapsibleApi.toggle}
onClick={galleryPanel.toggle}
leftIcon={isCollapsed ? <PiCaretDownBold /> : <PiCaretUpBold />}
noOfLines={1}
>

View File

@@ -40,7 +40,7 @@ export const GallerySettingsPopover = memo(() => {
<PopoverBody>
<Flex direction="column" gap={2}>
<Text fontWeight="semibold" color="base.300">
Gallery Settings
{t('gallery.gallerySettings')}
</Text>
<Divider />

View File

@@ -0,0 +1,39 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useMemo } from 'react';
import { flushSync } from 'react-dom';
import { useTranslation } from 'react-i18next';
import { PiCrosshairBold } from 'react-icons/pi';
export const ImageMenuItemLocateInGalery = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
const activeTab = useAppSelector(selectActiveTab);
const galleryPanel = useGalleryPanel(activeTab);
const isGalleryImage = useMemo(() => {
return !imageDTO.is_intermediate;
}, [imageDTO]);
const onClick = useCallback(() => {
navigationApi.expandRightPanel();
galleryPanel.expand();
flushSync(() => {
dispatch(boardIdSelected({ boardId: imageDTO.board_id ?? 'none', selectedImageName: imageDTO.image_name }));
});
}, [dispatch, galleryPanel, imageDTO]);
return (
<MenuItem icon={<PiCrosshairBold />} onClickCapture={onClick} isDisabled={!isGalleryImage}>
{t('boards.locateInGalery')}
</MenuItem>
);
});
ImageMenuItemLocateInGalery.displayName = 'ImageMenuItemLocateInGalery';

View File

@@ -2,6 +2,7 @@ import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallCLIPSkip } from 'features/gallery/hooks/useRecallCLIPSkip';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
@@ -17,7 +18,7 @@ import {
PiRulerBold,
} from 'react-icons/pi';
export const ImageMenuItemMetadataRecallActions = memo(() => {
export const ImageMenuItemMetadataRecallActionsCanvasGenerateTabs = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
@@ -28,6 +29,7 @@ export const ImageMenuItemMetadataRecallActions = memo(() => {
const recallPrompts = useRecallPrompts(imageDTO);
const recallSeed = useRecallSeed(imageDTO);
const recallDimensions = useRecallDimensions(imageDTO);
const recallCLIPSkip = useRecallCLIPSkip(imageDTO);
return (
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiArrowBendUpLeftBold />}>
@@ -55,10 +57,14 @@ export const ImageMenuItemMetadataRecallActions = memo(() => {
<MenuItem icon={<PiRulerBold />} onClick={recallDimensions.recall} isDisabled={!recallDimensions.isEnabled}>
{t('parameters.useSize')}
</MenuItem>
<MenuItem icon={<PiRulerBold />} onClick={recallCLIPSkip.recall} isDisabled={!recallCLIPSkip.isEnabled}>
{t('parameters.useClipSkip')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});
ImageMenuItemMetadataRecallActions.displayName = 'ImageMenuItemMetadataRecallActions';
ImageMenuItemMetadataRecallActionsCanvasGenerateTabs.displayName =
'ImageMenuItemMetadataRecallActionsCanvasGenerateTabs';

View File

@@ -0,0 +1,38 @@
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowBendUpLeftBold, PiPlantBold, PiQuotesBold } from 'react-icons/pi';
export const ImageMenuItemMetadataRecallActionsUpscaleTab = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();
const imageDTO = useImageDTOContext();
const recallPrompts = useRecallPrompts(imageDTO);
const recallSeed = useRecallSeed(imageDTO);
return (
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiArrowBendUpLeftBold />}>
<Menu {...subMenu.menuProps}>
<MenuButton {...subMenu.menuButtonProps}>
<SubMenuButtonContent label={t('parameters.recallMetadata')} />
</MenuButton>
<MenuList {...subMenu.menuListProps}>
<MenuItem icon={<PiQuotesBold />} onClick={recallPrompts.recall} isDisabled={!recallPrompts.isEnabled}>
{t('parameters.usePrompt')}
</MenuItem>
<MenuItem icon={<PiPlantBold />} onClick={recallSeed.recall} isDisabled={!recallSeed.isEnabled}>
{t('parameters.useSeed')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});
ImageMenuItemMetadataRecallActionsUpscaleTab.displayName = 'ImageMenuItemMetadataRecallActionsUpscaleTab';

View File

@@ -6,7 +6,8 @@ import { ImageMenuItemCopy } from 'features/gallery/components/ImageContextMenu/
import { ImageMenuItemDelete } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDelete';
import { ImageMenuItemDownload } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDownload';
import { ImageMenuItemLoadWorkflow } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLoadWorkflow';
import { ImageMenuItemMetadataRecallActions } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActions';
import { ImageMenuItemLocateInGalery } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLocateInGalery';
import { ImageMenuItemMetadataRecallActionsCanvasGenerateTabs } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActionsCanvasGenerateTabs';
import { ImageMenuItemNewCanvasFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewCanvasFromImageSubMenu';
import { ImageMenuItemNewLayerFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewLayerFromImageSubMenu';
import { ImageMenuItemOpenInNewTab } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInNewTab';
@@ -21,6 +22,7 @@ import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
import type { ImageDTO } from 'services/api/types';
import { ImageMenuItemMetadataRecallActionsUpscaleTab } from './ImageMenuItemMetadataRecallActionsUpscaleTab';
import { ImageMenuItemUseAsPromptTemplate } from './ImageMenuItemUseAsPromptTemplate';
type SingleSelectionMenuItemsProps = {
@@ -42,7 +44,8 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
</IconMenuItemGroup>
<MenuDivider />
<ImageMenuItemLoadWorkflow />
{(tab === 'canvas' || tab === 'generate') && <ImageMenuItemMetadataRecallActions />}
{(tab === 'canvas' || tab === 'generate') && <ImageMenuItemMetadataRecallActionsCanvasGenerateTabs />}
{tab === 'upscaling' && <ImageMenuItemMetadataRecallActionsUpscaleTab />}
<MenuDivider />
<ImageMenuItemSendToUpscale />
<ImageMenuItemUseForPromptGeneration />
@@ -53,6 +56,11 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
<MenuDivider />
<ImageMenuItemChangeBoard />
<ImageMenuItemStarUnstar />
{(tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling') &&
!imageDTO.is_intermediate && (
// Only render this menu item on tabs with a gallery.
<ImageMenuItemLocateInGalery />
)}
</ImageDTOContextProvider>
);
};

View File

@@ -118,6 +118,9 @@ const buildOnClick =
const start = Math.min(lastClickedIndex, currentClickedIndex);
const end = Math.max(lastClickedIndex, currentClickedIndex);
const imagesToSelect = imageNames.slice(start, end + 1);
if (currentClickedIndex < lastClickedIndex) {
imagesToSelect.reverse();
}
dispatch(selectionChanged(uniq(selection.concat(imagesToSelect))));
}
} else if (ctrlKey || metaKey) {

View File
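The hunk above fixes upward shift-click selection. A self-contained sketch of the logic, with `buildShiftClickSelection` as a hypothetical helper name: reversing the slice makes the clicked image the last element of the selection, which downstream code treats as the last-selected image (it is the scroll target via `selection.at(-1)` and the anchor for the next shift-click).

import { uniq } from 'es-toolkit/compat';

const buildShiftClickSelection = (
  imageNames: string[],
  selection: string[],
  lastClickedIndex: number,
  currentClickedIndex: number
): string[] => {
  const start = Math.min(lastClickedIndex, currentClickedIndex);
  const end = Math.max(lastClickedIndex, currentClickedIndex);
  const imagesToSelect = imageNames.slice(start, end + 1);
  // When the user shift-clicks *above* the anchor, reverse the range so the
  // clicked image ends up last in the selection rather than the anchor.
  if (currentClickedIndex < lastClickedIndex) {
    imagesToSelect.reverse();
  }
  return uniq(selection.concat(imagesToSelect));
};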

@@ -33,8 +33,6 @@ const ImageMetadataActions = (props: Props) => {
<UnrecallableMetadataDatum metadata={metadata} handler={MetadataHandlers.GenerationMode} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.PositivePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.NegativePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.PositiveStylePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.NegativeStylePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.MainModel} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.VAEModel} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Width} />
@@ -42,6 +40,7 @@ const ImageMetadataActions = (props: Props) => {
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Seed} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Steps} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Scheduler} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CLIPSkip} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CFGScale} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CFGRescaleMultiplier} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Guidance} />

View File

@@ -1,5 +1,5 @@
import { Button, Divider, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { DeleteImageButton } from 'features/deleteImageModal/components/DeleteImageButton';
import SingleSelectionMenuItems from 'features/gallery/components/ImageContextMenu/SingleSelectionMenuItems';
import { useDeleteImage } from 'features/gallery/hooks/useDeleteImage';
@@ -10,14 +10,19 @@ import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions'
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { PostProcessingPopover } from 'features/parameters/components/PostProcessing/PostProcessingPopover';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { flushSync } from 'react-dom';
import { useTranslation } from 'react-i18next';
import {
PiArrowsCounterClockwiseBold,
PiAsteriskBold,
PiCrosshairBold,
PiDotsThreeOutlineFill,
PiFlowArrowBold,
PiPencilBold,
@@ -30,7 +35,25 @@ import type { ImageDTO } from 'services/api/types';
export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) => {
const { t } = useTranslation();
const tab = useAppSelector(selectActiveTab);
const dispatch = useAppDispatch();
const activeTab = useAppSelector(selectActiveTab);
const galleryPanel = useGalleryPanel(activeTab);
const isGalleryImage = useMemo(() => {
return !imageDTO.is_intermediate;
}, [imageDTO]);
const locateInGallery = useCallback(() => {
navigationApi.expandRightPanel();
galleryPanel.expand();
flushSync(() => {
dispatch(boardIdSelected({ boardId: imageDTO.board_id ?? 'none', selectedImageName: imageDTO.image_name }));
});
}, [dispatch, galleryPanel, imageDTO]);
const isCanvasOrGenerateTab = tab === 'canvas' || tab === 'generate';
const isCanvasOrGenerateOrUpscalingTab = tab === 'canvas' || tab === 'generate' || tab === 'upscaling';
const doesTabHaveGallery = tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling';
const isUpscalingEnabled = useFeatureStatus('upscaling');
@@ -74,6 +97,17 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
<Divider orientation="vertical" h={8} mx={2} />
{doesTabHaveGallery && isGalleryImage && (
<IconButton
icon={<PiCrosshairBold />}
aria-label={t('boards.locateInGalery')}
tooltip={t('boards.locateInGalery')}
onClick={locateInGallery}
variant="link"
size="sm"
alignSelf="stretch"
/>
)}
<IconButton
icon={<PiFlowArrowBold />}
tooltip={`${t('nodes.loadWorkflow')} (W)`}
@@ -94,7 +128,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
onClick={recallRemix.recall}
/>
)}
{isCanvasOrGenerateTab && (
{isCanvasOrGenerateOrUpscalingTab && (
<IconButton
icon={<PiQuotesBold />}
tooltip={`${t('parameters.usePrompt')} (P)`}
@@ -105,7 +139,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
onClick={recallPrompts.recall}
/>
)}
{isCanvasOrGenerateTab && (
{isCanvasOrGenerateOrUpscalingTab && (
<IconButton
icon={<PiPlantBold />}
tooltip={`${t('parameters.useSeed')} (S)`}

View File

@@ -83,7 +83,15 @@ export const ImageViewerContextProvider = memo((props: PropsWithChildren) => {
// switch to the final image automatically. In this case, we clear the progress image immediately.
//
// We also clear the progress image if the queue item is canceled or failed, as there is no final image to show.
if (data.status === 'canceled' || data.status === 'failed' || !autoSwitch) {
if (
data.status === 'canceled' ||
data.status === 'failed' ||
!autoSwitch ||
// When the origin is 'canvas' but the destination is not plain 'canvas' (e.g. it carries a ':<session id>'
// suffix), the image is going to be added to the staging area. In this case, we need to clear the progress
// image, or it will be stuck on the viewer.
(data.origin === 'canvas' && data.destination !== 'canvas')
) {
$progressEvent.set(null);
$progressImage.set(null);
}

View File
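For readability, the enlarged guard above can be thought of as a single predicate. A sketch with simplified stand-in types (the real event types live in services/api); the condition mirrors the diff exactly, and the staging-area rationale is the one given in the inline comment:

type QueueItemStatus = 'pending' | 'in_progress' | 'completed' | 'canceled' | 'failed';

// Simplified stand-in for the socket event payload.
interface QueueItemEvent {
  status: QueueItemStatus;
  origin: string | null;
  destination: string | null;
}

// True when the viewer's progress image should be dropped instead of waiting
// for a final image to replace it.
const shouldClearProgress = (data: QueueItemEvent, autoSwitch: boolean): boolean =>
  data.status === 'canceled' ||
  data.status === 'failed' ||
  !autoSwitch ||
  (data.origin === 'canvas' && data.destination !== 'canvas');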

@@ -9,6 +9,7 @@ import {
selectGalleryImageMinimumWidth,
selectImageToCompare,
selectLastSelectedImage,
selectSelection,
selectSelectionCount,
} from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
@@ -138,6 +139,7 @@ const scrollIntoView = (
) => {
if (range.endIndex === 0) {
// No range is rendered; no need to scroll to anything.
log.trace('Not scrolling into view: Range endIndex is 0');
return;
}
@@ -145,6 +147,7 @@ const scrollIntoView = (
if (targetIndex === -1) {
// The image isn't in the currently rendered list.
log.trace('Not scrolling into view: targetIndex is -1');
return;
}
@@ -154,12 +157,28 @@ const scrollIntoView = (
if (!targetItem) {
if (targetIndex > range.endIndex) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'start',
},
'Scrolling into view: not in DOM'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
align: 'start',
});
} else if (targetIndex < range.startIndex) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'end',
},
'Scrolling into view: not in DOM'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
@@ -180,12 +199,28 @@ const scrollIntoView = (
const rootRect = rootEl.getBoundingClientRect();
if (itemRect.top < rootRect.top) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'start',
},
'Scrolling into view: in overscan'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
align: 'start',
});
} else if (itemRect.bottom > rootRect.bottom) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'end',
},
'Scrolling into view: in overscan'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
@@ -193,6 +228,7 @@ const scrollIntoView = (
});
} else {
// Image is already in view
log.debug('Not scrolling into view: Image is already in view');
}
return;
@@ -392,9 +428,10 @@ const useKeepSelectedImageInView = (
rootRef: React.RefObject<HTMLDivElement>,
rangeRef: MutableRefObject<ListRange>
) => {
const targetImageName = useAppSelector(selectLastSelectedImage);
const selection = useAppSelector(selectSelection);
useEffect(() => {
const targetImageName = selection.at(-1);
const virtuosoGridHandle = virtuosoRef.current;
const rootEl = rootRef.current;
const range = rangeRef.current;
@@ -402,8 +439,11 @@ const useKeepSelectedImageInView = (
if (!virtuosoGridHandle || !rootEl || !targetImageName || !imageNames || imageNames.length === 0) {
return;
}
scrollIntoView(targetImageName, imageNames, rootEl, virtuosoGridHandle, range);
}, [targetImageName, imageNames, rangeRef, rootRef, virtuosoRef]);
setTimeout(() => {
scrollIntoView(targetImageName, imageNames, rootEl, virtuosoGridHandle, range);
}, 0);
}, [imageNames, rangeRef, rootRef, virtuosoRef, selection]);
};
/**

View File
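Two behaviors in this file are worth noting: the effect now derives the target from `selection.at(-1)` instead of a dedicated selector, and the actual scroll is wrapped in `setTimeout(..., 0)` so it runs as a macrotask after React commits the updated image list and the virtualized grid re-measures. A sketch of the deferral, with a simplified stand-in for Virtuoso's handle:

// Simplified stand-in for the Virtuoso grid handle used above.
interface GridHandle {
  scrollToIndex: (opts: { index: number; behavior: 'auto' | 'smooth'; align: 'start' | 'end' }) => void;
}

const deferredScrollTo = (handle: GridHandle, index: number) => {
  // setTimeout(..., 0) queues a macrotask, so the scroll runs after React has
  // committed the new list and the grid has re-measured its items.
  setTimeout(() => {
    handle.scrollToIndex({ index, behavior: 'auto', align: 'start' });
  }, 0);
};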

@@ -0,0 +1,72 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { selectHasModelCLIPSkip } from 'features/controlLayers/store/paramsSlice';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';
const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];
export const useRecallCLIPSkip = (imageDTO: ImageDTO) => {
const store = useAppStore();
const hasModelCLIPSkip = useAppSelector(selectHasModelCLIPSkip);
const tab = useAppSelector(selectActiveTab);
const [hasCLIPSkip, setHasCLIPSkip] = useState(false);
const { metadata, isLoading } = useDebouncedMetadata(imageDTO.image_name);
useEffect(() => {
const parse = async () => {
try {
await MetadataHandlers.CLIPSkip.parse(metadata, store);
setHasCLIPSkip(true);
} catch {
setHasCLIPSkip(false);
}
};
if (!hasModelCLIPSkip) {
setHasCLIPSkip(false);
return;
}
parse();
}, [metadata, store, hasModelCLIPSkip]);
const isEnabled = useMemo(() => {
if (isLoading) {
return false;
}
if (!ALLOWED_TABS.includes(tab)) {
return false;
}
if (!metadata) {
return false;
}
if (!hasCLIPSkip) {
return false;
}
return true;
}, [hasCLIPSkip, isLoading, metadata, tab]);
const recall = useCallback(() => {
if (!metadata) {
return;
}
if (!isEnabled) {
return;
}
MetadataUtils.recallByHandler({ metadata, handler: MetadataHandlers.CLIPSkip, store });
}, [metadata, isEnabled, store]);
return {
recall,
isEnabled,
};
};

View File
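useRecallCLIPSkip follows the same detection idiom as the other recall hooks: a metadata field counts as present exactly when its handler's parse() resolves. A stripped-down sketch of that idiom, with a simplified handler type as a stand-in:

// Simplified stand-in for a metadata handler's parse contract.
interface Parseable<T> {
  parse: (metadata: unknown) => Promise<T>;
}

// A field is considered recallable iff parsing it succeeds.
const hasField = async <T>(handler: Parseable<T>, metadata: unknown): Promise<boolean> => {
  try {
    await handler.parse(metadata);
    return true;
  } catch {
    return false;
  }
};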

@@ -1,12 +1,15 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';
import { useClearStylePresetWithToast } from './useClearStylePresetWithToast';
const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];
export const useRecallPrompts = (imageDTO: ImageDTO) => {
const store = useAppStore();
const tab = useAppSelector(selectActiveTab);
@@ -19,12 +22,7 @@ export const useRecallPrompts = (imageDTO: ImageDTO) => {
const parse = async () => {
try {
const result = await MetadataUtils.hasMetadataByHandlers({
handlers: [
MetadataHandlers.PositivePrompt,
MetadataHandlers.NegativePrompt,
MetadataHandlers.PositiveStylePrompt,
MetadataHandlers.NegativeStylePrompt,
],
handlers: [MetadataHandlers.PositivePrompt, MetadataHandlers.NegativePrompt],
metadata,
store,
require: 'some',
@@ -43,7 +41,7 @@ export const useRecallPrompts = (imageDTO: ImageDTO) => {
return false;
}
if (tab !== 'canvas' && tab !== 'generate') {
if (!ALLOWED_TABS.includes(tab)) {
return false;
}

View File

@@ -1,10 +1,13 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';
const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];
export const useRecallSeed = (imageDTO: ImageDTO) => {
const store = useAppStore();
const tab = useAppSelector(selectActiveTab);
@@ -30,7 +33,7 @@ export const useRecallSeed = (imageDTO: ImageDTO) => {
return false;
}
if (tab !== 'canvas' && tab !== 'generate') {
if (!ALLOWED_TABS.includes(tab)) {
return false;
}

View File

@@ -1,4 +1,3 @@
import { objectEquals } from '@observ33r/object-equals';
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
@@ -43,54 +42,16 @@ const slice = createSlice({
initialState: getInitialState(),
reducers: {
imageSelected: (state, action: PayloadAction<string | null>) => {
// Let's be efficient here and not update the selection unless it has actually changed. This helps to prevent
// unnecessary re-renders of the gallery.
const selectedImageName = action.payload;
// If we got `null`, clear the selection
if (!selectedImageName) {
// But only if we have images selected
if (state.selection.length > 0) {
state.selection = [];
}
return;
}
// If we have multiple images selected, clear the selection and select the new image
if (state.selection.length !== 1) {
state.selection = [];
} else {
state.selection = [selectedImageName];
return;
}
// If the selected image is different from the current selection, clear the selection and select the new image
if (state.selection[0] !== selectedImageName) {
state.selection = [selectedImageName];
return;
}
// Else we have the same image selected, do nothing
},
selectionChanged: (state, action: PayloadAction<string[]>) => {
// Let's be efficient here and not update the selection unless it has actually changed. This helps to prevent
// unnecessary re-renders of the gallery.
// Remove duplicates from the selection
const newSelection = uniq(action.payload);
// If the new selection has a different length, update the selection
if (newSelection.length !== state.selection.length) {
state.selection = newSelection;
return;
}
// If the new selection is different, update the selection
if (!objectEquals(newSelection, state.selection)) {
state.selection = newSelection;
return;
}
// Else we have the same selection, do nothing
state.selection = uniq(action.payload);
},
imageToCompareChanged: (state, action: PayloadAction<string | null>) => {
state.imageToCompare = action.payload;

View File
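The removed guards existed because, with Immer (used by Redux Toolkit's createSlice), assigning a fresh array to a draft always yields a new state reference even when the contents are identical, which re-runs subscribers. The simplified reducers accept that churn, presumably relying on memoized selectors downstream. A sketch of the underlying behavior, assuming plain Immer semantics:

import { produce } from 'immer';

const state = { selection: ['a', 'b'] };
const next = produce(state, (draft) => {
  draft.selection = ['a', 'b']; // identical contents, but a new array
});
console.log(next === state); // false - the draft was modified, so a new state object is produced
console.log(next.selection === state.selection); // false as well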

@@ -9,14 +9,13 @@ import { bboxHeightChanged, bboxWidthChanged, canvasMetadataRecalled } from 'fea
import { loraAllDeleted, loraRecalled } from 'features/controlLayers/store/lorasSlice';
import {
heightChanged,
negativePrompt2Changed,
negativePromptChanged,
positivePrompt2Changed,
positivePromptChanged,
refinerModelChanged,
selectBase,
setCfgRescaleMultiplier,
setCfgScale,
setClipSkip,
setGuidance,
setImg2imgStrength,
setRefinerCFGScale,
@@ -30,7 +29,6 @@ import {
setSeamlessYAxis,
setSeed,
setSteps,
shouldConcatPromptsChanged,
vaeSelected,
widthChanged,
} from 'features/controlLayers/store/paramsSlice';
@@ -44,12 +42,12 @@ import { modelSelected } from 'features/parameters/store/actions';
import type {
ParameterCFGRescaleMultiplier,
ParameterCFGScale,
ParameterCLIPSkip,
ParameterGuidance,
ParameterHeight,
ParameterModel,
ParameterNegativePrompt,
ParameterPositivePrompt,
ParameterPositiveStylePromptSDXL,
ParameterScheduler,
ParameterSDXLRefinerModel,
ParameterSDXLRefinerNegativeAestheticScore,
@@ -67,12 +65,11 @@ import {
zLoRAWeight,
zParameterCFGRescaleMultiplier,
zParameterCFGScale,
zParameterCLIPSkip,
zParameterGuidance,
zParameterImageDimension,
zParameterNegativePrompt,
zParameterNegativeStylePromptSDXL,
zParameterPositivePrompt,
zParameterPositiveStylePromptSDXL,
zParameterScheduler,
zParameterSDXLRefinerNegativeAestheticScore,
zParameterSDXLRefinerPositiveAestheticScore,
@@ -289,46 +286,6 @@ const NegativePrompt: SingleMetadataHandler<ParameterNegativePrompt> = {
};
//#endregion Negative Prompt
//#region SDXL Positive Style Prompt
const PositiveStylePrompt: SingleMetadataHandler<ParameterPositiveStylePromptSDXL> = {
[SingleMetadataKey]: true,
type: 'PositiveStylePrompt',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'positive_style_prompt');
const parsed = zParameterPositiveStylePromptSDXL.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(positivePrompt2Changed(value));
},
i18nKey: 'sdxl.posStylePrompt',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterPositiveStylePromptSDXL>) => (
<MetadataPrimitiveValue value={value} />
),
};
//#endregion SDXL Positive Style Prompt
//#region SDXL Negative Style Prompt
const NegativeStylePrompt: SingleMetadataHandler<ParameterPositiveStylePromptSDXL> = {
[SingleMetadataKey]: true,
type: 'NegativeStylePrompt',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'negative_style_prompt');
const parsed = zParameterNegativeStylePromptSDXL.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(negativePrompt2Changed(value));
},
i18nKey: 'sdxl.negStylePrompt',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterPositiveStylePromptSDXL>) => (
<MetadataPrimitiveValue value={value} />
),
};
//#endregion SDXL Negative Style Prompt
//#region CFG Scale
const CFGScale: SingleMetadataHandler<ParameterCFGScale> = {
[SingleMetadataKey]: true,
@@ -367,6 +324,24 @@ const CFGRescaleMultiplier: SingleMetadataHandler<ParameterCFGRescaleMultiplier>
};
//#endregion CFG Rescale Multiplier
//#region CLIP Skip
const CLIPSkip: SingleMetadataHandler<ParameterCLIPSkip> = {
[SingleMetadataKey]: true,
type: 'CLIPSkip',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'clip_skip');
const parsed = zParameterCLIPSkip.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(setClipSkip(value));
},
i18nKey: 'metadata.clipSkip',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterCLIPSkip>) => <MetadataPrimitiveValue value={value} />,
};
//#endregion CLIP Skip
//#region Guidance
const Guidance: SingleMetadataHandler<ParameterGuidance> = {
[SingleMetadataKey]: true,
@@ -927,10 +902,9 @@ export const MetadataHandlers = {
GenerationMode,
PositivePrompt,
NegativePrompt,
PositiveStylePrompt,
NegativeStylePrompt,
CFGScale,
CFGRescaleMultiplier,
CLIPSkip,
Guidance,
Scheduler,
Width,
@@ -1052,26 +1026,6 @@ const recallByHandlers = async (arg: {
}
}
// We may need to update the prompt concat flag based on the recalled prompts
const positivePrompt = recalled.get(MetadataHandlers.PositivePrompt);
const negativePrompt = recalled.get(MetadataHandlers.NegativePrompt);
const positiveStylePrompt = recalled.get(MetadataHandlers.PositiveStylePrompt);
const negativeStylePrompt = recalled.get(MetadataHandlers.NegativeStylePrompt);
// The values will be undefined if the handler was not recalled
if (
positivePrompt !== undefined ||
negativePrompt !== undefined ||
positiveStylePrompt !== undefined ||
negativeStylePrompt !== undefined
) {
const concat =
(Boolean(positiveStylePrompt) && positiveStylePrompt === positivePrompt) ||
(Boolean(negativeStylePrompt) && negativeStylePrompt === negativePrompt);
store.dispatch(shouldConcatPromptsChanged(concat));
}
if (!silent) {
if (recalled.size > 0) {
toast({
@@ -1094,12 +1048,7 @@ const recallByHandlers = async (arg: {
const recallPrompts = async (metadata: unknown, store: AppStore) => {
const recalled = await recallByHandlers({
metadata,
handlers: [
MetadataHandlers.PositivePrompt,
MetadataHandlers.NegativePrompt,
MetadataHandlers.PositiveStylePrompt,
MetadataHandlers.NegativeStylePrompt,
],
handlers: [MetadataHandlers.PositivePrompt, MetadataHandlers.NegativePrompt],
store,
silent: true,
});

View File
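Every entry in MetadataHandlers follows the same contract: parse() extracts and validates the raw field (rejecting when absent or invalid) and recall() dispatches it back into state. A pared-down sketch of that contract for the new clip_skip field; the interface below is a simplification of the real SingleMetadataHandler, which additionally carries a brand key, i18n key, and label/value components, and the action type string is illustrative only:

// Simplified stand-in for the app's SingleMetadataHandler type.
interface SimpleMetadataHandler<T> {
  type: string;
  parse: (metadata: unknown) => Promise<T>;
  recall: (value: T, dispatch: (action: { type: string; payload: T }) => void) => void;
}

// Hypothetical handler mirroring the CLIPSkip handler above.
const ClipSkipSketch: SimpleMetadataHandler<number> = {
  type: 'CLIPSkip',
  parse: (metadata) => {
    const raw = (metadata as Record<string, unknown> | null)?.['clip_skip'];
    if (typeof raw !== 'number' || !Number.isInteger(raw) || raw < 0) {
      return Promise.reject(new Error('invalid clip_skip'));
    }
    return Promise.resolve(raw);
  },
  recall: (value, dispatch) => {
    dispatch({ type: 'params/setClipSkip', payload: value });
  },
};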

@@ -2,7 +2,7 @@ import { Button, Flex, Grid, Heading, Text } from '@invoke-ai/ui-library';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { map } from 'es-toolkit/compat';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { StarterBundleButton } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundle';
import { StarterBundleButton } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundleButton';
import { StarterBundleTooltipContentCompact } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundleTooltipContentCompact';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

View File

@@ -1,21 +0,0 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import { Button } from '@invoke-ai/ui-library';
import { useStarterBundleInstall } from 'features/modelManagerV2/hooks/useStarterBundleInstall';
import { useStarterBundleInstallStatus } from 'features/modelManagerV2/hooks/useStarterBundleInstallStatus';
import { useCallback } from 'react';
import type { S } from 'services/api/types';
export const StarterBundleButton = ({ bundle, ...rest }: { bundle: S['StarterModelBundle'] } & ButtonProps) => {
const { installBundle } = useStarterBundleInstall();
const { install } = useStarterBundleInstallStatus(bundle);
const handleClickBundle = useCallback(() => {
installBundle(bundle);
}, [installBundle, bundle]);
return (
<Button onClick={handleClickBundle} isDisabled={install.length === 0} {...rest}>
{bundle.name}
</Button>
);
};

View File

@@ -0,0 +1,61 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import {
Button,
ConfirmationAlertDialog,
Flex,
ListItem,
Text,
UnorderedList,
useDisclosure,
} from '@invoke-ai/ui-library';
import { useStarterBundleInstall } from 'features/modelManagerV2/hooks/useStarterBundleInstall';
import { useStarterBundleInstallStatus } from 'features/modelManagerV2/hooks/useStarterBundleInstallStatus';
import { t } from 'i18next';
import type { MouseEvent } from 'react';
import { useCallback } from 'react';
import type { S } from 'services/api/types';
export const StarterBundleButton = ({ bundle, ...rest }: { bundle: S['StarterModelBundle'] } & ButtonProps) => {
const { installBundle } = useStarterBundleInstall();
const { install } = useStarterBundleInstallStatus(bundle);
const { isOpen, onOpen, onClose } = useDisclosure();
const onClickBundle = useCallback(
(e: MouseEvent<HTMLButtonElement>) => {
e.stopPropagation();
onOpen();
},
[onOpen]
);
const handleInstallBundle = useCallback(() => {
installBundle(bundle);
}, [installBundle, bundle]);
return (
<>
<Button onClick={onClickBundle} isDisabled={install.length === 0} {...rest}>
{bundle.name}
</Button>
<ConfirmationAlertDialog
isOpen={isOpen}
onClose={onClose}
title={t('modelManager.installBundle')}
acceptCallback={handleInstallBundle}
acceptButtonText={t('modelManager.install')}
useInert={false}
>
<Flex rowGap={4} flexDirection="column">
<Text fontWeight="bold">{t('modelManager.installBundleMsg1', { bundleName: bundle.name })}</Text>
<Text>{t('modelManager.installBundleMsg2', { count: install.length })}</Text>
<UnorderedList>
{install.map((model, index) => (
<ListItem key={index} wordBreak="break-all">
<Text>{model.config.name}</Text>
</ListItem>
))}
</UnorderedList>
</Flex>
</ConfirmationAlertDialog>
</>
);
};

View File

@@ -7,7 +7,7 @@ import { useTranslation } from 'react-i18next';
import { PiInfoBold, PiXBold } from 'react-icons/pi';
import type { GetStarterModelsResponse } from 'services/api/endpoints/models';
import { StarterBundleButton } from './StarterBundle';
import { StarterBundleButton } from './StarterBundleButton';
import { StarterBundleTooltipContent } from './StarterBundleTooltipContent';
import { StarterModelsResultItem } from './StarterModelsResultItem';

View File

@@ -20,6 +20,7 @@ export const BASE_COLOR_MAP: Record<BaseModelType, string> = {
imagen4: 'pink',
'chatgpt-4o': 'pink',
'flux-kontext': 'pink',
'gemini-2.5': 'pink',
};
const ModelBaseBadge = ({ base }: Props) => {

View File

@@ -1,7 +1,7 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex } from '@invoke-ai/ui-library';
import InvocationNodeTitle from 'features/nodes/components/flow/nodes/common/InvocationNodeTitle';
import NodeCollapseButton from 'features/nodes/components/flow/nodes/common/NodeCollapseButton';
import NodeTitle from 'features/nodes/components/flow/nodes/common/NodeTitle';
import InvocationNodeClassificationIcon from 'features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { memo } from 'react';
@@ -35,7 +35,7 @@ const InvocationNodeHeader = ({ nodeId, isOpen }: Props) => {
<Flex sx={sx} data-is-open={isOpen} data-is-invalid={isInvalid}>
<NodeCollapseButton nodeId={nodeId} isOpen={isOpen} />
<InvocationNodeClassificationIcon nodeId={nodeId} />
<NodeTitle nodeId={nodeId} />
<InvocationNodeTitle nodeId={nodeId} />
<Flex alignItems="center">
<InvocationNodeStatusIndicator nodeId={nodeId} />
<InvocationNodeInfoIcon nodeId={nodeId} />

View File

@@ -1,35 +1,43 @@
import { CompositeNumberInput } from '@invoke-ai/ui-library';
import { Button, CompositeNumberInput } from '@invoke-ai/ui-library';
import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/fields/FloatField/useFloatField';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const FloatFieldInput = memo(
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { defaultValue, onChange, min, max, step, fineStep, constrainValue, showShuffle, randomizeValue } =
useFloatField(nodeId, field.name, fieldTemplate, settings);
const { t } = useTranslation();
return (
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
/>
<>
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
constrainValue={constrainValue}
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}
);

View File

@@ -1,22 +1,22 @@
import { CompositeNumberInput, CompositeSlider } from '@invoke-ai/ui-library';
import { Button, CompositeNumberInput, CompositeSlider } from '@invoke-ai/ui-library';
import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/fields/FloatField/useFloatField';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const FloatFieldInputAndSlider = memo(
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { defaultValue, onChange, min, max, step, fineStep, constrainValue, showShuffle, randomizeValue } =
useFloatField(nodeId, field.name, fieldTemplate, settings);
const { t } = useTranslation();
return (
<>
@@ -43,7 +43,13 @@ export const FloatFieldInputAndSlider = memo(
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
constrainValue={constrainValue}
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}

View File

@@ -1,37 +1,48 @@
import { CompositeSlider } from '@invoke-ai/ui-library';
import { Button, CompositeSlider } from '@invoke-ai/ui-library';
import { useFloatField } from 'features/nodes/components/flow/nodes/Invocation/fields/FloatField/useFloatField';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FloatFieldInputInstance, FloatFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldFloatSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const FloatFieldSlider = memo(
(
props: FieldComponentProps<FloatFieldInputInstance, FloatFieldInputTemplate, { settings?: NodeFieldFloatSettings }>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useFloatField(
const { defaultValue, onChange, min, max, step, fineStep, showShuffle, randomizeValue } = useFloatField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { t } = useTranslation();
return (
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
<>
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}
);

View File

@@ -1,5 +1,6 @@
import { NUMPY_RAND_MAX } from 'app/constants';
import { useAppDispatch } from 'app/store/storeHooks';
import randomFloat from 'common/util/randomFloat';
import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple';
import { isNil } from 'es-toolkit/compat';
import { fieldFloatValueChanged } from 'features/nodes/store/nodesSlice';
@@ -11,9 +12,9 @@ export const useFloatField = (
nodeId: string,
fieldName: string,
fieldTemplate: FloatFieldInputTemplate,
overrides: { min?: number; max?: number; step?: number } = {}
overrides: { showShuffle?: boolean; min?: number; max?: number; step?: number } = {}
) => {
const { min: overrideMin, max: overrideMax, step: overrideStep } = overrides;
const { showShuffle, min: overrideMin, max: overrideMax, step: overrideStep } = overrides;
const dispatch = useAppDispatch();
const step = useMemo(() => {
@@ -36,6 +37,13 @@ export const useFloatField = (
return fieldTemplate.multipleOf;
}, [fieldTemplate.multipleOf, overrideStep]);
const baseStep = useMemo(() => {
if (isNil(fieldTemplate.multipleOf)) {
return undefined;
}
return fieldTemplate.multipleOf;
}, [fieldTemplate.multipleOf]);
const min = useMemo(() => {
let min = -NUMPY_RAND_MAX;
@@ -66,8 +74,8 @@ export const useFloatField = (
const constrainValue = useCallback(
(v: number) =>
constrainNumber(v, { min, max, step: step }, { min: overrideMin, max: overrideMax, step: overrideStep }),
[max, min, overrideMax, overrideMin, overrideStep, step]
constrainNumber(v, { min, max, step: baseStep }, { min: overrideMin, max: overrideMax, step: overrideStep }),
[max, min, overrideMax, overrideMin, overrideStep, baseStep]
);
const onChange = useCallback(
@@ -77,6 +85,11 @@ export const useFloatField = (
[dispatch, fieldName, nodeId]
);
const randomizeValue = useCallback(() => {
const value = Number((Math.round(randomFloat(min, max) / step) * step).toFixed(10));
dispatch(fieldFloatValueChanged({ nodeId, fieldName, value }));
}, [dispatch, fieldName, nodeId, min, max, step]);
return {
defaultValue: fieldTemplate.default,
onChange,
@@ -85,5 +98,7 @@ export const useFloatField = (
step,
fineStep,
constrainValue,
showShuffle,
randomizeValue,
};
};

View File
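randomizeValue draws a uniform float and snaps it onto the step grid; the toFixed(10) round-trip trims binary floating-point noise (e.g. 0.7000000000000001 becomes 0.7). Note that for a step that does not evenly divide the bounds, the snapped value can land just outside [min, max]. A self-contained sketch, with randomFloat assumed to behave like the imported common/util helper (uniform draw in [min, max)):

// Assumed behavior of the imported common/util/randomFloat helper.
const randomFloat = (min: number, max: number): number => min + Math.random() * (max - min);

// Snap a random draw onto the step grid, then strip float noise.
const randomOnGrid = (min: number, max: number, step: number): number =>
  Number((Math.round(randomFloat(min, max) / step) * step).toFixed(10));

// e.g. randomOnGrid(0, 1, 0.1) yields one of 0, 0.1, ..., 1 rather than
// values like 0.7000000000000001.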

@@ -0,0 +1,41 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAddRemoveFormElement } from 'features/nodes/components/sidePanel/builder/use-add-remove-form-element';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiMinusBold, PiPlusBold } from 'react-icons/pi';
type Props = {
nodeId: string;
fieldName: string;
};
export const InputFieldAddRemoveFormRoot = memo(({ nodeId, fieldName }: Props) => {
const { t } = useTranslation();
const { isAddedToRoot, addNodeFieldToRoot, removeNodeFieldFromRoot } = useAddRemoveFormElement(nodeId, fieldName);
const description = useMemo(() => {
return isAddedToRoot ? t('workflows.builder.removeFromForm') : t('workflows.builder.addToForm');
}, [isAddedToRoot, t]);
const icon = useMemo(() => {
return isAddedToRoot ? <PiMinusBold /> : <PiPlusBold />;
}, [isAddedToRoot]);
const onClick = useCallback(() => {
return isAddedToRoot ? removeNodeFieldFromRoot() : addNodeFieldToRoot();
}, [isAddedToRoot, addNodeFieldToRoot, removeNodeFieldFromRoot]);
return (
<IconButton
variant="ghost"
tooltip={description}
aria-label={description}
icon={icon}
pointerEvents="auto"
size="xs"
onClick={onClick}
/>
);
});
InputFieldAddRemoveFormRoot.displayName = 'InputFieldAddRemoveFormRoot';

View File

@@ -1,29 +0,0 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAddNodeFieldToRoot } from 'features/nodes/components/sidePanel/builder/use-add-node-field-to-root';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
type Props = {
nodeId: string;
fieldName: string;
};
export const InputFieldAddToFormRoot = memo(({ nodeId, fieldName }: Props) => {
const { t } = useTranslation();
const addToRoot = useAddNodeFieldToRoot(nodeId, fieldName);
return (
<IconButton
variant="ghost"
tooltip={t('workflows.builder.addToForm')}
aria-label={t('workflows.builder.addToForm')}
icon={<PiPlusBold />}
pointerEvents="auto"
size="xs"
onClick={addToRoot}
/>
);
});
InputFieldAddToFormRoot.displayName = 'InputFieldAddToFormRoot';

View File

@@ -1,6 +1,5 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { InputFieldAddToFormRoot } from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldAddToFormRoot';
import { InputFieldDescriptionPopover } from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldDescriptionPopover';
import { InputFieldHandle } from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldHandle';
import { InputFieldResetToDefaultValueIconButton } from 'features/nodes/components/flow/nodes/Invocation/fields/InputFieldResetToDefaultValueIconButton';
@@ -12,6 +11,7 @@ import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { FieldInputTemplate } from 'features/nodes/types/field';
import { memo, useRef } from 'react';
import { InputFieldAddRemoveFormRoot } from './InputFieldAddRemoveFormRoot';
import { InputFieldRenderer } from './InputFieldRenderer';
import { InputFieldTitle } from './InputFieldTitle';
import { InputFieldWrapper } from './InputFieldWrapper';
@@ -113,7 +113,7 @@ const DirectField = memo(({ nodeId, fieldName, isInvalid, isConnected, fieldTemp
<Flex className="direct-field-action-buttons">
<InputFieldDescriptionPopover nodeId={nodeId} fieldName={fieldName} />
<InputFieldResetToDefaultValueIconButton nodeId={nodeId} fieldName={fieldName} />
<InputFieldAddToFormRoot nodeId={nodeId} fieldName={fieldName} />
<InputFieldAddRemoveFormRoot nodeId={nodeId} fieldName={fieldName} />
</Flex>
</Flex>
<InputFieldRenderer nodeId={nodeId} fieldName={fieldName} />

View File

@@ -30,12 +30,12 @@ const labelSx: SystemStyleObject = {
_hover: {
fontWeight: 'semibold !important',
},
'&[data-is-invalid="true"]': {
color: 'error.300',
},
'&[data-is-added-to-form="true"]': {
color: 'blue.300',
},
'&[data-is-invalid="true"]': {
color: 'error.300',
},
'&[data-is-disabled="true"]': {
opacity: 0.5,
},
@@ -106,7 +106,7 @@ export const InputFieldTitle = memo((props: Props) => {
onDoubleClick={onDoubleClick}
>
{editable.value}
{isAddedToForm && <Icon as={PiLinkBold} color="blue.200" ml={1} />}
{isAddedToForm && <Icon as={PiLinkBold} color={isInvalid ? 'error.300' : 'blue.200'} ml={1} />}
</Text>
</Tooltip>
);

View File

@@ -1,10 +1,12 @@
import { CompositeNumberInput } from '@invoke-ai/ui-library';
import { Button, CompositeNumberInput } from '@invoke-ai/ui-library';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { useIntegerField } from 'features/nodes/components/flow/nodes/Invocation/fields/IntegerField/useIntegerField';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { IntegerFieldInputInstance, IntegerFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldIntegerSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const IntegerFieldInput = memo(
(
@@ -15,26 +17,31 @@ export const IntegerFieldInput = memo(
>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep, constrainValue } = useIntegerField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { defaultValue, onChange, min, max, step, fineStep, constrainValue, showShuffle, randomizeValue } =
useIntegerField(nodeId, field.name, fieldTemplate, settings);
const { t } = useTranslation();
return (
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
constrainValue={constrainValue}
/>
<>
<CompositeNumberInput
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
constrainValue={constrainValue}
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}
);

View File

@@ -1,10 +1,12 @@
import { CompositeNumberInput, CompositeSlider } from '@invoke-ai/ui-library';
import { Button, CompositeNumberInput, CompositeSlider } from '@invoke-ai/ui-library';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { useIntegerField } from 'features/nodes/components/flow/nodes/Invocation/fields/IntegerField/useIntegerField';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { IntegerFieldInputInstance, IntegerFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldIntegerSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const IntegerFieldInputAndSlider = memo(
(
@@ -15,12 +17,10 @@ export const IntegerFieldInputAndSlider = memo(
>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useIntegerField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { defaultValue, onChange, min, max, step, fineStep, constrainValue, showShuffle, randomizeValue } =
useIntegerField(nodeId, field.name, fieldTemplate, settings);
const { t } = useTranslation();
return (
<>
@@ -47,7 +47,13 @@ export const IntegerFieldInputAndSlider = memo(
fineStep={fineStep}
className={NO_DRAG_CLASS}
flex="1 1 0"
constrainValue={constrainValue}
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}

View File

@@ -1,10 +1,12 @@
import { CompositeSlider } from '@invoke-ai/ui-library';
import { Button, CompositeSlider } from '@invoke-ai/ui-library';
import type { FieldComponentProps } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/types';
import { useIntegerField } from 'features/nodes/components/flow/nodes/Invocation/fields/IntegerField/useIntegerField';
import { NO_DRAG_CLASS } from 'features/nodes/types/constants';
import type { IntegerFieldInputInstance, IntegerFieldInputTemplate } from 'features/nodes/types/field';
import type { NodeFieldIntegerSettings } from 'features/nodes/types/workflow';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShuffleBold } from 'react-icons/pi';
export const IntegerFieldSlider = memo(
(
@@ -15,27 +17,36 @@ export const IntegerFieldSlider = memo(
>
) => {
const { nodeId, field, fieldTemplate, settings } = props;
const { defaultValue, onChange, min, max, step, fineStep } = useIntegerField(
const { defaultValue, onChange, min, max, step, fineStep, showShuffle, randomizeValue } = useIntegerField(
nodeId,
field.name,
fieldTemplate,
settings
);
const { t } = useTranslation();
return (
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
<>
<CompositeSlider
defaultValue={defaultValue}
onChange={onChange}
value={field.value}
min={min}
max={max}
step={step}
fineStep={fineStep}
className={NO_DRAG_CLASS}
marks
withThumbTooltip
flex="1 1 0"
/>
{showShuffle && (
<Button size="sm" isDisabled={false} onClick={randomizeValue} leftIcon={<PiShuffleBold />} flexShrink={0}>
{t('workflows.builder.shuffle')}
</Button>
)}
</>
);
}
);

View File

@@ -1,5 +1,6 @@
import { NUMPY_RAND_MAX } from 'app/constants';
import { useAppDispatch } from 'app/store/storeHooks';
import randomInt from 'common/util/randomInt';
import { roundDownToMultiple, roundUpToMultiple } from 'common/util/roundDownToMultiple';
import { isNil } from 'es-toolkit/compat';
import { fieldIntegerValueChanged } from 'features/nodes/store/nodesSlice';
@@ -11,9 +12,9 @@ export const useIntegerField = (
nodeId: string,
fieldName: string,
fieldTemplate: IntegerFieldInputTemplate,
overrides: { min?: number; max?: number; step?: number } = {}
overrides: { showShuffle?: boolean; min?: number; max?: number; step?: number } = {}
) => {
const { min: overrideMin, max: overrideMax, step: overrideStep } = overrides;
const { showShuffle, min: overrideMin, max: overrideMax, step: overrideStep } = overrides;
const dispatch = useAppDispatch();
const step = useMemo(() => {
@@ -65,8 +66,7 @@ export const useIntegerField = (
}, [fieldTemplate.exclusiveMaximum, fieldTemplate.maximum, overrideMax, step]);
const constrainValue = useCallback(
(v: number) =>
constrainNumber(v, { min, max, step: step }, { min: overrideMin, max: overrideMax, step: overrideStep }),
(v: number) => constrainNumber(v, { min, max, step }, { min: overrideMin, max: overrideMax, step: overrideStep }),
[max, min, overrideMax, overrideMin, overrideStep, step]
);
@@ -77,6 +77,11 @@ export const useIntegerField = (
[dispatch, fieldName, nodeId]
);
const randomizeValue = useCallback(() => {
const value = Math.round(randomInt(min, max) / step) * step;
dispatch(fieldIntegerValueChanged({ nodeId, fieldName, value }));
}, [dispatch, fieldName, nodeId, min, max, step]);
return {
defaultValue: fieldTemplate.default,
onChange,
@@ -85,5 +90,7 @@ export const useIntegerField = (
step,
fineStep,
constrainValue,
showShuffle,
randomizeValue,
};
};

View File
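useIntegerField gains the same shuffle support; the integer version snaps with plain arithmetic since no float noise is involved. A sketch, with randomInt assumed inclusive of both bounds (mirroring the imported common/util/randomInt helper):

// Assumed behavior of the imported helper: uniform integer in [min, max].
const randomInt = (min: number, max: number): number =>
  Math.floor(Math.random() * (max - min + 1)) + min;

// Snap to the step grid. As with the float version, a step that does not
// divide the bounds evenly can push the result one step past max.
const randomIntOnGrid = (min: number, max: number, step: number): number =>
  Math.round(randomInt(min, max) / step) * step;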

@@ -1,22 +1,32 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, Input, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEditable } from 'common/hooks/useEditable';
import { useBatchGroupColorToken } from 'features/nodes/hooks/useBatchGroupColorToken';
import { useBatchGroupId } from 'features/nodes/hooks/useBatchGroupId';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { useNodeTemplateTitleSafe } from 'features/nodes/hooks/useNodeTemplateTitleSafe';
import { useNodeUserTitleSafe } from 'features/nodes/hooks/useNodeUserTitleSafe';
import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
import { NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { NO_DRAG_CLASS, NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
const labelSx: SystemStyleObject = {
fontWeight: 'semibold',
'&[data-is-invalid="true"]': {
color: 'error.300',
},
};
type Props = {
nodeId: string;
title?: string;
};
const NodeTitle = ({ nodeId, title }: Props) => {
const InvocationNodeTitle = ({ nodeId, title }: Props) => {
const dispatch = useAppDispatch();
const isInvalid = useNodeHasErrors();
const label = useNodeUserTitleSafe();
const batchGroupId = useBatchGroupId(nodeId);
const batchGroupColorToken = useBatchGroupColorToken(batchGroupId);
@@ -53,16 +63,18 @@ const NodeTitle = ({ nodeId, title }: Props) => {
{!editable.isEditing && (
<Text
className={NO_FIT_ON_DOUBLE_CLICK_CLASS}
fontWeight="semibold"
color={batchGroupColorToken}
onDoubleClick={editable.startEditing}
sx={labelSx}
noOfLines={1}
color={batchGroupColorToken}
data-is-invalid={isInvalid}
onDoubleClick={editable.startEditing}
>
{titleWithBatchGroupId}
</Text>
)}
{editable.isEditing && (
<Input
className={NO_DRAG_CLASS}
ref={inputRef}
{...editable.inputProps}
variant="outline"
@@ -73,4 +85,4 @@ const NodeTitle = ({ nodeId, title }: Props) => {
);
};
export default memo(NodeTitle);
export default memo(InvocationNodeTitle);

View File

@@ -5,6 +5,7 @@ import { useInvocationNodeContext } from 'features/nodes/components/flow/nodes/I
import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { useMouseOverFormField, useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
import { useNodeExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode';
import { selectNodeOpacity } from 'features/nodes/store/workflowSettingsSlice';
import { DRAG_HANDLE_CLASSNAME, NO_FIT_ON_DOUBLE_CLICK_CLASS, NODE_WIDTH } from 'features/nodes/types/constants';
@@ -29,6 +30,8 @@ const NodeWrapper = (props: NodeWrapperProps) => {
const mouseOverFormField = useMouseOverFormField(nodeId);
const zoomToNode = useZoomToNode(nodeId);
const isLocked = useIsWorkflowEditorLocked();
const isInvalid = useNodeHasErrors();
const hasError = isMissingTemplate || isInvalid;
const executionState = useNodeExecutionState(nodeId);
const isInProgress = executionState?.status === zNodeStatus.enum.IN_PROGRESS;
@@ -74,7 +77,7 @@ const NodeWrapper = (props: NodeWrapperProps) => {
data-is-editor-locked={isLocked}
data-is-selected={selected}
data-is-mouse-over-form-field={mouseOverFormField.isMouseOverFormField}
data-status={isMissingTemplate ? 'error' : needsUpdate ? 'warning' : undefined}
data-status={hasError ? 'error' : needsUpdate ? 'warning' : undefined}
>
<Box sx={shadowsSx} />
<Box sx={inProgressSx} data-is-in-progress={isInProgress} />

View File

@@ -4,7 +4,7 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEditable } from 'common/hooks/useEditable';
import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
import { selectNodes } from 'features/nodes/store/selectors';
import { NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { NO_DRAG_CLASS, NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
@@ -56,6 +56,7 @@ const NonInvocationNodeTitle = ({ nodeId, title }: Props) => {
)}
{editable.isEditing && (
<Input
className={NO_DRAG_CLASS}
ref={inputRef}
{...editable.inputProps}
variant="outline"

View File

@@ -22,6 +22,7 @@ type Props = {
export const NodeFieldElementFloatSettings = memo(({ id, settings, nodeId, fieldName, fieldTemplate }: Props) => {
return (
<>
<SettingShuffle id={id} settings={settings} nodeId={nodeId} fieldName={fieldName} fieldTemplate={fieldTemplate} />
<SettingComponent
id={id}
settings={settings}
@@ -36,6 +37,29 @@ export const NodeFieldElementFloatSettings = memo(({ id, settings, nodeId, field
});
NodeFieldElementFloatSettings.displayName = 'NodeFieldElementFloatSettings';
const SettingShuffle = memo(({ id, settings }: Props) => {
const { showShuffle } = settings;
const { t } = useTranslation();
const dispatch = useAppDispatch();
const toggleShowShuffle = useCallback(() => {
const newSettings: NodeFieldFloatSettings = {
...settings,
showShuffle: !showShuffle,
};
dispatch(formElementNodeFieldDataChanged({ id, changes: { settings: newSettings } }));
}, [dispatch, id, settings, showShuffle]);
return (
<FormControl>
<FormLabel flex={1}>{t('workflows.builder.showShuffle')}</FormLabel>
<Switch size="sm" isChecked={showShuffle} onChange={toggleShowShuffle} />
</FormControl>
);
});
SettingShuffle.displayName = 'SettingShuffle';
const SettingComponent = memo(({ id, settings }: Props) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();

View File

@@ -23,6 +23,7 @@ type Props = {
export const NodeFieldElementIntegerSettings = memo(({ id, settings, nodeId, fieldName, fieldTemplate }: Props) => {
return (
<>
<SettingShuffle id={id} settings={settings} nodeId={nodeId} fieldName={fieldName} fieldTemplate={fieldTemplate} />
<SettingComponent
id={id}
settings={settings}
@@ -37,6 +38,29 @@ export const NodeFieldElementIntegerSettings = memo(({ id, settings, nodeId, fie
});
NodeFieldElementIntegerSettings.displayName = 'NodeFieldElementIntegerSettings';
+const SettingShuffle = memo(({ id, settings }: Props) => {
+const { showShuffle } = settings;
+const { t } = useTranslation();
+const dispatch = useAppDispatch();
+const toggleShowShuffle = useCallback(() => {
+const newSettings: NodeFieldIntegerSettings = {
+...settings,
+showShuffle: !showShuffle,
+};
+dispatch(formElementNodeFieldDataChanged({ id, changes: { settings: newSettings } }));
+}, [dispatch, id, settings, showShuffle]);
+return (
+<FormControl>
+<FormLabel flex={1}>{t('workflows.builder.showShuffle')}</FormLabel>
+<Switch size="sm" isChecked={showShuffle} onChange={toggleShowShuffle} />
+</FormControl>
+);
+});
+SettingShuffle.displayName = 'SettingShuffle';
const SettingComponent = memo(({ id, settings }: Props) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
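
This integer variant is line-for-line the float one with NodeFieldIntegerSettings swapped in. A generic helper could share the toggle logic between the two; the sketch below is a possible refactor under that assumption, not part of this diff:

// Any settings shape with a showShuffle flag qualifies.
type WithShuffle = { showShuffle: boolean };

function toggleShuffle<T extends WithShuffle>(settings: T): T {
  return { ...settings, showShuffle: !settings.showShuffle };
}

// One implementation serves both field types:
console.log(toggleShuffle({ component: 'number-input', showShuffle: false }).showShuffle); // true
console.log(toggleShuffle({ component: 'slider', showShuffle: true }).showShuffle); // false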

View File

@@ -34,7 +34,7 @@ import {
import { selectFormRootElementId, selectNodesSlice, selectWorkflowForm } from 'features/nodes/store/selectors';
import type { FieldInputTemplate, StatefulFieldValue } from 'features/nodes/types/field';
import type { ElementId, FormElement } from 'features/nodes/types/workflow';
-import { buildNodeFieldElement, isContainerElement } from 'features/nodes/types/workflow';
+import { buildNodeFieldElement, isContainerElement, isNodeFieldElement } from 'features/nodes/types/workflow';
import type { RefObject } from 'react';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { flushSync } from 'react-dom';
@@ -121,6 +121,29 @@ const useElementExists = () => {
return _elementExists;
};
+/**
+ * Returns a checker that reports whether a form element for a given node field
+ * already exists in the workflow form.
+ *
+ * @param nodeId The id of the node
+ * @param fieldName The name of the field
+ *
+ * @returns True if a form element for the field exists, false otherwise
+ */
+const useNodeFieldElementExists = () => {
+const store = useAppStore();
+const nodeFieldElementExists = useCallback(
+(nodeId: string, fieldName: string): boolean => {
+const form = selectWorkflowForm(store.getState());
+return Object.values(form.elements)
+.filter(isNodeFieldElement)
+.some((el) => el.data.fieldIdentifier.nodeId === nodeId && el.data.fieldIdentifier.fieldName === fieldName);
+},
+[store]
+);
+return nodeFieldElementExists;
+};
/**
* Wrapper around `getAllowedDropRegions` that provides the form state from the store.
* @see {@link getAllowedDropRegions}
@@ -368,6 +391,7 @@ export const useFormElementDnd = (
const [activeDropRegion, setActiveDropRegion] = useState<CenterOrEdge | null>(null);
const getElement = useGetElement();
const getAllowedDropRegions = useGetAllowedDropRegions();
+const nodeFieldElementExists = useNodeFieldElementExists();
useEffect(() => {
if (isRootElement) {
@@ -401,7 +425,7 @@ export const useFormElementDnd = (
// TODO(psyche): This causes a kinda jittery behaviour - need a better heuristic to determine stickiness
getIsSticky: () => false,
canDrop: ({ source }) => {
-if (isNodeFieldDndData(source.data)) {
+if (isNodeFieldDndData(source.data) && !nodeFieldElementExists(source.data.nodeId, source.data.fieldName)) {
return true;
}
if (isFormElementDndData(source.data)) {
@@ -449,7 +473,15 @@ export const useFormElementDnd = (
},
})
);
-}, [dragHandleRef, draggableRef, elementId, getAllowedDropRegions, getElement, isRootElement]);
+}, [
+dragHandleRef,
+draggableRef,
+elementId,
+getAllowedDropRegions,
+getElement,
+nodeFieldElementExists,
+isRootElement,
+]);
return [activeDropRegion, isDragging] as const;
};
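
Together, useNodeFieldElementExists and the extra canDrop condition stop the same node field from being added to the builder form twice via drag and drop. The duplicate check in isolation, with simplified stand-ins for the real FormElement types:

type NodeFieldElementSketch = {
  type: 'node-field';
  data: { fieldIdentifier: { nodeId: string; fieldName: string } };
};
type FormElementSketch = NodeFieldElementSketch | { type: 'container' };

const isNodeField = (el: FormElementSketch): el is NodeFieldElementSketch => el.type === 'node-field';

function nodeFieldElementExists(
  elements: Record<string, FormElementSketch>,
  nodeId: string,
  fieldName: string
): boolean {
  return Object.values(elements)
    .filter(isNodeField)
    .some((el) => el.data.fieldIdentifier.nodeId === nodeId && el.data.fieldIdentifier.fieldName === fieldName);
}

// A field already on the form cannot be dropped again:
const elements: Record<string, FormElementSketch> = {
  a: { type: 'node-field', data: { fieldIdentifier: { nodeId: 'noise', fieldName: 'seed' } } },
};
console.log(nodeFieldElementExists(elements, 'noise', 'seed')); // true, so canDrop would reject the drop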

View File

@@ -1,27 +0,0 @@
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { useInputFieldInstance } from 'features/nodes/hooks/useInputFieldInstance';
-import { useInputFieldTemplateOrThrow } from 'features/nodes/hooks/useInputFieldTemplateOrThrow';
-import { formElementAdded } from 'features/nodes/store/nodesSlice';
-import { selectFormRootElementId } from 'features/nodes/store/selectors';
-import { buildNodeFieldElement } from 'features/nodes/types/workflow';
-import { useCallback } from 'react';
-export const useAddNodeFieldToRoot = (nodeId: string, fieldName: string) => {
-const dispatch = useAppDispatch();
-const rootElementId = useAppSelector(selectFormRootElementId);
-const fieldTemplate = useInputFieldTemplateOrThrow(fieldName);
-const field = useInputFieldInstance(fieldName);
-const addNodeFieldToRoot = useCallback(() => {
-const element = buildNodeFieldElement(nodeId, fieldName, fieldTemplate.type);
-dispatch(
-formElementAdded({
-element,
-parentId: rootElementId,
-initialValue: field.value,
-})
-);
-}, [nodeId, fieldName, fieldTemplate.type, dispatch, rootElementId, field.value]);
-return addNodeFieldToRoot;
-};

Some files were not shown because too many files have changed in this diff.