Compare commits


190 Commits
v6.6.0 ... main

Author SHA1 Message Date
psychedelicious
3707c3b034 fix(ui): do not bake opacity when rasterizing layer adjustments 2025-09-22 11:43:08 +10:00
Mary Hipp
5885db4ab5 ruff 2025-09-19 11:07:36 -04:00
Mary Hipp
36ed9b750d restore list_queue_items method 2025-09-19 11:07:36 -04:00
psychedelicious
3cec06f86e chore(ui): typegen 2025-09-19 22:13:12 +10:00
psychedelicious
28b5f7a1c5 feat(nodes): better deprecation handling for ui_type
- Move migration of model-specific ui_types into BaseInvocation. This
gives us access to the node and field names, so the warnings are more
useful to the end user.
- Ensure we serialize the fields' json_schema_extra with enum values.
This wasn't a problem until now, when it interferes with migrating
ui_type cleanly. It's a transparent change.
- Improve warnings when validating fields (which includes the ui_type
migration logic)
2025-09-19 22:13:12 +10:00
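A minimal pydantic sketch of the enum-serialization point in the commit above, assuming plain pydantic v2; `FieldKind` here is a stub, not the real InvokeAI enum. With `use_enum_values=True`, the serialized `json_schema_extra` carries plain string values rather than enum members, which is what lets the migration logic compare values directly.

```python
# Sketch only: stub enum standing in for InvokeAI's FieldKind/UIType.
from enum import Enum

from pydantic import BaseModel, ConfigDict


class FieldKind(str, Enum):
    Input = "input"


class ExtraSketch(BaseModel):
    # use_enum_values=True coerces enum members to their values on assignment,
    # so model_dump() emits the plain string "input", not FieldKind.Input.
    model_config = ConfigDict(use_enum_values=True)
    field_kind: FieldKind


print(ExtraSketch(field_kind=FieldKind.Input).model_dump())  # {'field_kind': 'input'}
```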
psychedelicious
22cbb23ae0 fix(ui): ref images for flux kontext & api models not parsed correctly 2025-09-19 21:40:17 +10:00
Riccardo Giovanetti
4d585e3eec translationBot(ui): update translation (Italian)
Currently translated at 98.4% (2130 of 2163 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.4% (2127 of 2161 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-18 14:01:31 +10:00
psychedelicious
006b4356bb chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
da947866f2 fix(nodes): ensure SD2 models are pickable in loader/cnet nodes 2025-09-18 12:39:27 +10:00
psychedelicious
84a2cc6fc9 chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
b50534bb49 revert(nodes): do not deprecate ui_type for output fields! only deprecate the model ui types 2025-09-18 12:39:27 +10:00
psychedelicious
c305e79fee tests(ui): update tests to reflect new model parsing logic 2025-09-18 12:39:27 +10:00
psychedelicious
c32949d113 tidy(nodes): mark all UIType.*ModelField as deprecated 2025-09-18 12:39:27 +10:00
psychedelicious
87a98902da tidy(nodes): remove unused UIType.Video 2025-09-18 12:39:27 +10:00
psychedelicious
2857a446c9 docs(nodes): update docstrings for InputField 2025-09-18 12:39:27 +10:00
psychedelicious
035d9432bd feat(ui): support filtering on model format 2025-09-18 12:39:27 +10:00
psychedelicious
bdeb9fb1cf chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
dadff57061 feat(nodes): add ui_model_format filter for nodes 2025-09-18 12:39:27 +10:00
psychedelicious
480857ae4e fix(nodes): add base to SD1 model loader 2025-09-18 12:39:27 +10:00
psychedelicious
eaf0624004 feat(ui): remove explicit model type handling from workflow editor 2025-09-18 12:39:27 +10:00
psychedelicious
58bca1b9f4 feat(nodes): use new ui_model_[base|type|variant] on all core nodes 2025-09-18 12:39:27 +10:00
psychedelicious
54aa6908fa feat(ui): update invocation parsing to handle new ui_model_[base|type|variant] attrs 2025-09-18 12:39:27 +10:00
psychedelicious
e6d9daca96 chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
6e5a529cb7 feat(nodes): add ui_model_[base|type|variant] to InputField args for dynamic UI generation 2025-09-18 12:39:27 +10:00
Iq1pl
8c742a6e38 ruff format 2025-09-18 11:05:32 +10:00
Iq1pl
693373f1c1 Update ip_adapter.py
added support for NOOB-IPA-MARK1
2025-09-18 11:05:32 +10:00
Josh Corbett
4809080fd9 fix(ui): allow scrolling in ModelPane 2025-09-18 10:33:22 +10:00
psychedelicious
efcb1bea7f chore: bump version to v6.8.0rc1 2025-09-17 13:57:43 +10:00
psychedelicious
e0d7a401f3 feat(ui): make ref images croppable 2025-09-17 13:43:13 +10:00
psychedelicious
aac979e9a4 fix(ui): issue w/ setting initial aspect ratio in cropper 2025-09-17 13:43:13 +10:00
psychedelicious
3b0d7f076d tidy(ui): rename from "editor" to "cropper", minor cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
e1acbcdbd5 fix(ui): store floats for box 2025-09-17 13:43:13 +10:00
psychedelicious
7d9b81550b feat(ui): revert to original image when crop discarded 2025-09-17 13:43:13 +10:00
psychedelicious
6a447dd1fe refactor(ui): remove "apply", "start" and "cancel" concepts from editor 2025-09-17 13:43:13 +10:00
psychedelicious
c2dc63ddbc fix(ui): video graphs 2025-09-17 13:43:13 +10:00
psychedelicious
1bc689d531 docs(ui): add comments to startingframeimage 2025-09-17 13:43:13 +10:00
psychedelicious
4829975827 feat(ui): make the editor components not care about the image 2025-09-17 13:43:13 +10:00
psychedelicious
49da4e00c3 feat(ui): add concept for editable image state 2025-09-17 13:43:13 +10:00
psychedelicious
89dfe5e729 docs(ui): add comments to editor 2025-09-17 13:43:13 +10:00
psychedelicious
6816d366df tidy(ui): editor misc 2025-09-17 13:43:13 +10:00
psychedelicious
9d3d2a36c9 tidy(ui): editor listeners 2025-09-17 13:43:13 +10:00
psychedelicious
ed231044c8 refactor(ui): simplify crop constraints 2025-09-17 13:43:13 +10:00
psychedelicious
b51a232794 feat(ui): extract config to own obj 2025-09-17 13:43:13 +10:00
psychedelicious
4412143a6e feat(ui): clean up editor 2025-09-17 13:43:13 +10:00
psychedelicious
de11cafdb3 refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
4d9114aa7d refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
67e2da1ebf refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
33ecc591c3 refactor(ui): editor init 2025-09-17 13:43:13 +10:00
psychedelicious
b57459a226 chore(ui): lint 2025-09-17 13:43:13 +10:00
psychedelicious
01282b1c90 feat(ui): do not clear crop when canceling 2025-09-17 13:43:13 +10:00
psychedelicious
3f302906dc feat(ui): crop doesn't hide outside cropped region 2025-09-17 13:43:13 +10:00
psychedelicious
81d56596fb tidy(ui): cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
b536b0df0c feat(ui): misc iterate on editor 2025-09-17 13:43:13 +10:00
psychedelicious
692af1d93d feat(ui): type narrowing for editor output types 2025-09-17 13:43:13 +10:00
psychedelicious
bb7ef77b50 tidy(ui): lint/react conventions for editor component 2025-09-17 13:43:13 +10:00
psychedelicious
1862548573 feat(ui): image editor bg checkerboard pattern 2025-09-17 13:43:13 +10:00
psychedelicious
242c1b6350 feat(ui): tweak editor konva styles 2025-09-17 13:43:13 +10:00
psychedelicious
fc6e4bb04e tidy(ui): editor component cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
20841abca6 tidy(ui): editor cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
e8b69d99a4 chore(ui): lint 2025-09-17 13:43:13 +10:00
Mary Hipp
d6eaff8237 create editImageModal that takes an imageDTO, loads blob onto canvas, and allows cropping. cropped blob is uploaded as new asset 2025-09-17 13:43:13 +10:00
Mary Hipp
068b095956 show warning state with tooltip if starting frame image aspect ratio does not match the video output aspect ratio 2025-09-17 13:43:13 +10:00
psychedelicious
f795a47340 tidy(ui): remove unused translation string 2025-09-16 15:04:03 +10:00
psychedelicious
df47345eb0 feat(ui): add translation strings for prompt history 2025-09-16 15:04:03 +10:00
psychedelicious
def04095a4 feat(ui): tweak prompt history styling 2025-09-16 15:04:03 +10:00
psychedelicious
28be8f0911 refactor(ui): simplify prompt history shortcuts 2025-09-16 15:04:03 +10:00
Kent Keirsey
b50c44bac0 handle potential for invalid list item 2025-09-16 15:04:03 +10:00
Kent Keirsey
b4ce0e02fc lint 2025-09-16 15:04:03 +10:00
Kent Keirsey
d6442d9a34 Prompt history shortcuts 2025-09-16 15:04:03 +10:00
Josh Corbett
4528bcafaf feat(model manager): add ModelFooter component and reusable ModelDeleteButton 2025-09-16 12:29:57 +10:00
Josh Corbett
8b82b81ee2 fix(ModelImage): change MODEL_IMAGE_THUMBNAIL_SIZE to a local constant 2025-09-16 12:29:57 +10:00
Josh Corbett
757acdd49e feat(model manager): 💄 update model manager ui, initial commit 2025-09-16 12:29:57 +10:00
psychedelicious
94b7cc583a fix(ui): do not reset params state on studio init nav to generate tab 2025-09-16 12:25:25 +10:00
psychedelicious
b663a6bac4 chore: bump version to v6.7.0 2025-09-15 14:37:56 +10:00
psychedelicious
65d40153fb chore(ui): update whatsnew 2025-09-15 14:37:56 +10:00
Riccardo Giovanetti
c8b741a514 translationBot(ui): update translation (Italian)
Currently translated at 98.4% (2120 of 2153 strings)

translationBot(ui): update translation (Italian)

Currently translated at 97.3% (2097 of 2153 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-15 14:25:41 +10:00
psychedelicious
6d3aeffed9 fix(ui): dedupe prompt history 2025-09-15 14:22:44 +10:00
psychedelicious
203be96910 fix(ui): render popovers in portals to ensure they are on top of other ui elements 2025-09-15 14:19:54 +10:00
psychedelicious
b0aa48ddb8 feat(ui): simple prompt history 2025-09-12 10:19:48 -04:00
psychedelicious
867dbe51e5 fix(ui): extend lora weight schema to accept full range of weights
This could cause a failure to rehydrate LoRA state, or failure to recall
a LoRA.

Closes #8551
2025-09-12 11:50:10 +10:00
psychedelicious
ff8948b6f1 chore(ui): update whatsnew 2025-09-11 18:09:31 +10:00
psychedelicious
fa3a6425a6 tests(ui): update staging area test to reflect new behaviour 2025-09-11 18:09:31 +10:00
psychedelicious
c5992ece89 fix(ui): better logic in staging area when canceling the selected item 2025-09-11 18:09:31 +10:00
psychedelicious
12a6239929 chore: bump version to v6.7.0rc1 2025-09-11 18:09:31 +10:00
Riccardo Giovanetti
e9238c59f4 translationBot(ui): update translation (Italian)
Currently translated at 96.5% (2053 of 2127 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
Linos
c1cbbe51d6 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2127 of 2127 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
Hosted Weblate
4219b4a288 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

translationBot(ui): update translation files

Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
psychedelicious
48c8a9c09d chore(ui): lint 2025-09-11 17:25:57 +10:00
psychedelicious
a67efdf4ad perf(ui): optimize curves graph component
Do not use whole layer as trigger for histo recalc; use the canvas cache
of the layer - it more reliably indicates when the layer pixel data has
changed, and fixes an issue where we can miss the first histo calc due
to a race condition with async layer bbox calculation.
2025-09-11 17:25:57 +10:00
psychedelicious
d6ff9c2e49 tidy(ui): split curves graph into own component 2025-09-11 17:25:57 +10:00
psychedelicious
e768a3bc7b perf(ui): use narrow selectors in adjustments to reduce rerenders
dramatically improves the feel of the sliders
2025-09-11 17:25:57 +10:00
psychedelicious
7273700f61 fix(ui): sharpness range 2025-09-11 17:25:57 +10:00
psychedelicious
f909e81d91 feat(ui): better types & runtime guarantees for filter data stored in konva node attrs 2025-09-11 17:25:57 +10:00
psychedelicious
8c85f168f6 refactor(ui): make layer adjustments schemas/types composable 2025-09-11 17:25:57 +10:00
psychedelicious
263d86d46f fix(ui): points where x=255 sorted incorrectly 2025-09-11 17:25:57 +10:00
psychedelicious
0921805160 feat(ui): tweak adjustments panel styling 2025-09-11 17:25:57 +10:00
psychedelicious
517f4811e7 feat(ui): single action to reset adjustments 2025-09-11 17:25:57 +10:00
psychedelicious
0dc73c8803 tidy(ui): move some histogram drawing logic out of components and into callbacks 2025-09-11 17:25:57 +10:00
psychedelicious
26702b54c0 feat(ui): tweak layouts, use react conventions, disabled state 2025-09-11 17:25:57 +10:00
dunkeroni
2d65e4543f minor padding changes 2025-09-11 17:25:57 +10:00
dunkeroni
309113956b remove unknown type annotations 2025-09-11 17:25:57 +10:00
dunkeroni
0ac4099bc6 allow negative sharpness to soften 2025-09-11 17:25:57 +10:00
dunkeroni
899dc739fa defaultValue on adjusters 2025-09-11 17:25:57 +10:00
dunkeroni
4e2439fc8e remove extra edit comments 2025-09-11 17:25:57 +10:00
dunkeroni
00864c24e0 layout fixes 2025-09-11 17:25:57 +10:00
dunkeroni
b73aaa7d6f fix several points of curve editor jank 2025-09-11 17:25:57 +10:00
dunkeroni
85057ae704 splitup adjustment panel objects 2025-09-11 17:25:57 +10:00
dunkeroni
c3fb3a43a2 blue mode switch indicator 2025-09-11 17:25:57 +10:00
dunkeroni
51d0a15a1b use default factory on reset 2025-09-11 17:25:57 +10:00
dunkeroni
5991067fd9 simplify adjustments type to optional not null 2025-09-11 17:25:57 +10:00
dunkeroni
32c2d3f740 remove extra casts and types from filters.ts 2025-09-11 17:25:57 +10:00
dunkeroni
c661f86b34 fix: crop to bbox doubles adjustment filters 2025-09-11 17:25:57 +10:00
dunkeroni
cc72d8eab4 curves editor syntax and structure fixes 2025-09-11 17:25:57 +10:00
dunkeroni
e8550f9355 move constants in curves editor 2025-09-11 17:25:57 +10:00
dunkeroni
a1d0386ca4 move memoized slider to component 2025-09-11 17:25:57 +10:00
dunkeroni
495d089f85 clean up right click menu 2025-09-11 17:25:57 +10:00
dunkeroni
913b91e9dd remove redundant en.json colors 2025-09-11 17:25:57 +10:00
dunkeroni
3e907f4e14 remove extra title 2025-09-11 17:25:57 +10:00
dunkeroni
756df6ebe4 Finish button on adjustments 2025-09-11 17:25:57 +10:00
dunkeroni
2a6be99152 Fix tint not shifting green in negative direction 2025-09-11 17:25:57 +10:00
dunkeroni
3099e2bf9d fix disable toggle reverts to simple view 2025-09-11 17:25:57 +10:00
dunkeroni
6921f0412a log scale and panel width compatibility 2025-09-11 17:25:57 +10:00
dunkeroni
022d5a8863 curves editor 2025-09-11 17:25:57 +10:00
dunkeroni
af99beedc5 apply filters to operations 2025-09-11 17:25:57 +10:00
dunkeroni
f3d83dc6b7 visual adjustment filters 2025-09-11 17:25:57 +10:00
psychedelicious
ebc3f18a1a ai(ui): add CLAUDE.md to frontend 2025-09-11 13:26:39 +10:00
Mary Hipp
aeb512f8d9 ruff 2025-09-11 12:41:56 +10:00
Mary Hipp
a1810acb93 accidental commit 2025-09-11 12:41:56 +10:00
Mary Hipp
aa35a5083b remove completed_at from queue list so that created_at is the only sort option, restore field values in UI 2025-09-11 12:41:56 +10:00
psychedelicious
4f17de0b32 fix(ui): ensure mask image is deleted when no more inputs to select object 2025-09-11 12:15:41 +10:00
psychedelicious
370c3cd59b feat(ui): update select object info tooltip 2025-09-11 12:15:41 +10:00
psychedelicious
67214e16c0 tidy(ui): organize select object components 2025-09-11 12:15:41 +10:00
psychedelicious
4880a1d946 feat(nodes): accept neg coords for bbox
This actually works fine for SAM.
2025-09-11 12:15:41 +10:00
psychedelicious
0f0988610f feat(ui): spruce up UI a bit 2025-09-11 12:15:41 +10:00
psychedelicious
6805d28b7a feat(ui): increase hit area for bbox anchors 2025-09-11 12:15:41 +10:00
psychedelicious
9b45a24136 fix(ui): respect selected point type 2025-09-11 12:15:41 +10:00
psychedelicious
4e9d66a64b tidy(ui): clean up CanvasSegmentAnythingModule 2025-09-11 12:15:41 +10:00
psychedelicious
8fec530b0f fix(ui): restore old tooltip for select object
need to add translation strings for new functionality
2025-09-11 12:15:41 +10:00
psychedelicious
50c66f8671 fix(ui): select obj box moving on mmb pan 2025-09-11 12:15:41 +10:00
psychedelicious
f0aa39ea81 fix(ui): prevent bbox from following cursor after middle mouse pan
Added button checks to bbox rect and transformer mousedown/touchstart handlers to only process left clicks. Also added stage dragging check in onBboxDragMove to clear bbox drag state when middle mouse panning is active.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
faac814a3d fix(ui): prevent middle mouse from creating points in segmentation module
When middle mouse button is used for canvas panning, the pointerup event was still creating points in the segmentation module. Added button check to onBboxDragEnd handler to only process left clicks.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
fb9545bb90 fix(ui): bbox no shrinkies 2025-09-11 12:15:41 +10:00
psychedelicious
8ad2ee83b6 fix(ui): prevent bbox scale accumulation in SAM module
Fixed an issue where bounding boxes could grow exponentially when created at small sizes. The problem occurred because Konva Transformer modifies scaleX/scaleY rather than width/height directly, and the scale values weren't consistently reset after being applied to dimensions.

Changes:
- Ensure scale values are always reset to 1 after applying to dimensions
- Add minimum size constraints to prevent zero/negative dimensions
- Fix scale handling in transformend, dragend, and initial bbox creation

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
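The fix above follows a general pattern; here is a hedged, language-agnostic sketch of it in Python (the real change operates on Konva nodes in TypeScript, and `apply_transform` and the dict fields are hypothetical names):

```python
# Fold the transformer's scale into the stored dimensions, then reset it.
MIN_SIZE = 1.0  # assumed minimum to avoid zero/negative dimensions


def apply_transform(node: dict) -> None:
    # Konva's Transformer adjusts scaleX/scaleY rather than width/height,
    # so bake the scale into the dimensions with a minimum-size clamp...
    node["width"] = max(MIN_SIZE, node["width"] * node["scaleX"])
    node["height"] = max(MIN_SIZE, node["height"] * node["scaleY"])
    # ...and always reset scale to 1 so the next transform starts fresh,
    # preventing the exponential growth described above.
    node["scaleX"] = 1.0
    node["scaleY"] = 1.0
```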
psychedelicious
f8ad62b5eb tidy(backend): clean up sam pipelines 2025-09-11 12:15:41 +10:00
psychedelicious
03ae78bc7c tidy(nodes): clean up sam node 2025-09-11 12:15:41 +10:00
psychedelicious
ec1a058dbe fix(backend): issue w/ multiple bbox and sam1 2025-09-11 12:15:41 +10:00
psychedelicious
9e4d441e2e feat(ui): allow adding point inside bbox 2025-09-11 12:15:41 +10:00
psychedelicious
3770fd22f8 tidy(ui): ts issues 2025-09-11 12:15:41 +10:00
psychedelicious
a0232b0e63 feat(ui): combine points and bbox in visual mode for SAM
Revised the Select Object feature to support two input modes:
- Visual mode: Combined points and bounding box input for paired SAM inputs
- Prompt mode: Text-based object selection (unchanged)

Key changes:
- Replaced three input types (points, prompt, bbox) with two (visual, prompt)
- Visual mode supports both point and bbox inputs simultaneously
- Click to add include points, Shift+click for exclude points
- Click and drag to draw bounding box
- Fixed bbox visibility issues when adding points
- Fixed coordinate system issues for proper bbox positioning
- Added proper event handling and interaction controls

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
e1e964bf0e experiment(ui): support bboxes in select object 2025-09-11 12:15:41 +10:00
psychedelicious
1b1759cffc feat(ui): support prompt-based selection for object selection 2025-09-11 12:15:41 +10:00
psychedelicious
d828502bc8 refactor(backend): simplify segment anything APIs
There was a really confusing aspect of the SAM pipeline classes where
they accepted deeply nested lists of different dimensions (bbox, points,
and labels).

The lengths of the lists are related; each point must have a
corresponding label, and if bboxes are provided with points, they must
be same length.

I've refactored the backend API to take a single list of SAMInput
objects. This class has a bbox and/or a list of points, making it much
simpler to provide the right shape of inputs.

Internally, the pipeline classes rejigger these input classes to
have the correct nesting.

The Nodes still have an awkward API where you can provide both bboxes
and points of different lengths, so I added a pydantic validator that
enforces correct lengths.
2025-09-11 12:15:41 +10:00
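Based only on the description above, a minimal sketch of what the `SAMInput` shape might look like (the field names, point labels, and the non-empty validator are assumptions; `BoundingBox` mirrors the `BoundingBoxField` coordinates shown later in the diff):

```python
from pydantic import BaseModel, model_validator


class SAMPoint(BaseModel):
    x: int
    y: int
    label: int  # assumed convention: 1 = include, 0 = exclude


class BoundingBox(BaseModel):
    x_min: int
    y_min: int
    x_max: int
    y_max: int


class SAMInput(BaseModel):
    """One unit of SAM input: a bbox and/or a list of points."""

    bbox: BoundingBox | None = None
    points: list[SAMPoint] | None = None

    @model_validator(mode="after")
    def check_not_empty(self):
        # Assumed guard: an input with neither a bbox nor points is meaningless.
        if self.bbox is None and not self.points:
            raise ValueError("SAMInput requires a bbox, points, or both")
        return self
```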
psychedelicious
7a073b6de7 feat(ui): hold shift to add inverse point type 2025-09-11 12:15:41 +10:00
psychedelicious
338ff8d588 chore: typegen 2025-09-11 12:15:41 +10:00
psychedelicious
a3625efd3a chore: ruff 2025-09-11 12:15:41 +10:00
Kent Keirsey
5efb37fe63 consolidate into one node. 2025-09-11 12:15:41 +10:00
Kent Keirsey
aef0b81d5b fix models 2025-09-11 12:15:41 +10:00
Kent Keirsey
544edff507 update uv.lock 2025-09-11 12:15:41 +10:00
Kent Keirsey
42b1adab22 init Sam2 2025-09-11 12:15:41 +10:00
Attila Cseh
a2b9d12e88 prettier errors fixed 2025-09-10 11:28:50 +10:00
Attila Cseh
7a94fb6c04 maths enabled on numeric input fields in workflow editor 2025-09-10 11:28:50 +10:00
psychedelicious
efcd159704 fix(app): path traversal via bulk downloads paths 2025-09-10 11:18:12 +10:00
psychedelicious
997e619a9d feat(ui): address feedback 2025-09-09 14:42:30 +10:00
Attila Cseh
4bc184ff16 LoRA number input min/max restored 2025-09-09 14:42:30 +10:00
psychedelicious
0b605a745b fix(ui): route metadata to gemini node 2025-09-09 14:31:07 +10:00
Attila Cseh
22b038ce3b unused translations removed 2025-09-08 20:41:36 +10:00
psychedelicious
0bb5d647b5 tidy(app): method naming snake case 2025-09-08 20:41:36 +10:00
psychedelicious
4a3599929b fix(ui): do not pass scroll seek props to DOM in queue list 2025-09-08 20:41:36 +10:00
psychedelicious
f959ce8323 feat(ui): reduce overscan for queue
makes it a bit less sluggish
2025-09-08 20:41:36 +10:00
Attila Cseh
74e1047870 build errors fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
732881c51b createdAt column fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
107be8e166 queueSlice cleaned up 2025-09-08 20:41:36 +10:00
Attila Cseh
3c2f654da8 queue api listQueueItems removed 2025-09-08 20:41:36 +10:00
Attila Cseh
474fd44e50 status column not sortable 2025-09-08 20:41:36 +10:00
Attila Cseh
0dc5f8fd65 getQueueItemIds cache invalidation added 2025-09-08 20:41:36 +10:00
Attila Cseh
d4215fb460 isOpen refactored 2025-09-08 20:41:36 +10:00
Attila Cseh
0cd05ee9fd ListContext reverted with queryArgs 2025-09-08 20:41:36 +10:00
Attila Cseh
9fcb3af1d8 ListContext removed 2025-09-08 20:41:36 +10:00
Attila Cseh
c9da7e2172 typegen fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
9788735d6b code review fixes 2025-09-08 20:41:36 +10:00
Attila Cseh
d6139748e2 Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
602dfb1e5d Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
5bb3a78f56 Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemComponent.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
d58df1e17b schema re-generated 2025-09-08 20:41:36 +10:00
Attila Cseh
5d0e37eb2f lint errors fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
486b333cef queue list virtualized 2025-09-08 20:41:36 +10:00
Attila Cseh
6fa437af03 get_queue_itemIds endpoint created 2025-09-08 20:41:36 +10:00
Attila Cseh
787ef6fa27 ColumnSortIcon refactored 2025-09-08 20:41:36 +10:00
Attila Cseh
7f0571c229 QueueListHeaderColumnProps.field turned into SortBy 2025-09-08 20:41:36 +10:00
Attila Cseh
f5a58c0ceb QueueListHeaderColumn created 2025-09-08 20:41:36 +10:00
226 changed files with 9703 additions and 6127 deletions

View File

@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
Batch,
BatchStatus,
CancelAllExceptCurrentResult,
@@ -18,6 +17,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
DeleteByDestinationResult,
EnqueueBatchResult,
FieldIdentifier,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -25,7 +25,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
SessionQueueItemNotFoundError,
SessionQueueStatus,
)
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"])
@@ -68,36 +68,6 @@ async def enqueue_batch(
raise HTTPException(status_code=500, detail=f"Unexpected error while enqueuing batch: {e}")
@session_queue_router.get(
"/{queue_id}/list",
operation_id="list_queue_items",
responses={
200: {"model": CursorPaginatedResults[SessionQueueItem]},
},
)
async def list_queue_items(
queue_id: str = Path(description="The queue id to perform this operation on"),
limit: int = Query(default=50, description="The number of items to fetch"),
status: Optional[QUEUE_ITEM_STATUS] = Query(default=None, description="The status of items to fetch"),
cursor: Optional[int] = Query(default=None, description="The pagination cursor"),
priority: int = Query(default=0, description="The pagination cursor priority"),
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets cursor-paginated queue items"""
try:
return ApiDependencies.invoker.services.session_queue.list_queue_items(
queue_id=queue_id,
limit=limit,
status=status,
cursor=cursor,
priority=priority,
destination=destination,
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all items: {e}")
@session_queue_router.get(
"/{queue_id}/list_all",
operation_id="list_all_queue_items",
@@ -119,6 +89,56 @@ async def list_all_queue_items(
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue items: {e}")
@session_queue_router.get(
"/{queue_id}/item_ids",
operation_id="get_queue_item_ids",
responses={
200: {"model": ItemIdsResult},
},
)
async def get_queue_item_ids(
queue_id: str = Path(description="The queue id to perform this operation on"),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
) -> ItemIdsResult:
"""Gets all queue item ids that match the given parameters"""
try:
return ApiDependencies.invoker.services.session_queue.get_queue_item_ids(queue_id=queue_id, order_dir=order_dir)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue item ids: {e}")
@session_queue_router.post(
"/{queue_id}/items_by_ids",
operation_id="get_queue_items_by_item_ids",
responses={200: {"model": list[SessionQueueItem]}},
)
async def get_queue_items_by_item_ids(
queue_id: str = Path(description="The queue id to perform this operation on"),
item_ids: list[int] = Body(
embed=True, description="Object containing list of queue item ids to fetch queue items for"
),
) -> list[SessionQueueItem]:
"""Gets queue items for the specified queue item ids. Maintains order of item ids."""
try:
session_queue_service = ApiDependencies.invoker.services.session_queue
# Fetch queue items preserving the order of requested item ids
queue_items: list[SessionQueueItem] = []
for item_id in item_ids:
try:
queue_item = session_queue_service.get_queue_item(item_id=item_id)
if queue_item.queue_id != queue_id: # Auth protection for items from other queues
continue
queue_items.append(queue_item)
except Exception:
# Skip missing queue items - they may have been deleted between item id fetch and queue item fetch
continue
return queue_items
except Exception:
raise HTTPException(status_code=500, detail="Failed to get queue items")
@session_queue_router.put(
"/{queue_id}/processor/resume",
operation_id="resume",
@@ -354,7 +374,10 @@ async def get_queue_item(
) -> SessionQueueItem:
"""Gets a queue item"""
try:
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
queue_item = ApiDependencies.invoker.services.session_queue.get_queue_item(item_id=item_id)
if queue_item.queue_id != queue_id:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
return queue_item
except SessionQueueItemNotFoundError:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
except Exception as e:
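For illustration, a hedged client-side sketch of the fetch pattern these endpoints enable: get the full ordered id list once, then fetch full queue items only for the visible window. The routes and the embedded `item_ids` body come from the diff above; the base URL, the `order_dir` value, and the response field names are assumptions.

```python
import requests

# Assumed local server; the router prefix "/v1/queue" is from the diff above.
BASE = "http://127.0.0.1:9090/api/v1/queue"
QUEUE_ID = "default"

# GET /{queue_id}/item_ids -> ItemIdsResult with every matching queue item id.
# "DESC" and the "item_ids" response key are assumptions about the schema.
resp = requests.get(f"{BASE}/{QUEUE_ID}/item_ids", params={"order_dir": "DESC"})
item_ids: list[int] = resp.json()["item_ids"]

# POST /{queue_id}/items_by_ids with an embedded body -> full SessionQueueItems,
# in the requested order (missing/foreign-queue items are silently skipped).
visible_window = item_ids[:50]
items = requests.post(
    f"{BASE}/{QUEUE_ID}/items_by_ids", json={"item_ids": visible_window}
).json()
```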

View File

@@ -36,6 +36,9 @@ from pydantic_core import PydanticUndefined
from invokeai.app.invocations.fields import (
FieldKind,
Input,
InputFieldJSONSchemaExtra,
UIType,
migrate_model_ui_type,
)
from invokeai.app.services.config.config_default import get_config
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -256,7 +259,9 @@ class BaseInvocation(ABC, BaseModel):
is_intermediate: bool = Field(
default=False,
description="Whether or not this is an intermediate invocation.",
json_schema_extra={"ui_type": "IsIntermediate", "field_kind": FieldKind.NodeAttribute},
json_schema_extra=InputFieldJSONSchemaExtra(
input=Input.Direct, field_kind=FieldKind.NodeAttribute, ui_type=UIType._IsIntermediate
).model_dump(exclude_none=True),
)
use_cache: bool = Field(
default=True,
@@ -445,6 +450,15 @@ with warnings.catch_warnings():
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
def is_enum_member(value: Any, enum_class: type[Enum]) -> bool:
"""Checks if a value is a member of an enum class."""
try:
enum_class(value)
return True
except ValueError:
return False
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
"""
Validates the fields of an invocation or invocation output:
@@ -456,51 +470,99 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
"""
for name, field in model_fields.items():
if name in RESERVED_PYDANTIC_FIELD_NAMES:
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved by pydantic)')
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved by pydantic)")
if not field.annotation:
raise InvalidFieldError(f'Invalid field type "{name}" on "{model_type}" (missing annotation)')
raise InvalidFieldError(f"{model_type}.{name}: Invalid field type (missing annotation)")
if not isinstance(field.json_schema_extra, dict):
raise InvalidFieldError(
f'Invalid field definition for "{name}" on "{model_type}" (missing json_schema_extra dict)'
)
raise InvalidFieldError(f"{model_type}.{name}: Invalid field definition (missing json_schema_extra dict)")
field_kind = field.json_schema_extra.get("field_kind", None)
# must have a field_kind
if not isinstance(field_kind, FieldKind):
if not is_enum_member(field_kind, FieldKind):
raise InvalidFieldError(
f'Invalid field definition for "{name}" on "{model_type}" (maybe it\'s not an InputField or OutputField?)'
f"{model_type}.{name}: Invalid field definition for (maybe it's not an InputField or OutputField?)"
)
if field_kind is FieldKind.Input and (
if field_kind == FieldKind.Input.value and (
name in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES or name in RESERVED_INPUT_FIELD_NAMES
):
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved input field name)')
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved input field name)")
if field_kind is FieldKind.Output and name in RESERVED_OUTPUT_FIELD_NAMES:
raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved output field name)')
if field_kind == FieldKind.Output.value and name in RESERVED_OUTPUT_FIELD_NAMES:
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved output field name)")
if (field_kind is FieldKind.Internal) and name not in RESERVED_INPUT_FIELD_NAMES:
raise InvalidFieldError(
f'Invalid field name "{name}" on "{model_type}" (internal field without reserved name)'
)
if field_kind == FieldKind.Internal.value and name not in RESERVED_INPUT_FIELD_NAMES:
raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (internal field without reserved name)")
# node attribute fields *must* be in the reserved list
if (
field_kind is FieldKind.NodeAttribute
field_kind == FieldKind.NodeAttribute.value
and name not in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES
and name not in RESERVED_OUTPUT_FIELD_NAMES
):
raise InvalidFieldError(
f'Invalid field name "{name}" on "{model_type}" (node attribute field without reserved name)'
f"{model_type}.{name}: Invalid field name (node attribute field without reserved name)"
)
ui_type = field.json_schema_extra.get("ui_type", None)
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
field.json_schema_extra.pop("ui_type")
ui_model_base = field.json_schema_extra.get("ui_model_base", None)
ui_model_type = field.json_schema_extra.get("ui_model_type", None)
ui_model_variant = field.json_schema_extra.get("ui_model_variant", None)
ui_model_format = field.json_schema_extra.get("ui_model_format", None)
if ui_type is not None:
# There are 3 cases where we may need to take action:
#
# 1. The ui_type is a migratable, deprecated value. For example, ui_type=UIType.MainModel value is
# deprecated and should be migrated to:
# - ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]
# - ui_model_type=[ModelType.Main]
#
# 2. ui_type was set in conjunction with any of the new ui_model_[base|type|variant|format] fields, which
# is not allowed (they are mutually exclusive). In this case, we ignore ui_type and log a warning.
#
# 3. ui_type is a deprecated value that is not migratable. For example, ui_type=UIType.Image is deprecated;
# Image fields are now automatically detected based on the field's type annotation. In this case, we
# ignore ui_type and log a warning.
#
# The cases must be checked in this order to ensure proper handling.
# Easier to work with as an enum
ui_type = UIType(ui_type)
# The enum member values are not always the same as their names - we want to log the name so the user can
# easily review their code and see where the deprecated enum member is used.
human_readable_name = f"UIType.{ui_type.name}"
# Case 1: migratable deprecated value
did_migrate = migrate_model_ui_type(ui_type, field.json_schema_extra)
if did_migrate:
logger.warning(
f'{model_type}.{name}: Migrated deprecated "ui_type" "{human_readable_name}" to new ui_model_[base|type|variant|format] fields'
)
field.json_schema_extra.pop("ui_type")
# Case 2: mutually exclusive with new fields
elif (
ui_model_base is not None
or ui_model_type is not None
or ui_model_variant is not None
or ui_model_format is not None
):
logger.warning(
f'{model_type}.{name}: "ui_type" is mutually exclusive with "ui_model_[base|type|format|variant]", ignoring "ui_type"'
)
field.json_schema_extra.pop("ui_type")
# Case 3: deprecated value that is not migratable
elif ui_type.startswith("DEPRECATED_"):
logger.warning(f'{model_type}.{name}: Deprecated "ui_type" "{human_readable_name}", ignoring')
field.json_schema_extra.pop("ui_type")
return None

View File

@@ -5,7 +5,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import (
GlmEncoderField,
ModelIdentifierField,
@@ -14,6 +14,7 @@ from invokeai.app.invocations.model import (
)
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("cogview4_model_loader_output")
@@ -38,8 +39,9 @@ class CogView4ModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.cogview4_model,
ui_type=UIType.CogView4MainModel,
input=Input.Direct,
ui_model_base=BaseModelType.CogView4,
ui_model_type=ModelType.Main,
)
def invoke(self, context: InvocationContext) -> CogView4ModelLoaderOutput:

View File

@@ -16,7 +16,6 @@ from invokeai.app.invocations.fields import (
ImageField,
InputField,
OutputField,
UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
@@ -28,6 +27,7 @@ from invokeai.app.util.controlnet_utils import (
heuristic_resize_fast,
)
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class ControlField(BaseModel):
@@ -63,13 +63,17 @@ class ControlOutput(BaseInvocationOutput):
control: ControlField = OutputField(description=FieldDescriptions.control)
@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
@invocation(
"controlnet", title="ControlNet - SD1.5, SD2, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3"
)
class ControlNetInvocation(BaseInvocation):
"""Collects ControlNet info to pass to other nodes"""
image: ImageField = InputField(description="The control image")
control_model: ModelIdentifierField = InputField(
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
description=FieldDescriptions.controlnet_model,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.ControlNet,
)
control_weight: Union[float, List[float]] = InputField(
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"

View File

@@ -1,11 +1,19 @@
from enum import Enum
from typing import Any, Callable, Optional, Tuple
from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, model_validator
from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter
from pydantic.fields import _Unset
from pydantic_core import PydanticUndefined
from invokeai.app.util.metaenum import MetaEnum
from invokeai.backend.image_util.segment_anything.shared import BoundingBox
from invokeai.backend.model_manager.taxonomy import (
BaseModelType,
ClipVariantType,
ModelFormat,
ModelType,
ModelVariantType,
)
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
@@ -38,47 +46,15 @@ class UIType(str, Enum, metaclass=MetaEnum):
used, and the type will be ignored. They are included here for backwards compatibility.
"""
# region Model Field Types
MainModel = "MainModelField"
CogView4MainModel = "CogView4MainModelField"
FluxMainModel = "FluxMainModelField"
SD3MainModel = "SD3MainModelField"
SDXLMainModel = "SDXLMainModelField"
SDXLRefinerModel = "SDXLRefinerModelField"
ONNXModel = "ONNXModelField"
VAEModel = "VAEModelField"
FluxVAEModel = "FluxVAEModelField"
LoRAModel = "LoRAModelField"
ControlNetModel = "ControlNetModelField"
IPAdapterModel = "IPAdapterModelField"
T2IAdapterModel = "T2IAdapterModelField"
T5EncoderModel = "T5EncoderModelField"
CLIPEmbedModel = "CLIPEmbedModelField"
CLIPLEmbedModel = "CLIPLEmbedModelField"
CLIPGEmbedModel = "CLIPGEmbedModelField"
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
ControlLoRAModel = "ControlLoRAModelField"
SigLipModel = "SigLipModelField"
FluxReduxModel = "FluxReduxModelField"
LlavaOnevisionModel = "LLaVAModelField"
Imagen3Model = "Imagen3ModelField"
Imagen4Model = "Imagen4ModelField"
ChatGPT4oModel = "ChatGPT4oModelField"
Gemini2_5Model = "Gemini2_5ModelField"
FluxKontextModel = "FluxKontextModelField"
Veo3Model = "Veo3ModelField"
RunwayModel = "RunwayModelField"
# endregion
# region Misc Field Types
Scheduler = "SchedulerField"
Any = "AnyField"
Video = "VideoField"
# endregion
# region Internal Field Types
_Collection = "CollectionField"
_CollectionItem = "CollectionItemField"
_IsIntermediate = "IsIntermediate"
# endregion
# region DEPRECATED
@@ -116,13 +92,44 @@ class UIType(str, Enum, metaclass=MetaEnum):
CollectionItem = "DEPRECATED_CollectionItem"
Enum = "DEPRECATED_Enum"
WorkflowField = "DEPRECATED_WorkflowField"
IsIntermediate = "DEPRECATED_IsIntermediate"
BoardField = "DEPRECATED_BoardField"
MetadataItem = "DEPRECATED_MetadataItem"
MetadataItemCollection = "DEPRECATED_MetadataItemCollection"
MetadataItemPolymorphic = "DEPRECATED_MetadataItemPolymorphic"
MetadataDict = "DEPRECATED_MetadataDict"
# Deprecated Model Field Types - use ui_model_[base|type|variant|format] instead
MainModel = "DEPRECATED_MainModelField"
CogView4MainModel = "DEPRECATED_CogView4MainModelField"
FluxMainModel = "DEPRECATED_FluxMainModelField"
SD3MainModel = "DEPRECATED_SD3MainModelField"
SDXLMainModel = "DEPRECATED_SDXLMainModelField"
SDXLRefinerModel = "DEPRECATED_SDXLRefinerModelField"
ONNXModel = "DEPRECATED_ONNXModelField"
VAEModel = "DEPRECATED_VAEModelField"
FluxVAEModel = "DEPRECATED_FluxVAEModelField"
LoRAModel = "DEPRECATED_LoRAModelField"
ControlNetModel = "DEPRECATED_ControlNetModelField"
IPAdapterModel = "DEPRECATED_IPAdapterModelField"
T2IAdapterModel = "DEPRECATED_T2IAdapterModelField"
T5EncoderModel = "DEPRECATED_T5EncoderModelField"
CLIPEmbedModel = "DEPRECATED_CLIPEmbedModelField"
CLIPLEmbedModel = "DEPRECATED_CLIPLEmbedModelField"
CLIPGEmbedModel = "DEPRECATED_CLIPGEmbedModelField"
SpandrelImageToImageModel = "DEPRECATED_SpandrelImageToImageModelField"
ControlLoRAModel = "DEPRECATED_ControlLoRAModelField"
SigLipModel = "DEPRECATED_SigLipModelField"
FluxReduxModel = "DEPRECATED_FluxReduxModelField"
LlavaOnevisionModel = "DEPRECATED_LLaVAModelField"
Imagen3Model = "DEPRECATED_Imagen3ModelField"
Imagen4Model = "DEPRECATED_Imagen4ModelField"
ChatGPT4oModel = "DEPRECATED_ChatGPT4oModelField"
Gemini2_5Model = "DEPRECATED_Gemini2_5ModelField"
FluxKontextModel = "DEPRECATED_FluxKontextModelField"
Veo3Model = "DEPRECATED_Veo3ModelField"
RunwayModel = "DEPRECATED_RunwayModelField"
# endregion
class UIComponent(str, Enum, metaclass=MetaEnum):
"""
@@ -331,14 +338,9 @@ class ConditioningField(BaseModel):
)
class BoundingBoxField(BaseModel):
class BoundingBoxField(BoundingBox):
"""A bounding box primitive value."""
x_min: int = Field(ge=0, description="The minimum x-coordinate of the bounding box (inclusive).")
x_max: int = Field(ge=0, description="The maximum x-coordinate of the bounding box (exclusive).")
y_min: int = Field(ge=0, description="The minimum y-coordinate of the bounding box (inclusive).")
y_max: int = Field(ge=0, description="The maximum y-coordinate of the bounding box (exclusive).")
score: Optional[float] = Field(
default=None,
ge=0.0,
@@ -347,21 +349,6 @@ class BoundingBoxField(BaseModel):
"when the bounding box was produced by a detector and has an associated confidence score.",
)
@model_validator(mode="after")
def check_coords(self):
if self.x_min > self.x_max:
raise ValueError(f"x_min ({self.x_min}) is greater than x_max ({self.x_max}).")
if self.y_min > self.y_max:
raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
return self
def tuple(self) -> Tuple[int, int, int, int]:
"""
Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.
This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
"""
return (self.x_min, self.y_min, self.x_max, self.y_max)
class MetadataField(RootModel[dict[str, Any]]):
"""
@@ -428,10 +415,15 @@ class InputFieldJSONSchemaExtra(BaseModel):
ui_component: Optional[UIComponent] = None
ui_order: Optional[int] = None
ui_choice_labels: Optional[dict[str, str]] = None
ui_model_base: Optional[list[BaseModelType]] = None
ui_model_type: Optional[list[ModelType]] = None
ui_model_variant: Optional[list[ClipVariantType | ModelVariantType]] = None
ui_model_format: Optional[list[ModelFormat]] = None
model_config = ConfigDict(
validate_assignment=True,
json_schema_serialization_defaults_required=True,
use_enum_values=True,
)
@@ -484,16 +476,121 @@ class OutputFieldJSONSchemaExtra(BaseModel):
"""
field_kind: FieldKind
ui_hidden: bool
ui_type: Optional[UIType]
ui_order: Optional[int]
ui_hidden: bool = False
ui_order: Optional[int] = None
ui_type: Optional[UIType] = None
model_config = ConfigDict(
validate_assignment=True,
json_schema_serialization_defaults_required=True,
use_enum_values=True,
)
def migrate_model_ui_type(ui_type: UIType | str, json_schema_extra: dict[str, Any]) -> bool:
"""Migrate deprecated model-specifier ui_type values to new-style ui_model_[base|type|variant|format] in json_schema_extra."""
if not isinstance(ui_type, UIType):
ui_type = UIType(ui_type)
ui_model_type: list[ModelType] | None = None
ui_model_base: list[BaseModelType] | None = None
ui_model_format: list[ModelFormat] | None = None
ui_model_variant: list[ClipVariantType | ModelVariantType] | None = None
match ui_type:
case UIType.MainModel:
ui_model_base = [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]
ui_model_type = [ModelType.Main]
case UIType.CogView4MainModel:
ui_model_base = [BaseModelType.CogView4]
ui_model_type = [ModelType.Main]
case UIType.FluxMainModel:
ui_model_base = [BaseModelType.Flux]
ui_model_type = [ModelType.Main]
case UIType.SD3MainModel:
ui_model_base = [BaseModelType.StableDiffusion3]
ui_model_type = [ModelType.Main]
case UIType.SDXLMainModel:
ui_model_base = [BaseModelType.StableDiffusionXL]
ui_model_type = [ModelType.Main]
case UIType.SDXLRefinerModel:
ui_model_base = [BaseModelType.StableDiffusionXLRefiner]
ui_model_type = [ModelType.Main]
case UIType.VAEModel:
ui_model_type = [ModelType.VAE]
case UIType.FluxVAEModel:
ui_model_base = [BaseModelType.Flux]
ui_model_type = [ModelType.VAE]
case UIType.LoRAModel:
ui_model_type = [ModelType.LoRA]
case UIType.ControlNetModel:
ui_model_type = [ModelType.ControlNet]
case UIType.IPAdapterModel:
ui_model_type = [ModelType.IPAdapter]
case UIType.T2IAdapterModel:
ui_model_type = [ModelType.T2IAdapter]
case UIType.T5EncoderModel:
ui_model_type = [ModelType.T5Encoder]
case UIType.CLIPEmbedModel:
ui_model_type = [ModelType.CLIPEmbed]
case UIType.CLIPLEmbedModel:
ui_model_type = [ModelType.CLIPEmbed]
ui_model_variant = [ClipVariantType.L]
case UIType.CLIPGEmbedModel:
ui_model_type = [ModelType.CLIPEmbed]
ui_model_variant = [ClipVariantType.G]
case UIType.SpandrelImageToImageModel:
ui_model_type = [ModelType.SpandrelImageToImage]
case UIType.ControlLoRAModel:
ui_model_type = [ModelType.ControlLoRa]
case UIType.SigLipModel:
ui_model_type = [ModelType.SigLIP]
case UIType.FluxReduxModel:
ui_model_type = [ModelType.FluxRedux]
case UIType.LlavaOnevisionModel:
ui_model_type = [ModelType.LlavaOnevision]
case UIType.Imagen3Model:
ui_model_base = [BaseModelType.Imagen3]
ui_model_type = [ModelType.Main]
case UIType.Imagen4Model:
ui_model_base = [BaseModelType.Imagen4]
ui_model_type = [ModelType.Main]
case UIType.ChatGPT4oModel:
ui_model_base = [BaseModelType.ChatGPT4o]
ui_model_type = [ModelType.Main]
case UIType.Gemini2_5Model:
ui_model_base = [BaseModelType.Gemini2_5]
ui_model_type = [ModelType.Main]
case UIType.FluxKontextModel:
ui_model_base = [BaseModelType.FluxKontext]
ui_model_type = [ModelType.Main]
case UIType.Veo3Model:
ui_model_base = [BaseModelType.Veo3]
ui_model_type = [ModelType.Video]
case UIType.RunwayModel:
ui_model_base = [BaseModelType.Runway]
ui_model_type = [ModelType.Video]
case _:
pass
did_migrate = False
if ui_model_type is not None:
json_schema_extra["ui_model_type"] = [m.value for m in ui_model_type]
did_migrate = True
if ui_model_base is not None:
json_schema_extra["ui_model_base"] = [m.value for m in ui_model_base]
did_migrate = True
if ui_model_format is not None:
json_schema_extra["ui_model_format"] = [m.value for m in ui_model_format]
did_migrate = True
if ui_model_variant is not None:
json_schema_extra["ui_model_variant"] = [m.value for m in ui_model_variant]
did_migrate = True
return did_migrate
def InputField(
# copied from pydantic's Field
# TODO: Can we support default_factory?
@@ -520,35 +617,63 @@ def InputField(
ui_hidden: Optional[bool] = None,
ui_order: Optional[int] = None,
ui_choice_labels: Optional[dict[str, str]] = None,
ui_model_base: Optional[BaseModelType | list[BaseModelType]] = None,
ui_model_type: Optional[ModelType | list[ModelType]] = None,
ui_model_variant: Optional[ClipVariantType | ModelVariantType | list[ClipVariantType | ModelVariantType]] = None,
ui_model_format: Optional[ModelFormat | list[ModelFormat]] = None,
) -> Any:
"""
Creates an input field for an invocation.
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field) \
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field)
that adds a few extra parameters to support graph execution and the node editor UI.
:param Input input: [Input.Any] The kind of input this field requires. \
`Input.Direct` means a value must be provided on instantiation. \
`Input.Connection` means the value must be provided by a connection. \
`Input.Any` means either will do.
If the field is a `ModelIdentifierField`, use the `ui_model_[base|type|variant|format]` args to filter the model list
in the Workflow Editor. Otherwise, use `ui_type` to provide extra type hints for the UI.
:param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
In some situations, the field's type is not enough to infer the correct UI type. \
For example, model selection fields should render a dropdown UI component to select a model. \
Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
`MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
`UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.
Don't use both `ui_type` and `ui_model_[base|type|variant|format]` - if both are provided, a warning will be
logged and `ui_type` will be ignored.
:param UIComponent ui_component: [None] Optionally specifies a specific component to use in the UI. \
The UI will always render a suitable component, but sometimes you want something different than the default. \
For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \
For this case, you could provide `UIComponent.Textarea`.
Args:
input: The kind of input this field requires.
- `Input.Direct` means a value must be provided on instantiation.
- `Input.Connection` means the value must be provided by a connection.
- `Input.Any` means either will do.
:param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.
ui_type: Optionally provides an extra type hint for the UI. In some situations, the field's type is not enough
to infer the correct UI type. For example, Scheduler fields are enums, but we want to render a special scheduler
dropdown in the UI. Use `UIType.Scheduler` to indicate this.
:param int ui_order: [None] Specifies the order in which this field should be rendered in the UI.
ui_component: Optionally specifies a specific component to use in the UI. The UI will always render a suitable
component, but sometimes you want something different than the default. For example, a `string` field will
default to a single-line input, but you may want a multi-line textarea instead. In this case, you could use
`UIComponent.Textarea`.
:param dict[str, str] ui_choice_labels: [None] Specifies the labels to use for the choices in an enum field.
ui_hidden: Specifies whether or not this field should be hidden in the UI.
ui_order: Specifies the order in which this field should be rendered in the UI. If omitted, the field will be
rendered after all fields with an explicit order, in the order they are defined in the Invocation class.
ui_model_base: Specifies the base model architectures to filter the model list by in the Workflow Editor. For
example, `ui_model_base=BaseModelType.StableDiffusionXL` will show only SDXL architecture models. This arg is
only valid if this Input field is annotated as a `ModelIdentifierField`.
ui_model_type: Specifies the model type(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_type=ModelType.VAE` will show only VAE models. This arg is only valid if this Input field is
annotated as a `ModelIdentifierField`.
ui_model_variant: Specifies the model variant(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_variant=ModelVariantType.Inpainting` will show only inpainting models. This arg is only valid if this
Input field is annotated as a `ModelIdentifierField`.
ui_model_format: Specifies the model format(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_format=ModelFormat.Diffusers` will show only models in the diffusers format. This arg is only valid
if this Input field is annotated as a `ModelIdentifierField`.
ui_choice_labels: Specifies the labels to use for the choices in an enum field. If omitted, the enum values
will be used. This arg is only valid if the field is annotated as a `Literal`. For example,
`Literal["choice1", "choice2", "choice3"]` with `ui_choice_labels={"choice1": "Choice 1", "choice2": "Choice 2",
"choice3": "Choice 3"}` will render a dropdown with the labels "Choice 1", "Choice 2" and "Choice 3".
"""
json_schema_extra_ = InputFieldJSONSchemaExtra(
@@ -556,8 +681,6 @@ def InputField(
field_kind=FieldKind.Input,
)
if ui_type is not None:
json_schema_extra_.ui_type = ui_type
if ui_component is not None:
json_schema_extra_.ui_component = ui_component
if ui_hidden is not None:
@@ -566,6 +689,28 @@ def InputField(
json_schema_extra_.ui_order = ui_order
if ui_choice_labels is not None:
json_schema_extra_.ui_choice_labels = ui_choice_labels
if ui_model_base is not None:
if isinstance(ui_model_base, list):
json_schema_extra_.ui_model_base = ui_model_base
else:
json_schema_extra_.ui_model_base = [ui_model_base]
if ui_model_type is not None:
if isinstance(ui_model_type, list):
json_schema_extra_.ui_model_type = ui_model_type
else:
json_schema_extra_.ui_model_type = [ui_model_type]
if ui_model_variant is not None:
if isinstance(ui_model_variant, list):
json_schema_extra_.ui_model_variant = ui_model_variant
else:
json_schema_extra_.ui_model_variant = [ui_model_variant]
if ui_model_format is not None:
if isinstance(ui_model_format, list):
json_schema_extra_.ui_model_format = ui_model_format
else:
json_schema_extra_.ui_model_format = [ui_model_format]
if ui_type is not None:
json_schema_extra_.ui_type = ui_type
"""
There is a conflict between the typing of invocation definitions and the typing of an invocation's
@@ -667,20 +812,20 @@ def OutputField(
"""
Creates an output field for an invocation output.
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization)
that adds a few extra parameters to support graph execution and the node editor UI.
:param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
In some situations, the field's type is not enough to infer the correct UI type. \
For example, model selection fields should render a dropdown UI component to select a model. \
Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
`MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
`UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.
Args:
ui_type: Optionally provides an extra type hint for the UI. In some situations, the field's type is not enough
to infer the correct UI type. For example, Scheduler fields are enums, but we want to render a special scheduler
dropdown in the UI. Use `UIType.Scheduler` to indicate this.
:param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \
ui_hidden: Specifies whether or not this field should be hidden in the UI.
:param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \
ui_order: Specifies the order in which this field should be rendered in the UI. If omitted, the field will be
rendered after all fields with an explicit order, in the order they are defined in the Invocation class.
"""
return Field(
default=default,
title=title,
@@ -698,9 +843,9 @@ def OutputField(
min_length=min_length,
max_length=max_length,
json_schema_extra=OutputFieldJSONSchemaExtra(
ui_type=ui_type,
ui_hidden=ui_hidden,
ui_order=ui_order,
ui_type=ui_type,
field_kind=FieldKind.Output,
).model_dump(exclude_none=True),
)
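To make the migration concrete, a sketch of the same model field written both ways, mirroring the node diffs in this section. This assumes the InvokeAI package is importable; the bare module-level `InputField` calls are for illustration only, since the deprecation warning actually fires when a node class is defined and its fields are validated.

```python
from invokeai.app.invocations.fields import InputField, UIType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType

# Deprecated style: validate_fields() migrates this and logs a warning that
# names the node and field.
legacy = InputField(description="SDXL model", ui_type=UIType.SDXLMainModel)

# New style: explicit Workflow Editor filters, mutually exclusive with ui_type.
new = InputField(
    description="SDXL model",
    ui_model_base=BaseModelType.StableDiffusionXL,
    ui_model_type=ModelType.Main,
)
```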

View File

@@ -4,9 +4,10 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ControlLoRAField, ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("flux_control_lora_loader_output")
@@ -29,7 +30,10 @@ class FluxControlLoRALoaderInvocation(BaseInvocation):
"""LoRA model and Image to use with FLUX transformer generation."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
description=FieldDescriptions.control_lora_model,
title="Control LoRA",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.ControlLoRa,
)
image: ImageField = InputField(description="The image to encode.")
weight: float = InputField(description="The weight of the LoRA.", default=1.0)

View File

@@ -6,11 +6,12 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class FluxControlNetField(BaseModel):
@@ -57,7 +58,9 @@ class FluxControlNetInvocation(BaseInvocation):
image: ImageField = InputField(description="The control image")
control_model: ModelIdentifierField = InputField(
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
description=FieldDescriptions.controlnet_model,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.ControlNet,
)
control_weight: float | list[float] = InputField(
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"

View File

@@ -5,7 +5,7 @@ from pydantic import field_validator, model_validator
from typing_extensions import Self
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import InputField, UIType
from invokeai.app.invocations.fields import InputField
from invokeai.app.invocations.ip_adapter import (
CLIP_VISION_MODEL_MAP,
IPAdapterField,
@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.config import (
IPAdapterCheckpointConfig,
IPAdapterInvokeAIConfig,
)
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation(
@@ -36,7 +37,10 @@ class FluxIPAdapterInvocation(BaseInvocation):
image: ImageField = InputField(description="The IP-Adapter image prompt(s).")
ip_adapter_model: ModelIdentifierField = InputField(
description="The IP-Adapter model.", title="IP-Adapter Model", ui_type=UIType.IPAdapterModel
description="The IP-Adapter model.",
title="IP-Adapter Model",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.IPAdapter,
)
# Currently, the only known ViT model used by FLUX IP-Adapters is ViT-L.
clip_vision_model: Literal["ViT-L"] = InputField(description="CLIP Vision model to use.", default="ViT-L")

View File

@@ -6,10 +6,10 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import BaseModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("flux_lora_loader_output")
@@ -36,7 +36,10 @@ class FluxLoRALoaderInvocation(BaseInvocation):
"""Apply a LoRA model to a FLUX transformer and/or text encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
transformer: TransformerField | None = InputField(

View File

@@ -6,7 +6,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
@@ -17,7 +17,7 @@ from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
CheckpointConfigBase,
)
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@invocation_output("flux_model_loader_output")
@@ -46,23 +46,30 @@ class FluxModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.flux_model,
ui_type=UIType.FluxMainModel,
input=Input.Direct,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.Main,
)
t5_encoder_model: ModelIdentifierField = InputField(
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
description=FieldDescriptions.t5_encoder,
input=Input.Direct,
title="T5 Encoder",
ui_model_type=ModelType.T5Encoder,
)
clip_embed_model: ModelIdentifierField = InputField(
description=FieldDescriptions.clip_embed_model,
ui_type=UIType.CLIPEmbedModel,
input=Input.Direct,
title="CLIP Embed",
ui_model_type=ModelType.CLIPEmbed,
)
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
description=FieldDescriptions.vae_model,
title="VAE",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
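The same migration recurs in every loader in this changeset: the deprecated ui_type=UIType.*Model hint is dropped and the model constraints are declared directly. A hedged sketch of a field written against the new API, mirroring the vae_model field above (the surrounding invocation class is assumed):

from invokeai.app.invocations.fields import FieldDescriptions, InputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType

# Inside a BaseInvocation subclass body:
vae_model: ModelIdentifierField = InputField(
    description=FieldDescriptions.vae_model,
    title="VAE",
    ui_model_base=BaseModelType.Flux,  # scalar or list; normalized to a list internally
    ui_model_type=ModelType.VAE,       # replaces the deprecated UIType hint
)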

View File

@@ -18,7 +18,6 @@ from invokeai.app.invocations.fields import (
InputField,
OutputField,
TensorField,
UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
@@ -64,7 +63,8 @@ class FluxReduxInvocation(BaseInvocation):
redux_model: ModelIdentifierField = InputField(
description="The FLUX Redux model to use.",
title="FLUX Redux Model",
ui_type=UIType.FluxReduxModel,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.FluxRedux,
)
downsampling_factor: int = InputField(
ge=1,

View File

@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field, field_validator, model_validator
from typing_extensions import Self
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
@@ -85,7 +85,8 @@ class IPAdapterInvocation(BaseInvocation):
description="The IP-Adapter model.",
title="IP-Adapter Model",
ui_order=-1,
ui_type=UIType.IPAdapterModel,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.IPAdapter,
)
clip_vision_model: Literal["ViT-H", "ViT-G", "ViT-L"] = InputField(
description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",

View File

@@ -6,11 +6,12 @@ from pydantic import field_validator
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import StringOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.llava_onevision_pipeline import LlavaOnevisionPipeline
from invokeai.backend.model_manager.taxonomy import ModelType
from invokeai.backend.util.devices import TorchDevice
@@ -34,7 +35,7 @@ class LlavaOnevisionVllmInvocation(BaseInvocation):
vllm_model: ModelIdentifierField = InputField(
title="LLaVA Model Type",
description=FieldDescriptions.vllm_model,
ui_type=UIType.LlavaOnevisionModel,
ui_model_type=ModelType.LlavaOnevision,
)
@field_validator("images", mode="before")

View File

@@ -53,7 +53,7 @@ from invokeai.app.invocations.primitives import (
from invokeai.app.invocations.scheduler import SchedulerOutput
from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import ModelType, SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
from invokeai.version import __version__
@@ -473,7 +473,6 @@ class MetadataToModelOutput(BaseInvocationOutput):
model: ModelIdentifierField = OutputField(
description=FieldDescriptions.main_model,
title="Model",
ui_type=UIType.MainModel,
)
name: str = OutputField(description="Model Name", title="Name")
unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -488,7 +487,6 @@ class MetadataToSDXLModelOutput(BaseInvocationOutput):
model: ModelIdentifierField = OutputField(
description=FieldDescriptions.main_model,
title="Model",
ui_type=UIType.SDXLMainModel,
)
name: str = OutputField(description="Model Name", title="Name")
unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -519,8 +517,7 @@ class MetadataToModelInvocation(BaseInvocation, WithMetadata):
input=Input.Direct,
)
default_value: ModelIdentifierField = InputField(
description="The default model to use if not found in the metadata",
ui_type=UIType.MainModel,
description="The default model to use if not found in the metadata", ui_model_type=ModelType.Main
)
_validate_custom_label = model_validator(mode="after")(validate_custom_label)
@@ -575,7 +572,8 @@ class MetadataToSDXLModelInvocation(BaseInvocation, WithMetadata):
)
default_value: ModelIdentifierField = InputField(
description="The default SDXL Model to use if not found in the metadata",
ui_type=UIType.SDXLMainModel,
ui_model_type=ModelType.Main,
ui_model_base=BaseModelType.StableDiffusionXL,
)
_validate_custom_label = model_validator(mode="after")(validate_custom_label)

View File

@@ -9,7 +9,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager.config import (
@@ -145,7 +145,7 @@ class ModelIdentifierInvocation(BaseInvocation):
@invocation(
"main_model_loader",
title="Main Model - SD1.5",
title="Main Model - SD1.5, SD2",
tags=["model"],
category="model",
version="1.0.4",
@@ -153,7 +153,11 @@ class ModelIdentifierInvocation(BaseInvocation):
class MainModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""
model: ModelIdentifierField = InputField(description=FieldDescriptions.main_model, ui_type=UIType.MainModel)
model: ModelIdentifierField = InputField(
description=FieldDescriptions.main_model,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2],
ui_model_type=ModelType.Main,
)
# TODO: precision?
def invoke(self, context: InvocationContext) -> ModelLoaderOutput:
@@ -187,7 +191,10 @@ class LoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.StableDiffusion1,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
@@ -250,7 +257,9 @@ class LoRASelectorInvocation(BaseInvocation):
"""Selects a LoRA model and weight."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
@@ -332,7 +341,10 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.StableDiffusionXL,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
@@ -473,13 +485,26 @@ class SDXLLoRACollectionLoader(BaseInvocation):
@invocation(
"vae_loader", title="VAE Model - SD1.5, SDXL, SD3, FLUX", tags=["vae", "model"], category="model", version="1.0.4"
"vae_loader",
title="VAE Model - SD1.5, SD2, SDXL, SD3, FLUX",
tags=["vae", "model"],
category="model",
version="1.0.4",
)
class VAELoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput"""
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, title="VAE", ui_type=UIType.VAEModel
description=FieldDescriptions.vae_model,
title="VAE",
ui_model_base=[
BaseModelType.StableDiffusion1,
BaseModelType.StableDiffusion2,
BaseModelType.StableDiffusionXL,
BaseModelType.StableDiffusion3,
BaseModelType.Flux,
],
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> VAEOutput:

View File

@@ -6,14 +6,14 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
preprocess_t5_encoder_model_identifier,
preprocess_t5_tokenizer_model_identifier,
)
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ClipVariantType, ModelType, SubModelType
@invocation_output("sd3_model_loader_output")
@@ -39,36 +39,43 @@ class Sd3ModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sd3_model,
ui_type=UIType.SD3MainModel,
input=Input.Direct,
ui_model_base=BaseModelType.StableDiffusion3,
ui_model_type=ModelType.Main,
)
t5_encoder_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.t5_encoder,
ui_type=UIType.T5EncoderModel,
input=Input.Direct,
title="T5 Encoder",
default=None,
ui_model_type=ModelType.T5Encoder,
)
clip_l_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.clip_embed_model,
ui_type=UIType.CLIPLEmbedModel,
input=Input.Direct,
title="CLIP L Encoder",
default=None,
ui_model_type=ModelType.CLIPEmbed,
ui_model_variant=ClipVariantType.L,
)
clip_g_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.clip_g_model,
ui_type=UIType.CLIPGEmbedModel,
input=Input.Direct,
title="CLIP G Encoder",
default=None,
ui_model_type=ModelType.CLIPEmbed,
ui_model_variant=ClipVariantType.G,
)
vae_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.vae_model, ui_type=UIType.VAEModel, title="VAE", default=None
description=FieldDescriptions.vae_model,
title="VAE",
default=None,
ui_model_base=BaseModelType.StableDiffusion3,
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> Sd3ModelLoaderOutput:

View File

@@ -1,8 +1,8 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@invocation_output("sdxl_model_loader_output")
@@ -29,7 +29,9 @@ class SDXLModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl base model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_main_model, ui_type=UIType.SDXLMainModel
description=FieldDescriptions.sdxl_main_model,
ui_model_base=BaseModelType.StableDiffusionXL,
ui_model_type=ModelType.Main,
)
# TODO: precision?
@@ -67,7 +69,9 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl refiner model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_refiner_model, ui_type=UIType.SDXLRefinerModel
description=FieldDescriptions.sdxl_refiner_model,
ui_model_base=BaseModelType.StableDiffusionXLRefiner,
ui_model_type=ModelType.Main,
)
# TODO: precision?

View File

@@ -1,72 +1,75 @@
from enum import Enum
from itertools import zip_longest
from pathlib import Path
from typing import Literal
import numpy as np
import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import AutoProcessor
from pydantic import BaseModel, Field, model_validator
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor
from transformers.models.sam2 import Sam2Model
from transformers.models.sam2.processing_sam2 import Sam2Processor
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import BoundingBoxField, ImageField, InputField, TensorField
from invokeai.app.invocations.primitives import MaskOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.segment_anything.mask_refinement import mask_to_polygon, polygon_to_mask
from invokeai.backend.image_util.segment_anything.segment_anything_2_pipeline import SegmentAnything2Pipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.image_util.segment_anything.shared import SAMInput, SAMPoint
SegmentAnythingModelKey = Literal["segment-anything-base", "segment-anything-large", "segment-anything-huge"]
SegmentAnythingModelKey = Literal[
"segment-anything-base",
"segment-anything-large",
"segment-anything-huge",
"segment-anything-2-tiny",
"segment-anything-2-small",
"segment-anything-2-base",
"segment-anything-2-large",
]
SEGMENT_ANYTHING_MODEL_IDS: dict[SegmentAnythingModelKey, str] = {
"segment-anything-base": "facebook/sam-vit-base",
"segment-anything-large": "facebook/sam-vit-large",
"segment-anything-huge": "facebook/sam-vit-huge",
"segment-anything-2-tiny": "facebook/sam2.1-hiera-tiny",
"segment-anything-2-small": "facebook/sam2.1-hiera-small",
"segment-anything-2-base": "facebook/sam2.1-hiera-base-plus",
"segment-anything-2-large": "facebook/sam2.1-hiera-large",
}
class SAMPointLabel(Enum):
negative = -1
neutral = 0
positive = 1
class SAMPoint(BaseModel):
x: int = Field(..., description="The x-coordinate of the point")
y: int = Field(..., description="The y-coordinate of the point")
label: SAMPointLabel = Field(..., description="The label of the point")
class SAMPointsField(BaseModel):
points: list[SAMPoint] = Field(..., description="The points of the object")
points: list[SAMPoint] = Field(..., description="The points of the object", min_length=1)
def to_list(self) -> list[list[int]]:
def to_list(self) -> list[list[float]]:
return [[point.x, point.y, point.label.value] for point in self.points]
@invocation(
"segment_anything",
title="Segment Anything",
tags=["prompt", "segmentation"],
tags=["prompt", "segmentation", "sam", "sam2"],
category="segmentation",
version="1.2.0",
version="1.3.0",
)
class SegmentAnythingInvocation(BaseInvocation):
"""Runs a Segment Anything Model."""
"""Runs a Segment Anything Model (SAM or SAM2)."""
# Reference:
# - https://arxiv.org/pdf/2304.02643
# - https://huggingface.co/docs/transformers/v4.43.3/en/model_doc/grounding-dino#grounded-sam
# - https://github.com/NielsRogge/Transformers-Tutorials/blob/a39f33ac1557b02ebfb191ea7753e332b5ca933f/Grounding%20DINO/GroundingDINO_with_Segment_Anything.ipynb
model: SegmentAnythingModelKey = InputField(description="The Segment Anything model to use.")
model: SegmentAnythingModelKey = InputField(description="The Segment Anything model to use (SAM or SAM2).")
image: ImageField = InputField(description="The image to segment.")
bounding_boxes: list[BoundingBoxField] | None = InputField(
default=None, description="The bounding boxes to prompt the SAM model with."
default=None, description="The bounding boxes to prompt the model with."
)
point_lists: list[SAMPointsField] | None = InputField(
default=None,
description="The list of point lists to prompt the SAM model with. Each list of points represents a single object.",
description="The list of point lists to prompt the model with. Each list of points represents a single object.",
)
apply_polygon_refinement: bool = InputField(
description="Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging).",
@@ -77,14 +80,18 @@ class SegmentAnythingInvocation(BaseInvocation):
default="all",
)
@model_validator(mode="after")
def validate_points_and_boxes_len(self):
if self.point_lists is not None and self.bounding_boxes is not None:
if len(self.point_lists) != len(self.bounding_boxes):
raise ValueError("If both point_lists and bounding_boxes are provided, they must have the same length.")
return self
@torch.no_grad()
def invoke(self, context: InvocationContext) -> MaskOutput:
# The models expect a 3-channel RGB image.
image_pil = context.images.get_pil(self.image.image_name, mode="RGB")
if self.point_lists is not None and self.bounding_boxes is not None:
raise ValueError("Only one of point_lists or bounding_box can be provided.")
if (not self.bounding_boxes or len(self.bounding_boxes) == 0) and (
not self.point_lists or len(self.point_lists) == 0
):
@@ -111,26 +118,38 @@ class SegmentAnythingInvocation(BaseInvocation):
# model, and figure out how to make it work in the pipeline.
# torch_dtype=TorchDevice.choose_torch_dtype(),
)
sam_processor = AutoProcessor.from_pretrained(model_path, local_files_only=True)
assert isinstance(sam_processor, SamProcessor)
sam_processor = SamProcessor.from_pretrained(model_path, local_files_only=True)
return SegmentAnythingPipeline(sam_model=sam_model, sam_processor=sam_processor)
def _segment(self, context: InvocationContext, image: Image.Image) -> list[torch.Tensor]:
"""Use Segment Anything (SAM) to generate masks given an image + a set of bounding boxes."""
# Convert the bounding boxes to the SAM input format.
sam_bounding_boxes = (
[[bb.x_min, bb.y_min, bb.x_max, bb.y_max] for bb in self.bounding_boxes] if self.bounding_boxes else None
)
sam_points = [p.to_list() for p in self.point_lists] if self.point_lists else None
@staticmethod
def _load_sam_2_model(model_path: Path):
sam2_model = Sam2Model.from_pretrained(model_path, local_files_only=True)
sam2_processor = Sam2Processor.from_pretrained(model_path, local_files_only=True)
return SegmentAnything2Pipeline(sam2_model=sam2_model, sam2_processor=sam2_processor)
with (
context.models.load_remote_model(
source=SEGMENT_ANYTHING_MODEL_IDS[self.model], loader=SegmentAnythingInvocation._load_sam_model
) as sam_pipeline,
):
assert isinstance(sam_pipeline, SegmentAnythingPipeline)
masks = sam_pipeline.segment(image=image, bounding_boxes=sam_bounding_boxes, point_lists=sam_points)
def _segment(self, context: InvocationContext, image: Image.Image) -> list[torch.Tensor]:
"""Use Segment Anything (SAM or SAM2) to generate masks given an image + a set of bounding boxes."""
source = SEGMENT_ANYTHING_MODEL_IDS[self.model]
inputs: list[SAMInput] = []
for bbox_field, point_field in zip_longest(self.bounding_boxes or [], self.point_lists or [], fillvalue=None):
inputs.append(
SAMInput(
bounding_box=bbox_field,
points=point_field.points if point_field else None,
)
)
if "sam2" in source:
loader = SegmentAnythingInvocation._load_sam_2_model
with context.models.load_remote_model(source=source, loader=loader) as pipeline:
assert isinstance(pipeline, SegmentAnything2Pipeline)
masks = pipeline.segment(image=image, inputs=inputs)
else:
loader = SegmentAnythingInvocation._load_sam_model
with context.models.load_remote_model(source=source, loader=loader) as pipeline:
assert isinstance(pipeline, SegmentAnythingPipeline)
masks = pipeline.segment(image=image, inputs=inputs)
masks = self._process_masks(masks)
if self.apply_polygon_refinement:
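The zip_longest pairing above lets a request carry only bounding boxes, only point lists, or an uneven mix; the shorter list is padded with None so each SAMInput can hold either prompt type. A standalone illustration with placeholder values:

from itertools import zip_longest

bounding_boxes = ["box0", "box1"]  # placeholders for BoundingBoxField values
point_lists = ["points0"]          # placeholder for a single SAMPointsField

for bbox, points in zip_longest(bounding_boxes, point_lists, fillvalue=None):
    print(bbox, points)
# box0 points0
# box1 None   (the second input gets a bounding box but no points)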

View File

@@ -11,7 +11,6 @@ from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
InputField,
UIType,
WithBoard,
WithMetadata,
)
@@ -19,6 +18,7 @@ from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import ModelType
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.tiles.tiles import calc_tiles_min_overlap
from invokeai.backend.tiles.utils import TBLR, Tile
@@ -33,7 +33,7 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
image_to_image_model: ModelIdentifierField = InputField(
title="Image-to-Image Model",
description=FieldDescriptions.spandrel_image_to_image_model,
ui_type=UIType.SpandrelImageToImageModel,
ui_model_type=ModelType.SpandrelImageToImage,
)
tile_size: int = InputField(
default=512, description="The tile size for tiled image-to-image. Set to 0 to disable tiling."

View File

@@ -8,11 +8,12 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class T2IAdapterField(BaseModel):
@@ -60,7 +61,8 @@ class T2IAdapterInvocation(BaseInvocation):
description="The T2I-Adapter model.",
title="T2I-Adapter Model",
ui_order=-1,
ui_type=UIType.T2IAdapterModel,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.T2IAdapter,
)
weight: Union[float, list[float]] = InputField(
default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"

View File

@@ -150,4 +150,15 @@ class BulkDownloadService(BulkDownloadBase):
def _is_valid_path(self, path: Union[str, Path]) -> bool:
"""Validates the path given for a bulk download."""
path = path if isinstance(path, Path) else Path(path)
return path.exists()
# Resolve the path to handle any path traversal attempts (e.g., ../)
resolved_path = path.resolve()
# The path must resolve to a direct child of the bulk downloads folder; traversal out of it is rejected
does_not_traverse = resolved_path.parent == self._bulk_downloads_folder.resolve()
# The path must exist and be a .zip file
does_exist = resolved_path.exists()
is_zip_file = resolved_path.suffix == ".zip"
return does_exist and is_zip_file and does_not_traverse
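resolve() collapses any ../ segments before the parent comparison, so a traversal attempt cannot masquerade as a child of the bulk downloads folder. A quick illustration with hypothetical paths:

from pathlib import Path

bulk = Path("/data/bulk_downloads").resolve()

candidate = Path("/data/bulk_downloads/../etc/passwd").resolve()
print(candidate)                 # /etc/passwd; the ../ is collapsed by resolve()
print(candidate.parent == bulk)  # False; rejected regardless of existence

ok = Path("/data/bulk_downloads/images.zip").resolve()
print(ok.parent == bulk and ok.suffix == ".zip")  # True; the real check also requires the file to exist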

View File

@@ -234,8 +234,8 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
error_type: Optional[str] = Field(default=None, description="The error type, if any")
error_message: Optional[str] = Field(default=None, description="The error message, if any")
error_traceback: Optional[str] = Field(default=None, description="The error traceback, if any")
created_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was created")
updated_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was last updated")
created_at: str = Field(description="The timestamp when the queue item was created")
updated_at: str = Field(description="The timestamp when the queue item was last updated")
started_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was started")
completed_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was completed")
batch_status: BatchStatus = Field(description="The status of the batch")
@@ -258,8 +258,8 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
error_type=queue_item.error_type,
error_message=queue_item.error_message,
error_traceback=queue_item.error_traceback,
created_at=str(queue_item.created_at) if queue_item.created_at else None,
updated_at=str(queue_item.updated_at) if queue_item.updated_at else None,
created_at=str(queue_item.created_at),
updated_at=str(queue_item.updated_at),
started_at=str(queue_item.started_at) if queue_item.started_at else None,
completed_at=str(queue_item.completed_at) if queue_item.completed_at else None,
batch_status=batch_status,

View File

@@ -15,6 +15,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -23,6 +24,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
)
from invokeai.app.services.shared.graph import GraphExecutionState
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
class SessionQueueBase(ABC):
@@ -145,7 +147,7 @@ class SessionQueueBase(ABC):
status: Optional[QUEUE_ITEM_STATUS] = None,
destination: Optional[str] = None,
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets a page of session queue items"""
"""Gets a page of session queue items. Do not remove."""
pass
@abstractmethod
@@ -157,9 +159,18 @@ class SessionQueueBase(ABC):
"""Gets all queue items that match the given parameters"""
pass
@abstractmethod
def get_queue_item_ids(
self,
queue_id: str,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
) -> ItemIdsResult:
"""Gets all queue item ids that match the given parameters"""
pass
@abstractmethod
def get_queue_item(self, item_id: int) -> SessionQueueItem:
"""Gets a session queue item by ID"""
"""Gets a session queue item by ID for a given queue"""
pass
@abstractmethod

View File

@@ -176,6 +176,14 @@ DEFAULT_QUEUE_ID = "default"
QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"]
class ItemIdsResult(BaseModel):
"""Response containing ordered item ids with metadata for optimistic updates."""
item_ids: list[int] = Field(description="Ordered list of item ids")
total_count: int = Field(description="Total number of queue items matching the query")
NodeFieldValueValidator = TypeAdapter(list[NodeFieldValue])

View File

@@ -22,6 +22,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -34,6 +35,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
)
from invokeai.app.services.shared.graph import GraphExecutionState
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
@@ -671,6 +673,26 @@ class SqliteSessionQueue(SessionQueueBase):
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
return items
def get_queue_item_ids(
self,
queue_id: str,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
) -> ItemIdsResult:
with self._db.transaction() as cursor_:
query = f"""--sql
SELECT item_id
FROM session_queue
WHERE queue_id = ?
ORDER BY created_at {order_dir.value}
"""
query_params = [queue_id]
cursor_.execute(query, query_params)
result = cast(list[sqlite3.Row], cursor_.fetchall())
item_ids = [row[0] for row in result]
return ItemIdsResult(item_ids=item_ids, total_count=len(item_ids))
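Interpolating order_dir.value into the query is safe here because SQLiteDirection is a closed enum rather than user-controlled text, while queue_id still goes through a bound parameter. A hedged usage sketch; the constructed queue instance is an assumption:

from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection

# `queue` is assumed to be an initialized SqliteSessionQueue.
result = queue.get_queue_item_ids("default", order_dir=SQLiteDirection.Descending)
print(result.total_count, result.item_ids[:5])  # e.g. 42 [105, 104, 103, 102, 101]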
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
with self._db.transaction() as cursor:
cursor.execute(

View File

@@ -0,0 +1,109 @@
from typing import Optional
import torch
from PIL import Image
# Import SAM2 components - these should be available in transformers 4.56.0+
from transformers.models.sam2 import Sam2Model
from transformers.models.sam2.processing_sam2 import Sam2Processor
from invokeai.backend.image_util.segment_anything.shared import SAMInput
from invokeai.backend.raw_model import RawModel
class SegmentAnything2Pipeline(RawModel):
"""A wrapper class for the transformers SAM2 model and processor that makes it compatible with the model manager."""
def __init__(self, sam2_model: Sam2Model, sam2_processor: Sam2Processor):
"""Initialize the SAM2 pipeline.
Args:
sam2_model: The SAM2 model
sam2_processor: The SAM2 processor (can be Sam2Processor or Sam2VideoProcessor)
"""
self._sam2_model = sam2_model
self._sam2_processor = sam2_processor
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
# HACK: The SAM2 pipeline may not work on MPS devices. We only allow it to be moved to CPU or CUDA.
if device is not None and device.type not in {"cpu", "cuda"}:
device = None
self._sam2_model.to(device=device, dtype=dtype)
def calc_size(self) -> int:
# HACK: Fix the circular import issue.
from invokeai.backend.model_manager.load.model_util import calc_module_size
return calc_module_size(self._sam2_model)
def segment(
self,
image: Image.Image,
inputs: list[SAMInput],
) -> torch.Tensor:
"""Segment the image using the provided inputs.
Args:
image: The image to segment.
inputs: A list of SAMInput objects containing bounding boxes and/or point lists.
Returns:
torch.Tensor: The segmentation masks. dtype: torch.bool. shape: [num_masks, channels, height, width].
"""
input_boxes: list[list[float]] = []
input_points: list[list[list[float]]] = []
input_labels: list[list[int]] = []
for i in inputs:
box: list[float] | None = None
points: list[list[float]] | None = None
labels: list[int] | None = None
if i.bounding_box is not None:
box: list[float] | None = [
i.bounding_box.x_min,
i.bounding_box.y_min,
i.bounding_box.x_max,
i.bounding_box.y_max,
]
if i.points is not None:
points = []
labels = []
for point in i.points:
points.append([point.x, point.y])
labels.append(point.label.value)
if box is not None:
input_boxes.append(box)
if points is not None:
input_points.append(points)
if labels is not None:
input_labels.append(labels)
batched_input_boxes = [input_boxes] if input_boxes else None
batched_input_points = [input_points] if input_points else None
batched_input_labels = [input_labels] if input_labels else None
processed_inputs = self._sam2_processor(
images=image,
input_boxes=batched_input_boxes,
input_points=batched_input_points,
input_labels=batched_input_labels,
return_tensors="pt",
).to(self._sam2_model.device)
# Generate masks using the SAM2 model
outputs = self._sam2_model(**processed_inputs)
# Post-process the masks to get the final segmentation
masks = self._sam2_processor.post_process_masks(
masks=outputs.pred_masks,
original_sizes=processed_inputs.original_sizes,
reshaped_input_sizes=processed_inputs.reshaped_input_sizes,
)
# There should be only one batch.
assert len(masks) == 1
return masks[0]
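A hedged usage sketch for the new pipeline's segment API; the image path and the pre-loaded pipeline object are illustrative assumptions:

from PIL import Image

from invokeai.backend.image_util.segment_anything.shared import BoundingBox, SAMInput

image = Image.open("example.png").convert("RGB")  # hypothetical input image
inputs = [SAMInput(bounding_box=BoundingBox(x_min=10, y_min=10, x_max=200, y_max=200))]

# `pipeline` is assumed to be a SegmentAnything2Pipeline built by _load_sam_2_model.
masks = pipeline.segment(image=image, inputs=inputs)
print(masks.shape, masks.dtype)  # [num_masks, channels, height, width], torch.bool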

View File

@@ -1,20 +1,13 @@
from typing import Optional, TypeAlias
from typing import Optional
import torch
from PIL import Image
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor
from invokeai.backend.image_util.segment_anything.shared import SAMInput
from invokeai.backend.raw_model import RawModel
# Type aliases for the inputs to the SAM model.
ListOfBoundingBoxes: TypeAlias = list[list[int]]
"""A list of bounding boxes. Each bounding box is in the format [xmin, ymin, xmax, ymax]."""
ListOfPoints: TypeAlias = list[list[int]]
"""A list of points. Each point is in the format [x, y]."""
ListOfPointLabels: TypeAlias = list[int]
"""A list of SAM point labels. Each label is an integer where -1 is background, 0 is neutral, and 1 is foreground."""
class SegmentAnythingPipeline(RawModel):
"""A wrapper class for the transformers SAM model and processor that makes it compatible with the model manager."""
@@ -38,55 +31,65 @@ class SegmentAnythingPipeline(RawModel):
def segment(
self,
image: Image.Image,
bounding_boxes: list[list[int]] | None = None,
point_lists: list[list[list[int]]] | None = None,
inputs: list[SAMInput],
) -> torch.Tensor:
"""Run the SAM model.
Either bounding_boxes or point_lists must be provided. If both are provided, bounding_boxes will be used and
point_lists will be ignored.
"""Segment the image using the provided inputs.
Args:
image (Image.Image): The image to segment.
bounding_boxes (list[list[int]]): The bounding box prompts. Each bounding box is in the format
[xmin, ymin, xmax, ymax].
point_lists (list[list[list[int]]]): The points prompts. Each point is in the format [x, y, label].
`label` is an integer where -1 is background, 0 is neutral, and 1 is foreground.
image: The image to segment.
inputs: A list of SAMInput objects containing bounding boxes and/or point lists.
Returns:
torch.Tensor: The segmentation masks. dtype: torch.bool. shape: [num_masks, channels, height, width].
"""
# Prep the inputs:
# - Create a list of bounding boxes or points and labels.
# - Add a batch dimension of 1 to the inputs.
if bounding_boxes:
input_boxes: list[ListOfBoundingBoxes] | None = [bounding_boxes]
input_points: list[ListOfPoints] | None = None
input_labels: list[ListOfPointLabels] | None = None
elif point_lists:
input_boxes: list[ListOfBoundingBoxes] | None = None
input_points: list[ListOfPoints] | None = []
input_labels: list[ListOfPointLabels] | None = []
for point_list in point_lists:
input_points.append([[p[0], p[1]] for p in point_list])
input_labels.append([p[2] for p in point_list])
input_boxes: list[list[float]] = []
input_points: list[list[list[float]]] = []
input_labels: list[list[int]] = []
else:
raise ValueError("Either bounding_boxes or points and labels must be provided.")
for i in inputs:
box: list[float] | None = None
points: list[list[float]] | None = None
labels: list[int] | None = None
inputs = self._sam_processor(
if i.bounding_box is not None:
box: list[float] | None = [
i.bounding_box.x_min,
i.bounding_box.y_min,
i.bounding_box.x_max,
i.bounding_box.y_max,
]
if i.points is not None:
points = []
labels = []
for point in i.points:
points.append([point.x, point.y])
labels.append(point.label.value)
if box is not None:
input_boxes.append(box)
if points is not None:
input_points.append(points)
if labels is not None:
input_labels.append(labels)
batched_input_boxes = [input_boxes] if input_boxes else None
batched_input_points = input_points if input_points else None
batched_input_labels = input_labels if input_labels else None
processed_inputs = self._sam_processor(
images=image,
input_boxes=input_boxes,
input_points=input_points,
input_labels=input_labels,
input_boxes=batched_input_boxes,
input_points=batched_input_points,
input_labels=batched_input_labels,
return_tensors="pt",
).to(self._sam_model.device)
outputs = self._sam_model(**inputs)
outputs = self._sam_model(**processed_inputs)
masks = self._sam_processor.post_process_masks(
masks=outputs.pred_masks,
original_sizes=inputs.original_sizes,
reshaped_input_sizes=inputs.reshaped_input_sizes,
original_sizes=processed_inputs.original_sizes,
reshaped_input_sizes=processed_inputs.reshaped_input_sizes,
)
# There should be only one batch.

View File

@@ -0,0 +1,49 @@
from enum import Enum
from pydantic import BaseModel, model_validator
from pydantic.fields import Field
class BoundingBox(BaseModel):
x_min: int = Field(..., description="The minimum x-coordinate of the bounding box (inclusive).")
x_max: int = Field(..., description="The maximum x-coordinate of the bounding box (exclusive).")
y_min: int = Field(..., description="The minimum y-coordinate of the bounding box (inclusive).")
y_max: int = Field(..., description="The maximum y-coordinate of the bounding box (exclusive).")
@model_validator(mode="after")
def check_coords(self):
if self.x_min > self.x_max:
raise ValueError(f"x_min ({self.x_min}) is greater than x_max ({self.x_max}).")
if self.y_min > self.y_max:
raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
return self
def tuple(self) -> tuple[int, int, int, int]:
"""
Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.
This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
"""
return (self.x_min, self.y_min, self.x_max, self.y_max)
class SAMPointLabel(Enum):
negative = -1
neutral = 0
positive = 1
class SAMPoint(BaseModel):
x: int = Field(..., description="The x-coordinate of the point")
y: int = Field(..., description="The y-coordinate of the point")
label: SAMPointLabel = Field(..., description="The label of the point")
class SAMInput(BaseModel):
bounding_box: BoundingBox | None = Field(None, description="The bounding box to use for segmentation")
points: list[SAMPoint] | None = Field(None, description="The points to use for segmentation")
@model_validator(mode="after")
def check_input(self):
if not self.bounding_box and not self.points:
raise ValueError("Either bounding_box or points must be provided")
return self
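The validator makes empty prompts unrepresentable: every SAMInput must carry a bounding box, points, or both. A brief demonstration with illustrative coordinates:

from invokeai.backend.image_util.segment_anything.shared import (
    BoundingBox,
    SAMInput,
    SAMPoint,
    SAMPointLabel,
)

SAMInput(points=[SAMPoint(x=5, y=5, label=SAMPointLabel.positive)])     # ok
SAMInput(bounding_box=BoundingBox(x_min=0, y_min=0, x_max=8, y_max=8))  # ok
# SAMInput() raises a ValidationError: "Either bounding_box or points must be provided"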

View File

@@ -207,15 +207,24 @@ class IPAdapterPlusXL(IPAdapterPlus):
def load_ip_adapter_tensors(ip_adapter_ckpt_path: pathlib.Path, device: str) -> IPAdapterStateDict:
state_dict: IPAdapterStateDict = {"ip_adapter": {}, "image_proj": {}}
state_dict: IPAdapterStateDict = {
"ip_adapter": {},
"image_proj": {},
"adapter_modules": {}, # added for noobai-mark-ipa
"image_proj_model": {}, # added for noobai-mark-ipa
}
if ip_adapter_ckpt_path.suffix == ".safetensors":
model = safetensors.torch.load_file(ip_adapter_ckpt_path, device=device)
for key in model.keys():
if key.startswith("image_proj."):
state_dict["image_proj"][key.replace("image_proj.", "")] = model[key]
elif key.startswith("ip_adapter."):
if key.startswith("ip_adapter."):
state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = model[key]
elif key.startswith("image_proj_model."):
state_dict["image_proj_model"][key.replace("image_proj_model.", "")] = model[key]
elif key.startswith("image_proj."):
state_dict["image_proj"][key.replace("image_proj.", "")] = model[key]
elif key.startswith("adapter_modules."):
state_dict["adapter_modules"][key.replace("adapter_modules.", "")] = model[key]
else:
raise RuntimeError(f"Encountered unexpected IP Adapter state dict key: '{key}'.")
else:
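Because every prefix tested above ends with a dot, "image_proj_model." keys never match startswith("image_proj."), so the new buckets stay disjoint from the old ones regardless of branch order. A minimal routing sketch with placeholder keys and values:

# Placeholder tensors; key names are illustrative.
model = {
    "ip_adapter.0.to_k.weight": "t0",
    "image_proj_model.latents": "t1",
    "image_proj.proj.weight": "t2",
    "adapter_modules.0.scale": "t3",
}
buckets = {"ip_adapter": {}, "image_proj": {}, "image_proj_model": {}, "adapter_modules": {}}
for key, tensor in model.items():
    for prefix in ("ip_adapter.", "image_proj_model.", "image_proj.", "adapter_modules."):
        if key.startswith(prefix):
            buckets[prefix[:-1]][key[len(prefix):]] = tensor
            break
print(buckets["image_proj_model"])  # {'latents': 't1'}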

View File

@@ -0,0 +1,39 @@
# Bash commands
All commands should be run from `<REPO_ROOT>/invokeai/frontend/web/`.
- `pnpm lint:prettier`: check formatting
- `pnpm lint:eslint`: check for linting issues
- `pnpm lint:knip`: check for unused dependencies
- `pnpm lint:dpdm`: check for dependency cycles
- `pnpm lint:tsc`: check for TypeScript issues
- `pnpm lint`: run all checks
- `pnpm fix`: automatically fix issues where possible
- `pnpm test:no-watch`: run the test suite
# Writing Tests
This repo uses `vitest` for unit tests.
Tests should be colocated with the code they test, and should use the `.test.ts` suffix.
Tests do not need to be written for code that is trivial or has no logic (e.g. simple type definitions, re-exports, etc.). We currently do not do UI tests.
# Agents
- Use @agent-javascript-pro and @agent-typescript-pro for JavaScript and TypeScript code generation and assistance.
- Use @frontend-developer for general frontend development tasks.
## Workflow
Split up tasks into smaller subtasks and handle them one at a time using an agent. Ensure each subtask is completed before moving on to the next.
Each agent should maintain a work log in a markdown file.
When an agent completes a task, it should:
1. Summarize the changes made.
2. List any files that were added, modified, or deleted.
3. Commit the changes with a descriptive commit message.
DO NOT PUSH ANY CHANGES TO THE REMOTE REPOSITORY.

View File

@@ -45,7 +45,7 @@
"@dagrejs/dagre": "^1.1.5",
"@dagrejs/graphlib": "^2.2.4",
"@fontsource-variable/inter": "^5.2.6",
"@invoke-ai/ui-library": "^0.0.46",
"@invoke-ai/ui-library": "^0.0.47",
"@nanostores/react": "^1.0.0",
"@observ33r/object-equals": "^1.1.5",
"@reduxjs/toolkit": "2.8.2",

View File

@@ -27,8 +27,8 @@ importers:
specifier: ^5.2.6
version: 5.2.6
'@invoke-ai/ui-library':
specifier: ^0.0.46
version: 0.0.46(@chakra-ui/system@2.6.2(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(react@18.3.1))(@fontsource-variable/inter@5.2.6)(@types/react@18.3.23)(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)
specifier: ^0.0.47
version: 0.0.47(@chakra-ui/system@2.6.2(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(react@18.3.1))(@fontsource-variable/inter@5.2.6)(@types/react@18.3.23)(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)
'@nanostores/react':
specifier: ^1.0.0
version: 1.0.0(nanostores@1.0.1)(react@18.3.1)
@@ -887,8 +887,8 @@ packages:
resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==}
engines: {node: '>=18.18'}
'@invoke-ai/ui-library@0.0.46':
resolution: {integrity: sha512-3YBuWWhRbTUHi0RZKeyvDEvweoyZmeBdUGJIhemjdAgGx6l98rAMeCs8IQH+SYjSAIhiGRGf45fQ33PDK8Jkmw==}
'@invoke-ai/ui-library@0.0.47':
resolution: {integrity: sha512-zmO2bAkkqT2yhkHjrsDnYio3YNKYyBSJXDZFmTSxWdK58UM2+Zq3h7cpVbDgS7Dzo4RXdF7p+DdlYPm2iIey5A==}
peerDependencies:
'@fontsource-variable/inter': ^5.0.16
react: ^18.2.0
@@ -968,13 +968,6 @@ packages:
'@mux/playback-core@0.30.1':
resolution: {integrity: sha512-rnO1NE9xHDyzbAkmE6ygJYcD7cyyMt7xXqWTykxlceaoSXLjUqgp42HDio7Lcidto4x/O4FIa7ztjV2aCBCXgQ==}
'@nanostores/react@0.7.3':
resolution: {integrity: sha512-/XuLAMENRu/Q71biW4AZ4qmU070vkZgiQ28gaTSNRPm2SZF5zGAR81zPE1MaMB4SeOp6ZTst92NBaG75XSspNg==}
engines: {node: ^18.0.0 || >=20.0.0}
peerDependencies:
nanostores: ^0.9.0 || ^0.10.0 || ^0.11.0
react: '>=18.0.0'
'@nanostores/react@1.0.0':
resolution: {integrity: sha512-eDduyNy+lbQJMg6XxZ/YssQqF6b4OXMFEZMYKPJCCmBevp1lg0g+4ZRi94qGHirMtsNfAWKNwsjOhC+q1gvC+A==}
engines: {node: ^20.0.0 || >=22.0.0}
@@ -2423,6 +2416,9 @@ packages:
resolution: {integrity: sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==}
engines: {node: '>= 0.4'}
es-toolkit@1.39.10:
resolution: {integrity: sha512-E0iGnTtbDhkeczB0T+mxmoVlT4YNweEKBLq7oaU4p11mecdsZpNWOglI4895Vh4usbQ+LsJiuLuI2L0Vdmfm2w==}
es-toolkit@1.39.7:
resolution: {integrity: sha512-ek/wWryKouBrZIjkwW2BFf91CWOIMvoy2AE5YYgUrfWsJQM2Su1LoLtrw8uusEpN9RfqLlV/0FVNjT0WMv8Bxw==}
@@ -3198,9 +3194,6 @@ packages:
resolution: {integrity: sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
lodash-es@4.17.21:
resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==}
lodash.merge@4.6.2:
resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
@@ -3249,6 +3242,9 @@ packages:
resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==}
engines: {node: '>=10'}
math-expression-evaluator@2.0.7:
resolution: {integrity: sha512-uwliJZ6BPHRq4eiqNWxZBDzKUiS5RIynFFcgchqhBOloVLVBpZpNG8jRYkedLcBvhph8TnRyWEuxPqiQcwIdog==}
math-intrinsics@1.1.0:
resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
engines: {node: '>= 0.4'}
@@ -3352,10 +3348,6 @@ packages:
engines: {node: ^18 || >=20}
hasBin: true
nanostores@0.11.4:
resolution: {integrity: sha512-k1oiVNN4hDK8NcNERSZLQiMfRzEGtfnvZvdBvey3SQbgn8Dcrk0h1I6vpxApjb10PFUflZrgJ2WEZyJQ+5v7YQ==}
engines: {node: ^18.0.0 || >=20.0.0}
nanostores@1.0.1:
resolution: {integrity: sha512-kNZ9xnoJYKg/AfxjrVL4SS0fKX++4awQReGqWnwTRHxeHGZ1FJFVgTqr/eMrNQdp0Tz7M7tG/TDaX8QfHDwVCw==}
engines: {node: ^20.0.0 || >=22.0.0}
@@ -3449,12 +3441,12 @@ packages:
overlayscrollbars: ^2.0.0
react: '>=16.8.0'
overlayscrollbars@2.10.0:
resolution: {integrity: sha512-diNMeEafWTE0A4GJfwRpdBp2rE/BEvrhptBdBcDu8/UeytWcdCy9Td8tZWnztJeJ26f8/uHCWfPnPUC/dtgJdw==}
overlayscrollbars@2.11.4:
resolution: {integrity: sha512-GKYQo3OZ1QWnppNjQVv5hfpn+glYUxc6+ufW+ivdXUyLWFNc01XoH2Z36KGM4I8e5pXYeA3ElNItcXiLvmUhnQ==}
overlayscrollbars@2.12.0:
resolution: {integrity: sha512-mWJ5MOkcZ/ljHwfLw8+bN0V9ziGCoNoqULcp994j5DTGNQvnkWKWkA7rnO29Kyew5AoHxUnJ4Ndqfcl0HSQjXg==}
own-keys@1.0.1:
resolution: {integrity: sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==}
engines: {node: '>= 0.4'}
@@ -3687,6 +3679,22 @@ packages:
typescript:
optional: true
react-i18next@15.7.3:
resolution: {integrity: sha512-AANws4tOE+QSq/IeMF/ncoHlMNZaVLxpa5uUGW1wjike68elVYr0018L9xYoqBr1OFO7G7boDPrbn0HpMCJxTw==}
peerDependencies:
i18next: '>= 25.4.1'
react: '>= 16.8.0'
react-dom: '*'
react-native: '*'
typescript: ^5
peerDependenciesMeta:
react-dom:
optional: true
react-native:
optional: true
typescript:
optional: true
react-icons@5.5.0:
resolution: {integrity: sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==}
peerDependencies:
@@ -3743,8 +3751,8 @@ packages:
react: ^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
react-dom: ^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc
react-select@5.10.1:
resolution: {integrity: sha512-roPEZUL4aRZDx6DcsD+ZNreVl+fM8VsKn0Wtex1v4IazH60ILp5xhdlp464IsEAlJdXeD+BhDAFsBVMfvLQueA==}
react-select@5.10.2:
resolution: {integrity: sha512-Z33nHdEFWq9tfnfVXaiM12rbJmk+QjFEztWLtmXqQhz6Al4UZZ9xc0wiatmGtUOCCnHN0WizL3tCMYRENX4rVQ==}
peerDependencies:
react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
@@ -5119,7 +5127,7 @@ snapshots:
'@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1)':
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.28.3
'@emotion/babel-plugin': 11.13.5
'@emotion/is-prop-valid': 1.3.1
'@emotion/react': 11.14.0(@types/react@18.3.23)(react@18.3.1)
@@ -5290,7 +5298,7 @@ snapshots:
'@humanwhocodes/retry@0.4.3': {}
'@invoke-ai/ui-library@0.0.46(@chakra-ui/system@2.6.2(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(react@18.3.1))(@fontsource-variable/inter@5.2.6)(@types/react@18.3.23)(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)':
'@invoke-ai/ui-library@0.0.47(@chakra-ui/system@2.6.2(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(react@18.3.1))(@fontsource-variable/inter@5.2.6)(@types/react@18.3.23)(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)':
dependencies:
'@chakra-ui/anatomy': 2.3.4
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.9(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(framer-motion@10.18.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
@@ -5302,18 +5310,19 @@ snapshots:
'@emotion/react': 11.14.0(@types/react@18.3.23)(react@18.3.1)
'@emotion/styled': 11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1)
'@fontsource-variable/inter': 5.2.6
'@nanostores/react': 0.7.3(nanostores@0.11.4)(react@18.3.1)
'@nanostores/react': 1.0.0(nanostores@1.0.1)(react@18.3.1)
chakra-react-select: 4.10.1(@chakra-ui/react@2.10.9(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@emotion/styled@11.14.1(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(framer-motion@10.18.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(@emotion/react@11.14.0(@types/react@18.3.23)(react@18.3.1))(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
es-toolkit: 1.39.10
framer-motion: 10.18.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
lodash-es: 4.17.21
nanostores: 0.11.4
overlayscrollbars: 2.10.0
overlayscrollbars-react: 0.5.6(overlayscrollbars@2.10.0)(react@18.3.1)
math-expression-evaluator: 2.0.7
nanostores: 1.0.1
overlayscrollbars: 2.12.0
overlayscrollbars-react: 0.5.6(overlayscrollbars@2.12.0)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-i18next: 15.6.0(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)
react-i18next: 15.7.3(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3)
react-icons: 5.5.0(react@18.3.1)
react-select: 5.10.1(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react-select: 5.10.2(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
transitivePeerDependencies:
- '@chakra-ui/system'
- '@types/react'
@@ -5434,11 +5443,6 @@ snapshots:
hls.js: 1.6.9
mux-embed: 5.11.0
'@nanostores/react@0.7.3(nanostores@0.11.4)(react@18.3.1)':
dependencies:
nanostores: 0.11.4
react: 18.3.1
'@nanostores/react@1.0.0(nanostores@1.0.1)(react@18.3.1)':
dependencies:
nanostores: 1.0.1
@@ -7038,6 +7042,8 @@ snapshots:
is-date-object: 1.1.0
is-symbol: 1.1.1
es-toolkit@1.39.10: {}
es-toolkit@1.39.7: {}
esbuild-register@3.6.0(esbuild@0.25.6):
@@ -7869,8 +7875,6 @@ snapshots:
dependencies:
p-locate: 6.0.0
lodash-es@4.17.21: {}
lodash.merge@4.6.2: {}
lodash.mergewith@4.6.2: {}
@@ -7916,6 +7920,8 @@ snapshots:
dependencies:
semver: 7.7.2
math-expression-evaluator@2.0.7: {}
math-intrinsics@1.1.0: {}
mdn-data@2.0.14: {}
@@ -8012,8 +8018,6 @@ snapshots:
nanoid@5.1.5: {}
nanostores@0.11.4: {}
nanostores@1.0.1: {}
native-promise-only@0.8.1: {}
@@ -8120,20 +8124,20 @@ snapshots:
strip-ansi: 6.0.1
wcwidth: 1.0.1
overlayscrollbars-react@0.5.6(overlayscrollbars@2.10.0)(react@18.3.1):
dependencies:
overlayscrollbars: 2.10.0
react: 18.3.1
overlayscrollbars-react@0.5.6(overlayscrollbars@2.11.4)(react@18.3.1):
dependencies:
overlayscrollbars: 2.11.4
react: 18.3.1
overlayscrollbars@2.10.0: {}
overlayscrollbars-react@0.5.6(overlayscrollbars@2.12.0)(react@18.3.1):
dependencies:
overlayscrollbars: 2.12.0
react: 18.3.1
overlayscrollbars@2.11.4: {}
overlayscrollbars@2.12.0: {}
own-keys@1.0.1:
dependencies:
get-intrinsic: 1.3.0
@@ -8293,7 +8297,7 @@ snapshots:
react-clientside-effect@1.2.8(react@18.3.1):
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.28.3
react: 18.3.1
react-colorful@5.6.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
@@ -8342,7 +8346,7 @@ snapshots:
react-focus-lock@2.13.6(@types/react@18.3.23)(react@18.3.1):
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.28.3
focus-lock: 1.3.6
prop-types: 15.8.1
react: 18.3.1
@@ -8371,6 +8375,16 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
typescript: 5.8.3
react-i18next@15.7.3(i18next@25.3.2(typescript@5.8.3))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3):
dependencies:
'@babel/runtime': 7.28.3
html-parse-stringify: 3.0.1
i18next: 25.3.2(typescript@5.8.3)
react: 18.3.1
optionalDependencies:
react-dom: 18.3.1(react@18.3.1)
typescript: 5.8.3
react-icons@5.5.0(react@18.3.1):
dependencies:
react: 18.3.1
@@ -8430,9 +8444,9 @@ snapshots:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-select@5.10.1(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
react-select@5.10.2(@types/react@18.3.23)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@babel/runtime': 7.27.6
'@babel/runtime': 7.28.3
'@emotion/cache': 11.14.0
'@emotion/react': 11.14.0(@types/react@18.3.23)(react@18.3.1)
'@floating-ui/dom': 1.7.2

View File

@@ -873,7 +873,6 @@
"batchQueuedDesc_other": "{{count}} Einträge an {{direction}} der Wartschlange hinzugefügt",
"openQueue": "Warteschlange öffnen",
"batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
"batchFieldValues": "Stapelverarbeitungswerte",
"batchQueued": "Stapelverarbeitung eingereiht",
"graphQueued": "Graph eingereiht",
"graphFailedToQueue": "Fehler beim Einreihen des Graphen",

View File

@@ -104,6 +104,7 @@
"copy": "Copy",
"copyError": "$t(gallery.copy) Error",
"clipboard": "Clipboard",
"crop": "Crop",
"on": "On",
"off": "Off",
"or": "or",
@@ -242,7 +243,10 @@
"resultSubtitle": "Choose how to handle the expanded prompt:",
"replace": "Replace",
"insert": "Insert",
"discard": "Discard"
"discard": "Discard",
"noPromptHistory": "No prompt history recorded.",
"noMatchingPrompts": "No matching prompts in history.",
"toSwitchBetweenPrompts": "to switch between prompts."
},
"queue": {
"queue": "Queue",
@@ -298,7 +302,7 @@
"completedIn": "Completed in",
"batch": "Batch",
"origin": "Origin",
"destination": "Destination",
"destination": "Dest",
"upscaling": "Upscaling",
"canvas": "Canvas",
"generation": "Generation",
@@ -324,7 +328,13 @@
"iterations_other": "Iterations",
"generations_one": "Generation",
"generations_other": "Generations",
"batchSize": "Batch Size"
"batchSize": "Batch Size",
"createdAt": "Created At",
"completedAt": "Completed At",
"sortColumn": "Sort Column",
"sortBy": "Sort by {{column}}",
"sortOrderAscending": "Ascending",
"sortOrderDescending": "Descending"
},
"invocationCache": {
"invocationCache": "Invocation Cache",
@@ -474,6 +484,14 @@
"title": "Focus Prompt",
"desc": "Move cursor focus to the positive prompt."
},
"promptHistoryPrev": {
"title": "Previous Prompt in History",
"desc": "When the prompt is focused, move to the previous (older) prompt in your history."
},
"promptHistoryNext": {
"title": "Next Prompt in History",
"desc": "When the prompt is focused, move to the next (newer) prompt in your history."
},
"toggleLeftPanel": {
"title": "Toggle Left Panel",
"desc": "Show or hide the left panel."
@@ -1252,6 +1270,7 @@
"infillColorValue": "Fill Color",
"info": "Info",
"startingFrameImage": "Start Frame",
"startingFrameImageAspectRatioWarning": "Image aspect ratio does not match the video aspect ratio ({{videoAspectRatio}}). This could lead to unexpected cropping during video generation.",
"invoke": {
"addingImagesTo": "Adding images to",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
@@ -2077,6 +2096,24 @@
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
"pullBboxIntoReferenceImageError": "Problem Pulling BBox Into ReferenceImage",
"addAdjustments": "Add Adjustments",
"removeAdjustments": "Remove Adjustments",
"adjustments": {
"simple": "Simple",
"curves": "Curves",
"heading": "Adjustments",
"expand": "Expand adjustments",
"collapse": "Collapse adjustments",
"brightness": "Brightness",
"contrast": "Contrast",
"saturation": "Saturation",
"temperature": "Temperature",
"tint": "Tint",
"sharpness": "Sharpness",
"finish": "Finish",
"reset": "Reset",
"master": "Master"
},
"regionIsEmpty": "Selected region is empty",
"mergeVisible": "Merge Visible",
"mergeDown": "Merge Down",
@@ -2448,12 +2485,21 @@
"saveAs": "Save As",
"cancel": "Cancel",
"process": "Process",
"help1": "Select a single target object. Add <Bold>Include</Bold> and <Bold>Exclude</Bold> points to indicate which parts of the layer are part of the target object.",
"help2": "Start with one <Bold>Include</Bold> point within the target object. Add more points to refine the selection. Fewer points typically produce better results.",
"help3": "Invert the selection to select everything except the target object.",
"desc": "Select a single target object. After selection is complete, click <Bold>Apply</Bold> to discard everything outside the selected area, or save the selection as a new layer.",
"visualModeDesc": "Visual mode uses box and point inputs to select an object.",
"visualMode1": "Click and drag to draw a box around the object you want to select. You may get better results by drawing the box a bit larger or smaller than the object.",
"visualMode2": "Click to add a green <Bold>include</Bold> point, or shift-click to add a red <Bold>exclude</Bold> point to tell the model what to include or exclude.",
"visualMode3": "Points can be used to refine a box selection or used independently.",
"promptModeDesc": "Prompt mode uses text input to select an object.",
"promptMode1": "Type a brief description of the object you want to select.",
"promptMode2": "Use simple language, avoiding complex descriptions or multiple objects.",
"clickToAdd": "Click on the layer to add a point",
"dragToMove": "Drag a point to move it",
"clickToRemove": "Click on a point to remove it"
"clickToRemove": "Click on a point to remove it",
"model": "Model",
"segmentAnything1": "Segment Anything 1",
"segmentAnything2": "Segment Anything 2",
"prompt": "Selection Prompt"
},
"settings": {
"snapToGrid": {
@@ -2748,8 +2794,9 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Canvas: Separate foreground and background colors - toggle with 'x', reset to black and white with 'd'",
"LoRAs: Set default weights for individual LoRAs in the Model Manager tab"
"Select Object v2: Improved object selection with point and box inputs or text prompts.",
"Raster Layer Adjustments: Easily adjust layer brightness, contrast, saturation, curves and more.",
"Prompt History: Review and quickly recall your last 100 prompts."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -443,7 +443,6 @@
"other": "Otro",
"queueFront": "Añadir al principio de la cola",
"gallery": "Galería",
"batchFieldValues": "Valores de procesamiento por lotes",
"session": "Sesión",
"notReady": "La cola aún no está lista",
"graphQueued": "Gráfico en cola",

View File

@@ -645,7 +645,6 @@
"batchQueued": "Lot ajouté à la file d'attente",
"gallery": "Galerie",
"notReady": "Impossible d'ajouter à la file d'attente",
"batchFieldValues": "Valeurs Champ Lot",
"front": "début",
"graphQueued": "Graph ajouté à la file d'attente",
"other": "Autre",
@@ -2098,10 +2097,7 @@
"pointType": "Type de point",
"exclude": "Exclure",
"process": "Traiter",
"reset": "Réinitialiser",
"help1": "Sélectionnez un seul objet cible. Ajoutez des points <Bold>Inclure</Bold> et <Bold>Exclure</Bold> pour indiquer quelles parties de la couche font partie de l'objet cible.",
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
"reset": "Réinitialiser"
},
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",

View File

@@ -124,7 +124,15 @@
"fullView": "Vista completa",
"removeNegativePrompt": "Rimuovi prompt negativo",
"addNegativePrompt": "Aggiungi prompt negativo",
"selectYourModel": "Seleziona il modello"
"selectYourModel": "Seleziona il modello",
"goTo": "Vai a",
"imageFailedToLoad": "Impossibile caricare l'immagine",
"localSystem": "Sistema locale",
"notInstalled": "Non $t(common.installed)",
"prevPage": "Pagina precedente",
"nextPage": "Pagina successiva",
"resetToDefaults": "Ripristina impostazioni predefinite",
"crop": "Ritaglia"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -194,7 +202,14 @@
"deleteVideo_other": "Elimina {{count}} video",
"deleteVideoPermanent": "I video eliminati non possono essere ripristinati.",
"videos": "Video",
"videosTab": "Video creati e salvati in Invoke."
"videosTab": "Video creati e salvati in Invoke.",
"jump": "Salta",
"noVideoSelected": "Nessun video selezionato",
"noImagesInGallery": "Nessuna immagine da visualizzare",
"unableToLoad": "Impossibile caricare la Galleria",
"selectAnImageToCompare": "Seleziona un'immagine da confrontare",
"openViewer": "Apri Visualizzatore",
"closeViewer": "Chiudi Visualizzatore"
},
"hotkeys": {
"searchHotkeys": "Cerca tasti di scelta rapida",
@@ -264,6 +279,14 @@
"selectVideoTab": {
"title": "Seleziona la scheda Video",
"desc": "Seleziona la scheda Video."
},
"promptHistoryPrev": {
"title": "Prompt precedente nella cronologia",
"desc": "Quando il prompt è attivo, passa al prompt precedente (più vecchio) nella cronologia."
},
"promptHistoryNext": {
"title": "Prossimo prompt nella cronologia",
"desc": "Quando il prompt è attivo, passa al prompt successivo (più recente) nella cronologia."
}
},
"hotkeys": "Tasti di scelta rapida",
@@ -718,12 +741,18 @@
"recommendedModels": "Modelli consigliati",
"exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
"welcome": "Benvenuti in Gestione Modelli",
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare."
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
"quickStart": "Pacchetti di avvio rapido",
"browseAll": "Oppure sfoglia tutti i modelli disponibili:"
},
"launchpadTab": "Rampa di lancio",
"installBundle": "Installa pacchetto",
"installBundleMsg1": "Vuoi davvero installare il pacchetto {{bundleName}}?",
"installBundleMsg2": "Questo pacchetto installerà i seguenti {{count}} modelli:"
"installBundleMsg2": "Questo pacchetto installerà i seguenti {{count}} modelli:",
"filterModels": "Filtra i modelli",
"ipAdapters": "Adattatori IP",
"showOnlyRelatedModels": "Correlati",
"starterModelsInModelManager": "I modelli di avvio possono essere trovati in Gestione Modelli"
},
"parameters": {
"images": "Immagini",
@@ -807,7 +836,12 @@
"promptExpansionPending": "Espansione del prompt in corso",
"noStartingFrameImage": "Nessuna immagine del fotogramma iniziale",
"videoIsDisabled": "La generazione di video non è abilitata per gli account {{accountType}}.",
"incompatibleLoRAs": "Aggiunti LoRA incompatibili"
"incompatibleLoRAs": "Aggiunti LoRA incompatibili",
"emptyBatches": "lotti vuoti",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la larghezza del riquadro è {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), l'altezza del riquadro è {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la larghezza ridimensionata del riquadro è {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), l'altezza ridimensionata del riquadro è {{height}}"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",
@@ -848,7 +882,10 @@
"videoActions": "Azioni video",
"sendToVideo": "Invia al Video",
"video": "Video",
"resolution": "Risoluzione"
"resolution": "Risoluzione",
"downloadImage": "Scarica l'immagine",
"showOptionsPanel": "Mostra pannello laterale (O o T)",
"startingFrameImageAspectRatioWarning": "Le proporzioni dell'immagine non corrispondono alle proporzioni del video ({{videoAspectRatio}}). Ciò potrebbe causare ritagli imprevisti durante la generazione del video."
},
"settings": {
"models": "Modelli",
@@ -886,7 +923,9 @@
"confirmOnNewSession": "Conferma su nuova sessione",
"enableModelDescriptions": "Abilita le descrizioni dei modelli nei menu a discesa",
"showDetailedInvocationProgress": "Mostra dettagli avanzamento",
"enableHighlightFocusedRegions": "Evidenzia le regioni interessate"
"enableHighlightFocusedRegions": "Evidenzia le regioni interessate",
"modelDescriptionsDisabled": "Descrizioni dei modelli nei menu a discesa disabilitate",
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disattivate. Abilitale nelle Impostazioni."
},
"toast": {
"uploadFailed": "Caricamento fallito",
@@ -967,7 +1006,27 @@
"noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
"noVisibleMasks": "Nessuna maschera visibile",
"maskInvertFailed": "Impossibile invertire la maschera",
"maskInverted": "Maschera invertita"
"maskInverted": "Maschera invertita",
"uploadFailedInvalidUploadDesc_withCount_one": "Deve essere presente al massimo 1 immagine PNG, JPEG o WEBP.",
"uploadFailedInvalidUploadDesc_withCount_many": "Devono essere presenti al massimo {{count}} immagini PNG, JPEG o WEBP.",
"uploadFailedInvalidUploadDesc_withCount_other": "Devono essere presenti al massimo {{count}} immagini PNG, JPEG o WEBP.",
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
"imageSaved": "Immagine salvata",
"imageSavingFailed": "Salvataggio dell'immagine non riuscito",
"invalidUpload": "Caricamento non valido",
"layerSavedToAssets": "Livello salvato nelle risorse",
"noRasterLayers": "Nessun livello raster trovato",
"noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
"noActiveRasterLayers": "Nessun livello raster attivo",
"noActiveRasterLayersDesc": "Abilita almeno un livello raster da esportare in PSD",
"failedToProcessLayers": "Impossibile elaborare i livelli",
"noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
"setControlImage": "Imposta come immagine di controllo",
"setNodeField": "Imposta come campo nodo",
"noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
"noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
"invalidBbox": "Riquadro di delimitazione non valido",
"invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide"
},
"accessibility": {
"invokeProgressBar": "Barra di avanzamento generazione",
@@ -1017,7 +1076,7 @@
"workflowVersion": "Versione",
"workflow": "Flusso di lavoro",
"noWorkflow": "Nessun flusso di lavoro",
"workflowTags": "Tag",
"workflowTags": "Etichette",
"workflowValidation": "Errore di convalida del flusso di lavoro",
"workflowAuthor": "Autore",
"workflowName": "Nome",
@@ -1048,7 +1107,7 @@
"cannotConnectToSelf": "Impossibile connettersi a se stesso",
"loadingNodes": "Caricamento nodi...",
"enum": "Enumeratore",
"float": "In virgola mobile",
"float": "Decimale",
"currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi",
"fieldTypesMustMatch": "I tipi di campo devono corrispondere",
"edge": "Collegamento",
@@ -1161,7 +1220,23 @@
"alignmentUL": "In alto a sinistra",
"alignmentDL": "In basso a sinistra",
"alignmentUR": "In alto a destra"
}
},
"generatorLoading": "caricamento",
"addLinearView": "Aggiungi alla vista lineare",
"hideLegendNodes": "Nascondi legenda tipo di campo",
"mismatchedVersion": "Nodo non valido: il nodo {{node}} di tipo {{type}} ha una versione non corrispondente (provare ad aggiornare?)",
"noFieldsLinearview": "Nessun campo aggiunto alla vista lineare",
"removeLinearView": "Rimuovi dalla vista lineare",
"reorderLinearView": "Riordina vista lineare",
"showLegendNodes": "Mostra legenda tipo di campo",
"unableToLoadWorkflow": "Impossibile caricare il flusso di lavoro",
"unknownTemplate": "Modello sconosciuto",
"unknownInput": "Input sconosciuto: {{name}}",
"loadingTemplates": "Caricamento in corso {{name}}",
"versionUnknown": " Versione sconosciuta",
"generateValues": "Genera valori",
"floatRangeGenerator": "Generatore di intervallo di numeri decimali",
"integerRangeGenerator": "Generatore di intervallo di numeri interi"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1213,7 +1288,10 @@
"movingVideosToBoard_other": "Spostamento di {{count}} video sulla bacheca:",
"videosWithCount_one": "{{count}} video",
"videosWithCount_many": "{{count}} video",
"videosWithCount_other": "{{count}} video"
"videosWithCount_other": "{{count}} video",
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate.",
"hideBoards": "Nascondi bacheche",
"viewBoards": "Visualizza le bacheche"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -1270,7 +1348,6 @@
"clearQueueAlertDialog2": "Sei sicuro di voler cancellare la coda?",
"item": "Elemento",
"graphFailedToQueue": "Impossibile mettere in coda il grafico",
"batchFieldValues": "Valori Campi Lotto",
"time": "Tempo",
"openQueue": "Apri coda",
"iterations_one": "Iterazione",
@@ -1283,7 +1360,7 @@
"generations_many": "Generazioni",
"generations_other": "Generazioni",
"origin": "Origine",
"destination": "Destinazione",
"destination": "Dest",
"upscaling": "Ampliamento",
"canvas": "Tela",
"workflows": "Flussi di lavoro",
@@ -1299,7 +1376,14 @@
"retryItem": "Riesegui elemento",
"retryFailed": "Problema riesecuzione elemento",
"credits": "Crediti",
"cancelAllExceptCurrent": "Annulla tutto tranne quello corrente"
"cancelAllExceptCurrent": "Annulla tutto tranne quello corrente",
"sortColumn": "Ordina colonna",
"sortBy": "Ordina per {{column}}",
"sortOrderAscending": "Ascendente",
"sortOrderDescending": "Discendente",
"createdAt": "Creato",
"completedAt": "Completato",
"batchFieldValues": "Valori del campo Lotto"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1311,7 +1395,9 @@
"defaultVAE": "VAE predefinito",
"concepts": "Concetti",
"lora": "LoRA",
"noCompatibleLoRAs": "Nessun LoRA compatibile"
"noCompatibleLoRAs": "Nessun LoRA compatibile",
"noMatchingLoRAs": "Nessun LoRA corrispondente",
"noLoRAsInstalled": "Nessun LoRA installato"
},
"invocationCache": {
"disable": "Disabilita",
@@ -1342,7 +1428,8 @@
"dynamicPrompts": "Prompt dinamici",
"promptsPreview": "Anteprima dei prompt",
"showDynamicPrompts": "Mostra prompt dinamici",
"loading": "Generazione prompt dinamici..."
"loading": "Generazione prompt dinamici...",
"promptsToGenerate": "Prompt da generare"
},
"popovers": {
"paramScheduler": {
@@ -1796,7 +1883,11 @@
"negAestheticScore": "Punteggio estetico negativo",
"refinermodel": "Modello Affinatore",
"posAestheticScore": "Punteggio estetico positivo",
"refinerSteps": "Passi Affinamento"
"refinerSteps": "Passi Affinamento",
"concatPromptStyle": "Collegamento di prompt e stile",
"freePromptStyle": "Prompt manuale Stile",
"negStylePrompt": "Prompt di stile negativo",
"posStylePrompt": "Prompt di stile positivo"
},
"metadata": {
"positivePrompt": "Prompt positivo",
@@ -1833,7 +1924,9 @@
"videoModel": "Modello",
"videoDuration": "Durata",
"videoAspectRatio": "Proporzioni",
"videoResolution": "Risoluzione"
"videoResolution": "Risoluzione",
"parsingFailed": "Analisi non riuscita",
"recallParameter": "Richiama {{label}}"
},
"hrf": {
"metadata": {
@@ -1841,7 +1934,9 @@
"enabled": "Correzione Alta Risoluzione Abilitata",
"method": "Metodo della Correzione Alta Risoluzione"
},
"hrf": "Correzione Alta Risoluzione"
"hrf": "Correzione Alta Risoluzione",
"enableHrf": "Abilita correzione ad alta risoluzione",
"upscaleMethod": "Metodo di ampliamento"
},
"workflows": {
"saveWorkflowAs": "Salva flusso di lavoro come",
@@ -1947,7 +2042,9 @@
"errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati",
"showShuffle": "Mostra Mescola",
"shuffle": "Mescola",
"removeFromForm": "Rimuovi dal modulo"
"removeFromForm": "Rimuovi dal modulo",
"emptyRootPlaceholderViewMode": "Fare clic su Modifica per iniziare a creare un modulo per questo flusso di lavoro.",
"workflowBuilderAlphaWarning": "Il generatore di flussi di lavoro è attualmente in versione alpha. Potrebbero esserci modifiche sostanziali prima della versione stabile."
},
"loadMore": "Carica altro",
"searchPlaceholder": "Cerca per nome, descrizione o etichetta",
@@ -1962,7 +2059,19 @@
"view": "Visualizza",
"recommended": "Consigliato per te",
"emptyStringPlaceholder": "<stringa vuota>",
"published": "Pubblicato"
"published": "Pubblicato",
"defaultWorkflows": "Flussi di lavoro predefiniti",
"userWorkflows": "Flussi di lavoro dell'utente",
"projectWorkflows": "Flussi di lavoro del progetto",
"allLoaded": "Tutti i flussi di lavoro caricati",
"filterByTags": "Filtra per etichetta",
"noRecentWorkflows": "Nessun flusso di lavoro recente",
"openWorkflow": "Apri flusso di lavoro",
"problemLoading": "Problema nel caricamento dei flussi di lavoro",
"noDescription": "Nessuna descrizione",
"searchWorkflows": "Ricerca flussi di lavoro",
"clearWorkflowSearchFilter": "Cancella filtro di ricerca del flusso di lavoro",
"openLibrary": "Apri libreria"
},
"accordions": {
"compositing": {
@@ -1993,7 +2102,13 @@
"expandingPrompt": "Espansione del prompt...",
"uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
"expandCurrentPrompt": "Espandi il prompt corrente",
"generateFromImage": "Genera prompt dall'immagine"
"generateFromImage": "Genera prompt dall'immagine",
"resultTitle": "Espansione del prompt completata",
"resultSubtitle": "Scegli come gestire il prompt espanso:",
"insert": "Inserisci",
"noPromptHistory": "Nessuna cronologia di prompt registrata.",
"noMatchingPrompts": "Nessun prompt corrispondente nella cronologia.",
"toSwitchBetweenPrompts": "per passare da un prompt all'altro."
},
"controlLayers": {
"addLayer": "Aggiungi Livello",
@@ -2300,8 +2415,8 @@
"accept": "Accetta",
"saveToGallery": "Salva nella Galleria",
"previous": "Precedente",
"showResultsOn": "Risultati visualizzati",
"showResultsOff": "Risultati nascosti"
"showResultsOn": "Visualizzare i risultati",
"showResultsOff": "Nascondere i risultati"
},
"HUD": {
"bbox": "Riquadro di delimitazione",
@@ -2340,7 +2455,6 @@
"dragToMove": "Trascina un punto per spostarlo",
"clickToAdd": "Fare clic sul livello per aggiungere un punto",
"clickToRemove": "Clicca su un punto per rimuoverlo",
"help3": "Inverte la selezione per selezionare tutto tranne l'oggetto di destinazione.",
"pointType": "Tipo punto",
"apply": "Applica",
"reset": "Reimposta",
@@ -2352,8 +2466,16 @@
"neutral": "Neutro",
"saveAs": "Salva come",
"process": "Elabora",
"help1": "Seleziona un singolo oggetto di destinazione. Aggiungi i punti <Bold>Includi</Bold> e <Bold>Escludi</Bold> per indicare quali parti del livello fanno parte dell'oggetto di destinazione.",
"help2": "Inizia con un punto <Bold>Include</Bold> all'interno dell'oggetto di destinazione. Aggiungi altri punti per perfezionare la selezione. Meno punti in genere producono risultati migliori."
"desc": "Seleziona un singolo oggetto di destinazione. Una volta completata la selezione, fai clic su <Bold>Applica</Bold> per eliminare tutto ciò che si trova al di fuori dell'area selezionata, oppure salva la selezione come nuovo livello.",
"visualModeDesc": "La modalità visiva utilizza input di tipo riquadro e punto per selezionare un oggetto.",
"visualMode1": "Fai clic e trascina per disegnare un riquadro attorno all'oggetto che desideri selezionare. Puoi ottenere risultati migliori disegnando il riquadro un po' più grande o più piccolo dell'oggetto.",
"visualMode2": "Fare clic per aggiungere un punto di <Bold>iinclusione</Bold>i verde oppure fare clic tenendo premuto Maiusc per aggiungere un punto di <Bold>iesclusione</Bold>i rosso per indicare al modello cosa includere o escludere.",
"visualMode3": "I punti possono essere utilizzati per perfezionare una selezione di caselle oppure in modo indipendente.",
"promptModeDesc": "La modalità Prompt utilizza l'input di testo per selezionare un oggetto.",
"promptMode1": "Digitare una breve descrizione dell'oggetto che si desidera selezionare.",
"promptMode2": "Utilizzare un linguaggio semplice, evitando descrizioni complesse o oggetti multipli.",
"model": "Modello",
"prompt": "Prompt di selezione"
},
"convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
"newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
@@ -2425,12 +2547,65 @@
"hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
"autoSwitch": {
"off": "Spento"
"off": "Spento",
"switchOnStart": "All'inizio",
"switchOnFinish": "Alla fine"
},
"invertMask": "Inverti maschera",
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere",
"maxRefImages": "Max Immagini di rif.to",
"useAsReferenceImage": "Usa come immagine di riferimento"
"useAsReferenceImage": "Usa come immagine di riferimento",
"globalReferenceImage_withCount_one": "$t(controlLayers.globalReferenceImage)",
"globalReferenceImage_withCount_many": "Immagini di riferimento globali",
"globalReferenceImage_withCount_other": "Immagini di riferimento globali",
"layer_withCount_one": "Livello ({{count}})",
"layer_withCount_many": "Livelli ({{count}})",
"layer_withCount_other": "Livelli ({{count}})",
"addAdjustments": "Aggiungi regolazioni",
"removeAdjustments": "Rimuovi regolazioni",
"adjustments": {
"simple": "Semplice",
"curves": "Curve",
"heading": "Regolazioni",
"expand": "Espandi regolazioni",
"collapse": "Comprimi regolazioni",
"brightness": "Luminosità",
"contrast": "Contrasto",
"saturation": "Saturazione",
"temperature": "Temperatura",
"tint": "Tinta",
"sharpness": "Nitidezza",
"reset": "Reimposta"
},
"deletePrompt": "Elimina prompt",
"addGlobalReferenceImage": "Aggiungi $t(controlLayers.globalReferenceImage)",
"referenceImageGlobal": "Immagine di riferimento (globale)",
"sendingToGallery": "Invia generazioni alla Galleria",
"sendToGallery": "Invia alla Galleria",
"sendToGalleryDesc": "Premendo Invoke viene generata e salvata un'immagine unica nella tua galleria.",
"newImg2ImgCanvasFromImage": "Nuovo immagine-a-immagine da Immagine",
"sendToCanvasDesc": "Premendo Invoke il lavoro in corso viene visualizzato sulla tela.",
"viewProgressOnCanvas": "Visualizza i progressi e gli output nel <Btn>Visualizzatore immagini</Btn>.",
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
"rasterLayers_withCount_hidden": "Livelli raster ({{count}} nascosti)",
"globalReferenceImages_withCount_hidden": "Immagini di riferimento globali ({{count}} nascoste)",
"inpaintMasks_withCount_hidden": "Maschere Inpaint ({{count}} nascoste)",
"regionalGuidance_withCount_visible": "Guida regionale ({{count}})",
"controlLayers_withCount_visible": "Livelli di controllo ({{count}})",
"rasterLayers_withCount_visible": "Livelli raster ({{count}})",
"globalReferenceImages_withCount_visible": "Immagini di riferimento globali ({{count}})",
"inpaintMasks_withCount_visible": "Maschere Inpaint ({{count}})",
"pastedTo": "Incollato su {{destination}}",
"stagingOnCanvas": "Predisponi le immagini su",
"newGallerySession": "Nuova sessione della Galleria",
"newGallerySessionDesc": "Questo cancellerà la tela e tutte le impostazioni, ad eccezione della selezione del modello. Le generazioni verranno inviate alla galleria.",
"newCanvasSession": "Nuova sessione Tela",
"newCanvasSessionDesc": "Questo cancellerà la tela e tutte le impostazioni, ad eccezione della selezione del modello. Le generazioni verranno predisposte sulla tela.",
"replaceCurrent": "Sostituisci l'attuale",
"uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
"sendingToCanvas": "Predisponi le generazioni sulla Tela",
"viewProgressInViewer": "Visualizza i progressi e gli output nel <Btn>Visualizzatore immagini</Btn>."
},
"ui": {
"tabs": {
@@ -2527,6 +2702,10 @@
"addStartingFrame": {
"title": "Aggiungi un fotogramma iniziale",
"description": "Aggiungi un'immagine per controllare il primo fotogramma del tuo video."
},
"video": {
"startingFrameCalloutTitle": "Aggiungi un fotogramma iniziale",
"startingFrameCalloutDesc": "Aggiungi un'immagine per controllare il primo fotogramma del tuo video."
}
},
"panels": {
@@ -2624,9 +2803,11 @@
"readReleaseNotes": "Leggi le note di rilascio",
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"items": [
"Tela: Color Picker non campiona l'alfa, il riquadro di delimitazione rispetta il blocco delle proporzioni quando si ridimensiona il pulsante Mescola per i campi numerici nel generatore di flusso di lavoro, nasconde i cursori delle dimensioni dei pixel quando si utilizza un modello che non li supporta",
"Flussi di lavoro: aggiunto un pulsante Mescola ai campi di input numerici"
]
"Seleziona oggetto v2: selezione degli oggetti migliorata con input di punti e riquadri o prompt di testo.",
"Regolazioni del livello raster: regola facilmente la luminosità, il contrasto, la saturazione, le curve e altro ancora del livello.",
"Cronologia prompt: rivedi e richiama rapidamente i tuoi ultimi 100 prompt."
],
"watchUiUpdatesOverview": "Guarda la panoramica degli aggiornamenti dell'interfaccia utente"
},
"system": {
"logLevel": {
@@ -2677,5 +2858,9 @@
},
"lora": {
"weight": "Peso"
},
"video": {
"noVideoSelected": "Nessun video selezionato",
"selectFromGallery": "Seleziona un video dalla galleria per riprodurlo"
}
}

View File

@@ -981,7 +981,6 @@
"clearQueueAlertDialog2": "キューをクリアしてもよろしいですか?",
"item": "項目",
"graphFailedToQueue": "グラフをキューに追加できませんでした",
"batchFieldValues": "バッチの詳細",
"openQueue": "キューを開く",
"time": "時間",
"completedIn": "完了まで",
@@ -2150,9 +2149,6 @@
"saveAs": "名前を付けて保存",
"cancel": "キャンセル",
"process": "プロセス",
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
"dragToMove": "ポイントをドラッグして移動します",
"clickToRemove": "ポイントをクリックして削除します"

View File

@@ -232,7 +232,6 @@
"next": "다음",
"cancelBatch": "Batch 취소",
"back": "back",
"batchFieldValues": "Batch 필드 값들",
"cancel": "취소",
"session": "세션",
"time": "시간",

View File

@@ -284,7 +284,6 @@
"completed": "Zakończono",
"item": "Pozycja",
"failed": "Niepowodzenie",
"batchFieldValues": "Masowe Wartości pól",
"graphFailedToQueue": "NIe udało się dodać tabeli do kolejki",
"workflows": "Przepływy pracy",
"next": "Następny",

View File

@@ -1411,7 +1411,6 @@
"next": "Следующий",
"cancelBatch": "Отменить пакет",
"back": "задний",
"batchFieldValues": "Пакетные значения полей",
"cancel": "Отмена",
"session": "Сессия",
"time": "Время",

View File

@@ -176,7 +176,6 @@
"session": "Oturum",
"batchQueued": "Toplu İş Sıraya Alındı",
"notReady": "Sıraya Alınamadı",
"batchFieldValues": "Toplu İş Değişkenleri",
"graphFailedToQueue": "Çizge sıraya alınamadı",
"graphQueued": "Çizge sıraya alındı"
},

View File

@@ -53,7 +53,12 @@
"assetsWithCount_other": "{{count}} tài nguyên",
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
"locateInGalery": "Vị Trí Ở Thư Viện Ảnh"
"locateInGalery": "Vị Trí Ở Thư Viện Ảnh",
"deletedImagesCannotBeRestored": "Ảnh đã xóa không thể khôi phục lại.",
"hideBoards": "Ẩn Bảng",
"movingVideosToBoard_other": "Di chuyển {{count}} video vào bảng:",
"viewBoards": "Xem Bảng",
"videosWithCount_other": "{{count}} video"
},
"gallery": {
"swapImages": "Đổi Hình Ảnh",
@@ -84,7 +89,7 @@
"newestFirst": "Mới Nhất Trước",
"showStarredImagesFirst": "Hiển Thị Ảnh Gắn Sao Trước",
"bulkDownloadRequestedDesc": "Yêu cầu tải xuống đang được chuẩn bị. Vui lòng chờ trong giây lát.",
"starImage": "Gắn Sao Cho Ảnh",
"starImage": "Gắn Sao",
"viewerImage": "Trình Xem Ảnh",
"sideBySide": "Cạnh Nhau",
"alwaysShowImageSizeBadge": "Luôn Hiển Thị Kích Thước Ảnh",
@@ -109,13 +114,24 @@
"exitCompare": "Ngừng So Sánh",
"stretchToFit": "Kéo Dài Cho Vừa Vặn",
"sortDirection": "Cách Sắp Xếp",
"unstarImage": "Ngừng Gắn Sao Cho Ảnh",
"unstarImage": "Bỏ Gắn Sao",
"compareHelp2": "Nhấn <Kbd>M</Kbd> để tuần hoàn trong chế độ so sánh.",
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh",
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh",
"deleteVideo_other": "Xóa {{count}} Video",
"deleteVideoPermanent": "Video đã xóa không thể khôi phục lại.",
"jump": "Nhảy Đến",
"noVideoSelected": "Không Có Video Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"unableToLoad": "Không Thể Tải Thư Viện Ảnh",
"selectAnImageToCompare": "Chọn Ảnh Để So Sánh",
"openViewer": "Mở Trình Xem",
"closeViewer": "Đóng Trình Xem",
"videos": "Video",
"videosTab": "Video bạn tạo và được lưu trong Invoke."
},
"common": {
"ipAdapter": "IP Adapter",
@@ -147,7 +163,7 @@
"dontAskMeAgain": "Không hỏi lại",
"error": "Lỗi",
"or": "hoặc",
"installed": ã Tải Xuống",
"installed": ược Tải Xuống Sẵn",
"simple": "Cơ Bản",
"linear": "Tuyến Tính",
"safetensors": "Safetensors",
@@ -240,7 +256,14 @@
"options_withCount_other": "{{count}} thiết lập",
"removeNegativePrompt": "Xóa Lệnh Tiêu Cực",
"addNegativePrompt": "Thêm Lệnh Tiêu Cực",
"selectYourModel": "Chọn Model"
"selectYourModel": "Chọn Model",
"goTo": "Đi Đến",
"imageFailedToLoad": "Không Thể Tải Ảnh",
"localSystem": "Hệ Thống Máy Chủ",
"notInstalled": "Chưa $t(common.installed)",
"prevPage": "Trang Trước",
"nextPage": "Trang Sau",
"resetToDefaults": "Tải Lại Mặc Định"
},
"prompt": {
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
@@ -251,7 +274,10 @@
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
"expandingPrompt": "Đang mở rộng lệnh...",
"replace": "Thay Thế",
"discard": "Huỷ Bỏ"
"discard": "Huỷ Bỏ",
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
"insert": "Chèn"
},
"queue": {
"resume": "Tiếp Tục",
@@ -265,7 +291,6 @@
"clearQueueAlertDialog2": "Bạn chắc chắn muốn dọn sạch hàng không?",
"queueEmpty": "Hàng Trống",
"queueBack": "Thêm Vào Hàng",
"batchFieldValues": "Giá Trị Vùng Theo Lô",
"openQueue": "Mở Queue",
"pause": "Dừng Lại",
"pauseFailed": "Có Vấn Đề Khi Dừng Lại Bộ Xử Lý",
@@ -329,7 +354,13 @@
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
"retryItem": "Thử Lại Mục",
"credits": "Nguồn",
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại"
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại",
"createdAt": "Tạo tại",
"completedAt": "Hoàn Thành Tại",
"sortColumn": "Sắp Xếp Cột",
"sortBy": "Sắp Xếp Theo {{column}}",
"sortOrderAscending": "Tăng Dần",
"sortOrderDescending": "Giảm Dần"
},
"hotkeys": {
"canvas": {
@@ -481,6 +512,14 @@
"toggleBbox": {
"title": "Bật/Tắt Hiển Thị Hộp Giới Hạn",
"desc": "Ẩn hoặc hiện hộp giới hạn tạo sinh"
},
"setFillColorsToDefault": {
"title": "Đặt Màu Lại Mặc Định",
"desc": "Chỉnh công cụ màu hiện tại về mặc định."
},
"toggleFillColor": {
"title": "Bật/Tắt Màu Lấp Đầy",
"desc": "Bật/Tắt công cụ đổ màu hiện tại."
}
},
"workflows": {
@@ -678,12 +717,19 @@
"title": "Chọn Tab Tạo Sinh",
"desc": "Chọn tab Tạo Sinh.",
"key": "1"
},
"selectVideoTab": {
"title": "Chọn Thẻ Video",
"desc": "Chọn thẻ Video."
}
},
"searchHotkeys": "Tìm Phím tắt",
"noHotkeysFound": "Không Tìm Thấy Phím Tắt",
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
"hotkeys": "Phím Tắt"
"hotkeys": "Phím Tắt",
"video": {
"title": "Video"
}
},
"modelManager": {
"modelConverted": "Model Đã Được Chuyển Đổi",
@@ -845,11 +891,19 @@
"recommendedModels": "Model Khuyến Nghị",
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
"sdxl": "SDXL"
"sdxl": "SDXL",
"quickStart": "Gói Khởi Đầu Nhanh",
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
"stableDiffusion15": "Stable Diffusion 1.5",
"fluxDev": "FLUX.1 dev"
},
"installBundle": "Tải Xuống Gói",
"installBundleMsg1": "Bạn có chắc chắn muốn tải xuống gói {{bundleName}}?",
"installBundleMsg2": "Gói này sẽ tải xuống {{count}} model sau đây:"
"installBundleMsg2": "Gói này sẽ tải xuống {{count}} model sau đây:",
"filterModels": "Lọc Model",
"ipAdapters": "IP Adapters",
"showOnlyRelatedModels": "Liên Quan",
"starterModelsInModelManager": "Model Khởi Đầu có thể tìm thấy ở Trình Quản Lý Model"
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -861,7 +915,7 @@
"positivePrompt": "Lệnh Tích Cực",
"seed": "Hạt Giống",
"negativePrompt": "Lệnh Tiêu Cực",
"noImageDetails": "Không tìm thấy chí tiết ảnh",
"noImageDetails": "Không tìm thấy chi tiết ảnh",
"strength": "Mức độ mạnh từ ảnh sang ảnh",
"Threshold": "Ngưỡng Nhiễu",
"width": "Chiều Rộng",
@@ -881,7 +935,15 @@
"scheduler": "Scheduler",
"noMetaData": "Không tìm thấy metadata",
"imageDimensions": "Kích Thước Ảnh",
"clipSkip": "$t(parameters.clipSkip)"
"clipSkip": "$t(parameters.clipSkip)",
"videoDetails": "Chi Tiết Video",
"noVideoDetails": "Không tìm thấy chi tiết video",
"parsingFailed": "Lỗi Cú Pháp",
"recallParameter": "Gợi Nhớ {{label}}",
"videoModel": "Model",
"videoDuration": "Thời Lượng",
"videoAspectRatio": "Tỉ Lệ",
"videoResolution": "Độ Phân Giải"
},
"accordions": {
"generation": {
@@ -926,7 +988,9 @@
"strength": "Mức Độ Mạnh Của Sửa Độ Phân Giải Cao",
"method": "Cách Thức Sửa Độ Phân Giải Cao"
},
"hrf": "Sửa Độ Phân Giải Cao"
"hrf": "Sửa Độ Phân Giải Cao",
"enableHrf": "Bật Chế Độ Chỉnh Sửa Phân Giải Cao",
"upscaleMethod": "Phương Thức Upscale"
},
"nodes": {
"validateConnectionsHelp": "Ngăn chặn những kết nối không hợp lý được tạo ra, và đồ thị không hợp lệ bị kích hoạt",
@@ -1099,7 +1163,23 @@
"alignmentDL": "Dưới Cùng Bên Trái",
"alignmentUR": "Trên Cùng Bên Phải",
"alignmentDR": "Dưới Cùng Bên Phải"
}
},
"generatorLoading": "đang tải",
"addLinearView": "Thêm Vào Chế Độ Xem Tuyến Tính (Linear View)",
"hideLegendNodes": "Ẩn Vùng Nhập",
"mismatchedVersion": "Node không hợp lệ: node {{node}} thuộc loại {{type}} có phiên bản không khớp (thử cập nhật?)",
"noFieldsLinearview": "Không có vùng được thêm vào Chế Độ Xem Tuyến Tính",
"removeLinearView": "Xoá Khỏi Chế Độ Xem Tuyến Tính",
"reorderLinearView": "Sắp Xếp Lại Chế Độ Xem Tuyến Tính",
"showLegendNodes": "Hiển Thị Vùng Nhập",
"unableToLoadWorkflow": "Không Thể Tải Workflow",
"unknownTemplate": "Mẫu Trình Bày Không Rõ",
"unknownInput": "Đầu Vào Không Rõ: {{name}}",
"loadingTemplates": "Đang Tải {{name}}",
"versionUnknown": " Phiên Bản Không Rõ",
"generateValues": "Giá Trị Tạo Sinh",
"floatRangeGenerator": "Phạm Vị Tạo Sinh Số Thực",
"integerRangeGenerator": "Phạm Vị Tạo Sinh Số Nguyên"
},
"popovers": {
"paramCFGRescaleMultiplier": {
@@ -1552,7 +1632,9 @@
"noMatchingModels": "Không có Model phù hợp",
"noModelsAvailable": "Không có model",
"selectModel": "Chọn Model",
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích",
"noMatchingLoRAs": "Không có LoRA phù hợp",
"noLoRAsInstalled": "Chưa có LoRA được tải xuống"
},
"parameters": {
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
@@ -1600,7 +1682,15 @@
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn",
"emptyBatches": "lô trống",
"noStartingFrameImage": "Chưa có khung hình ảnh đầu",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều rộng hộp giới hạn là {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều cao hộp giới hạn là {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều cao hộp giới hạn là {{height}}",
"incompatibleLoRAs": "LoRA không tương thích bị thêm vào",
"videoIsDisabled": "Trình tạo sinh Video không được mở cho tài khoản {{accountType}}."
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1663,7 +1753,17 @@
"tileSize": "Kích Thước Khối",
"disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp.",
"useClipSkip": "Dùng CLIP Skip"
"useClipSkip": "Dùng CLIP Skip",
"duration": "Thời Lượng",
"downloadImage": "Tải Xuống Hình Ảnh",
"images_withCount_other": "Hình Ảnh",
"videos_withCount_other": "Video",
"startingFrameImage": "Khung Hình Bắt Đầu",
"videoActions": "Hành Động Với Video",
"sendToVideo": "Gửi Vào Video",
"showOptionsPanel": "Hiển Thị Bảng Bên Cạnh (O hoặc T)",
"video": "Video",
"resolution": "Độ Phân Giải"
},
"dynamicPrompts": {
"seedBehaviour": {
@@ -1677,7 +1777,8 @@
"showDynamicPrompts": "HIện Dynamic Prompt",
"maxPrompts": "Số Lệnh Tối Đa",
"promptsPreview": "Xem Trước Lệnh",
"dynamicPrompts": "Dynamic Prompt"
"dynamicPrompts": "Dynamic Prompt",
"promptsToGenerate": "Lệnh Để Tạo Sinh"
},
"settings": {
"beta": "Beta",
@@ -1711,7 +1812,9 @@
"intermediatesClearedFailed": "Có Vấn Đề Khi Dọn Sạch Sản Phẩm Trung Gian",
"enableInvisibleWatermark": "Bật Chế Độ Ẩn Watermark",
"showDetailedInvocationProgress": "Hiện Dữ Liệu Xử Lý",
"enableHighlightFocusedRegions": "Nhấn Mạnh Khu Vực Chỉ Định"
"enableHighlightFocusedRegions": "Nhấn Mạnh Khu Vực Chỉ Định",
"modelDescriptionsDisabled": "Trình Mô Tả Model Bằng Hộp Thả Đã Tắt",
"modelDescriptionsDisabledDesc": "Trình mô tả model bằng hộp thả đã tắt. Bật lại trong Cài đặt."
},
"sdxl": {
"loading": "Đang Tải...",
@@ -1725,7 +1828,11 @@
"refiner": "Refiner",
"cfgScale": "Thang CFG",
"negAestheticScore": "Điểm Khác Tiêu Chuẩn",
"noModelsAvailable": "Không có sẵn model"
"noModelsAvailable": "Không có sẵn model",
"concatPromptStyle": "Liên Kết Lệnh & Phong Cách",
"freePromptStyle": "Viết Thủ Công Lệnh Phong Cách",
"negStylePrompt": "Điểm Tiêu Cực Cho Lệnh Phong Cách",
"posStylePrompt": "Điểm Tích Cực Cho Lệnh Phong Cách"
},
"controlLayers": {
"width": "Chiều Rộng",
@@ -1817,7 +1924,9 @@
"horizontal": "Đường Ngang",
"crosshatch": "Đường Chéo Song Song (Crosshatch)",
"vertical": "Đường Dọc",
"solid": "Chắc Chắn"
"solid": "Chắc Chắn",
"bgFillColor": "Màu Nền",
"fgFillColor": "Màu Nổi"
},
"addControlLayer": "Thêm $t(controlLayers.controlLayer)",
"inpaintMask": "Lớp Phủ Inpaint",
@@ -1862,15 +1971,12 @@
"transparency": "Độ Trong Suốt",
"showingType": "Hiển Thị {{type}}",
"selectObject": {
"help2": "Bắt đầu mới một điểm <Bold>Bao Gồm</Bold> trong đối tượng được chọn. Cho thêm điểm để tinh chế phần chọn. Ít điểm hơn thường mang lại kết quả tốt hơn.",
"invertSelection": "Đảo Ngược Phần Chọn",
"include": "Bao Gồm",
"exclude": "Loại Trừ",
"reset": "Làm Mới",
"saveAs": "Lưu Như",
"help1": "Chọn một đối tượng. Thêm điểm <Bold>Bao Gồm</Bold> và <Bold>Loại Trừ</Bold> để chỉ ra phần nào trong layer là đối tượng mong muốn.",
"dragToMove": "Kéo kiểm để di chuyển nó",
"help3": "Đảo ngược phần chọn để chọn mọi thứ trừ đối tượng được chọn.",
"clickToAdd": "Nhấp chuột vào layer để thêm điểm",
"clickToRemove": "Nhấp chuột vào một điểm để xoá",
"selectObject": "Chọn Đối Tượng",
@@ -2152,12 +2258,45 @@
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
"autoSwitch": {
"off": "Tắt"
"off": "Tắt",
"switchOnStart": "Khi Bắt Đầu",
"switchOnFinish": "Khi Kết Thúc"
},
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
"invertMask": "Đảo Ngược Lớp Phủ",
"maxRefImages": "Ảnh Mẫu Tối Đa",
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu"
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu",
"deletePrompt": "Xoá Lệnh",
"addGlobalReferenceImage": "Thêm $t(controlLayers.globalReferenceImage)",
"referenceImageGlobal": "Ảnh Mẫu (Toàn Vùng)",
"sendingToCanvas": "Chuyển Ảnh Tạo Sinh Vào Canvas",
"sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện Ảnh",
"sendToGallery": "Chuyển Tới Thư Viện Ảnh",
"sendToGalleryDesc": "Bấm 'Kích Hoạt' sẽ tiến hành tạo sinh và lưu ảnh vào thư viện ảnh.",
"newImg2ImgCanvasFromImage": "Chuyển Đổi Ảnh Sang Ảnh Mới Từ Ảnh",
"sendToCanvasDesc": "Bấm 'Kích Hoạt' sẽ hiển thị công việc đang xử lý của bạn lên canvas.",
"viewProgressInViewer": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Trình Xem Ảnh</Btn>.",
"viewProgressOnCanvas": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Canvas</Btn>.",
"globalReferenceImage_withCount_other": "$t(controlLayers.globalReferenceImage)",
"regionalGuidance_withCount_hidden": "Chỉ Dẫn Khu Vực ({{count}} đang ẩn)",
"controlLayers_withCount_hidden": "Layer Điều Khiển Được ({{count}} đang ẩn)",
"rasterLayers_withCount_hidden": "Layer Dạng Raster ({{count}} đang ẩn)",
"globalReferenceImages_withCount_hidden": "Ảnh Mẫu Toàn Vùng ({{count}} đang ẩn)",
"inpaintMasks_withCount_hidden": "Lớp Phủ Inpaint ({{count}} đang ẩn)",
"regionalGuidance_withCount_visible": "Chỉ Dẫn Khu Vực ({{count}})",
"controlLayers_withCount_visible": "Layer Điều Khiển Được ({{count}})",
"rasterLayers_withCount_visible": "Layer Dạng Raster ({{count}})",
"globalReferenceImages_withCount_visible": "Ảnh Mẫu Toàn Vùng ({{count}})",
"inpaintMasks_withCount_visible": "Lớp Phủ Inpaint ({{count}})",
"layer_withCount_other": "Layer ({{count}})",
"pastedTo": "Dán Vào {{destination}}",
"stagingOnCanvas": "Hiển thị hình ảnh lên",
"newGallerySession": "Phiên Thư Viện Ảnh Mới",
"newGallerySessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến thư viện ảnh.",
"newCanvasSession": "Phiên Canvas Mới",
"newCanvasSessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến canvas.",
"replaceCurrent": "Thay Đổi Cái Hiện Tại",
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>."
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2268,7 +2407,7 @@
"errorCopied": "Lỗi Khi Sao Chép",
"prunedQueue": "Cắt Bớt Hàng Đợi",
"imagesWillBeAddedTo": "Ảnh đã tải lên sẽ được thêm vào tài nguyên của bảng {{boardName}}.",
"baseModelChangedCleared_other": "Dọn sạch hoặc tắt {{count}} model phụ không tương thích",
"baseModelChangedCleared_other": "Cập nhật, dọn sạch hoặc tắt {{count}} model phụ không tương thích",
"canceled": "Quá Trình Xử Lý Đã Huỷ",
"baseModelChanged": "Model Cơ Sở Đã Đổi",
"addedToUncategorized": "Thêm vào tài nguyên của bảng $t(boards.uncategorized)",
@@ -2309,7 +2448,25 @@
"maskInverted": "Đã Đảo Ngược Lớp Phủ",
"maskInvertFailed": "Thất Bại Khi Đảo Ngược Lớp Phủ",
"noVisibleMasks": "Không Có Lớp Phủ Đang Hiển Thị",
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược"
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược",
"imageNotLoadedDesc": "Không thể tìm thấy ảnh",
"imageSaved": "Ảnh Đã Lưu",
"imageSavingFailed": "Lưu Ảnh Thất Bại",
"invalidUpload": "Dữ Liệu Tải Lên Không Hợp Lệ",
"layerSavedToAssets": "Lưu Layer Vào Khu Tài Nguyên",
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
"noActiveRasterLayersDesc": "Bật ít nhất một layer dạng raster để xuất file PSD",
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
"setControlImage": "Đặt làm ảnh điều khiển được",
"setNodeField": "Đặt làm vùng node",
"uploadFailedInvalidUploadDesc_withCount_other": "Cần tối đa {{count}} ảnh PNG, JPEG, hoặc WEBP.",
"noInpaintMaskSelected": "Không Có Lớp Phủ Inpant Được Chọn",
"noInpaintMaskSelectedDesc": "Chọn một lớp phủ inpaint để đảo ngược",
"invalidBbox": "Hộp Giới Hạn Không Hợp Lệ",
"invalidBboxDesc": "Hợp giới hạn có kích thước không hợp lệ"
},
"ui": {
"tabs": {
@@ -2322,7 +2479,8 @@
"queue": "Queue (Hàng Đợi)",
"workflows": "Workflow (Luồng Làm Việc)",
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)",
"generate": "Tạo Sinh"
"generate": "Tạo Sinh",
"video": "Video"
},
"launchpad": {
"workflowsTitle": "Đi sâu hơn với Workflow.",
@@ -2400,13 +2558,23 @@
"generate": {
"canvasCalloutTitle": "Đang tìm cách để điều khiển, chỉnh sửa, và làm lại ảnh?",
"canvasCalloutLink": "Vào Canvas cho nhiều tính năng hơn."
},
"videoTitle": "Tạo sinh video từ lệnh chữ.",
"video": {
"startingFrameCalloutTitle": "Thêm Khung Hình Bắt Đầu",
"startingFrameCalloutDesc": "Thêm ảnh nhằm điều khiển khung hình đầu của video."
},
"addStartingFrame": {
"title": "Thêm Khung Hình Bắt Đầu",
"description": "Thêm ảnh nhằm điều khiển khung hình đầu của video."
}
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Trình Biên Tập Workflow",
"imageViewer": "Trình Xem Ảnh",
"canvas": "Canvas"
"imageViewer": "Trình Xem",
"canvas": "Canvas",
"video": "Video"
}
},
"workflows": {
@@ -2513,7 +2681,9 @@
"errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata",
"removeFromForm": "Xóa Khỏi Vùng Nhập",
"showShuffle": "Hiện Xáo Trộn",
"shuffle": "Xáo Trộn"
"shuffle": "Xáo Trộn",
"emptyRootPlaceholderViewMode": "Chọn Chỉnh Sửa để bắt đầu tạo nên một vùng nhập cho workflow này.",
"workflowBuilderAlphaWarning": "Trình tạo vùng nhập đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành."
},
"yourWorkflows": "Workflow Của Bạn",
"browseWorkflows": "Khám Phá Workflow",
@@ -2528,7 +2698,19 @@
"deselectAll": "Huỷ Chọn Tất Cả",
"recommended": "Có Thể Bạn Sẽ Cần",
"emptyStringPlaceholder": "<xâu ký tự trống>",
"published": "Đã Đăng"
"published": "Đã Đăng",
"defaultWorkflows": "Workflow Mặc Định",
"userWorkflows": "Workflow Của Người Dùng",
"projectWorkflows": "Dự Án Workflow",
"allLoaded": "Đã Tải Tất Cả Workflow",
"filterByTags": "Lọc Theo Nhãn",
"noRecentWorkflows": "Không Có Workflows Gần Đây",
"openWorkflow": "Mở Workflow",
"problemLoading": "Có Vấn Đề Khi Tải Workflow",
"noDescription": "Không có mô tả",
"searchWorkflows": "Tìm Workflow",
"clearWorkflowSearchFilter": "Xoá Workflow Khỏi Bộ Lọc Tìm Kiếm",
"openLibrary": "Mở Thư Viện"
},
"upscaling": {
"missingUpscaleInitialImage": "Thiếu ảnh dùng để upscale",
@@ -2566,9 +2748,10 @@
"readReleaseNotes": "Đọc Ghi Chú Phát Hành",
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"items": [
"Misc QoL: Bật/Tắt hiển thị hộp giới hạn, đánh dấu node bị lỗi, chặn lỗi thêm node vào vùng nhập nhiều lần, khả năng đọc lại metadata của CLIP Skip",
"Giảm lượng tiêu thụ VRAM cho các ảnh mẫu Kontext và mã hóa VAE"
]
"Canvas: Chia tách màu nổi và màu nền - bật/tắt với 'x', khởi động lại về dạng đen trắng với 'd'",
"LoRA: Đặt khối lượng mặc định cho LoRA trong Trình Quản Lý Model"
],
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng"
},
"upsell": {
"professional": "Chuyên Nghiệp",
@@ -2596,5 +2779,12 @@
"clearSucceeded": "Cache Model Đã Được Dọn",
"clearFailed": "Có Vấn Đề Khi Dọn Cache Model",
"clear": "Dọn Cache Model"
},
"lora": {
"weight": "Trọng Lượng"
},
"video": {
"noVideoSelected": "Không có video được chọn",
"selectFromGallery": "Chọn một video trong thư viện để xem"
}
}

View File

@@ -957,7 +957,6 @@
"session": "会话",
"enqueueing": "队列中的批次",
"graphFailedToQueue": "节点图加入队列失败",
"batchFieldValues": "批处理值",
"time": "时间",
"openQueue": "打开队列",
"prompts_other": "提示词",

View File

@@ -2,6 +2,7 @@ import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { CropImageModal } from 'features/cropper/components/CropImageModal';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { DeleteVideoModal } from 'features/deleteVideoModal/components/DeleteVideoModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
@@ -58,6 +59,7 @@ export const GlobalModalIsolator = memo(() => {
<CanvasPasteModal />
</CanvasManagerProviderGate>
<LoadWorkflowFromGraphModal />
<CropImageModal />
</>
);
});

View File

@@ -4,7 +4,6 @@ import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
import { canvasReset } from 'features/controlLayers/store/actions';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { sentImageToCanvas } from 'features/gallery/store/actions';
@@ -164,7 +163,6 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
case 'generation':
// Go to the generate tab, open the launchpad
await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
store.dispatch(paramsReset());
break;
case 'canvas':
// Go to the canvas tab, open the launchpad

View File

@@ -12,7 +12,13 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
import {
getEntityIdentifier,
isFLUXReduxConfig,
isIPAdapterConfig,
isRegionalGuidanceFLUXReduxConfig,
isRegionalGuidanceIPAdapterConfig,
} from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { modelSelected } from 'features/parameters/store/actions';
import {
@@ -252,7 +258,7 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isIPAdapterConfig(config)) {
if (!isRegionalGuidanceIPAdapterConfig(config)) {
return;
}
@@ -295,7 +301,7 @@ const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isFLUXReduxConfig(config)) {
if (!isRegionalGuidanceFLUXReduxConfig(config)) {
return;
}

View File

@@ -1,4 +1,13 @@
import { Box, Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger, Tooltip } from '@invoke-ai/ui-library';
import {
Box,
Flex,
Popover,
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
Tooltip,
} from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import RgbColorPicker from 'common/components/ColorPicker/RgbColorPicker';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
@@ -62,14 +71,16 @@ export const EntityListSelectedEntityActionBarFill = memo(() => {
</Tooltip>
</Flex>
</PopoverTrigger>
<PopoverContent>
<PopoverBody minH={64}>
<Flex flexDir="column" gap={4}>
<RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput withSwatches />
<MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
</Flex>
</PopoverBody>
</PopoverContent>
<Portal>
<PopoverContent>
<PopoverBody minH={64}>
<Flex flexDir="column" gap={4}>
<RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput withSwatches />
<MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
</Flex>
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
);
});
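Here and in the opacity popover below, PopoverContent is now wrapped in Portal, which mounts the popover at the end of the document body so it cannot be clipped by overflow or stacking contexts inside the layer panel, while positioning still tracks the trigger. The pattern in isolation (Chakra-style API; that @invoke-ai/ui-library re-exports these components is an assumption based on the imports above):

import { Button, Popover, PopoverBody, PopoverContent, PopoverTrigger, Portal } from '@chakra-ui/react';

// Minimal sketch of the portal-wrapped popover pattern.
export const PortalPopoverExample = () => (
  <Popover>
    <PopoverTrigger>
      <Button>Open</Button>
    </PopoverTrigger>
    <Portal>
      {/* Rendered outside the trigger's DOM subtree; ancestor overflow rules no longer apply. */}
      <PopoverContent>
        <PopoverBody>Popover content</PopoverBody>
      </PopoverContent>
    </Portal>
  </Popover>
);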

View File

@@ -12,6 +12,7 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
@@ -165,22 +166,24 @@ export const EntityListSelectedEntityActionBarOpacity = memo(() => {
</NumberInput>
</PopoverAnchor>
</FormControl>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={localOpacity}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
isDisabled={selectedEntityIdentifier === null}
/>
</PopoverBody>
</PopoverContent>
<Portal>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={localOpacity}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
isDisabled={selectedEntityIdentifier === null}
/>
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
);
});

View File

@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { RasterLayerAdjustmentsPanel } from 'features/controlLayers/components/RasterLayer/RasterLayerAdjustmentsPanel';
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { RasterLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -39,6 +40,7 @@ export const RasterLayer = memo(({ id }: Props) => {
<Spacer />
<CanvasEntityHeaderCommonActions />
</CanvasEntityHeader>
<RasterLayerAdjustmentsPanel />
<DndDropTarget
dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
dndTargetData={dndTargetData}

View File

@@ -0,0 +1,167 @@
import { Button, ButtonGroup, Flex, IconButton, Switch, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { RasterLayerCurvesAdjustmentsEditor } from 'features/controlLayers/components/RasterLayer/RasterLayerCurvesAdjustmentsEditor';
import { RasterLayerSimpleAdjustmentsEditor } from 'features/controlLayers/components/RasterLayer/RasterLayerSimpleAdjustmentsEditor';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
rasterLayerAdjustmentsCancel,
rasterLayerAdjustmentsCollapsedToggled,
rasterLayerAdjustmentsEnabledToggled,
rasterLayerAdjustmentsModeChanged,
rasterLayerAdjustmentsReset,
rasterLayerAdjustmentsSet,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import React, { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiCaretDownBold, PiCheckBold, PiTrashBold } from 'react-icons/pi';
export const RasterLayerAdjustmentsPanel = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext<'raster_layer'>();
const canvasManager = useCanvasManager();
const selectHasAdjustments = useMemo(() => {
return createSelector(selectCanvasSlice, (canvas) => Boolean(selectEntity(canvas, entityIdentifier)?.adjustments));
}, [entityIdentifier]);
const hasAdjustments = useAppSelector(selectHasAdjustments);
const selectMode = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.mode ?? 'simple'
);
}, [entityIdentifier]);
const mode = useAppSelector(selectMode);
const selectEnabled = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.enabled ?? false
);
}, [entityIdentifier]);
const enabled = useAppSelector(selectEnabled);
const selectCollapsed = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.collapsed ?? false
);
}, [entityIdentifier]);
const collapsed = useAppSelector(selectCollapsed);
const onToggleEnabled = useCallback(() => {
dispatch(rasterLayerAdjustmentsEnabledToggled({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
const onReset = useCallback(() => {
// Reset values to defaults but keep adjustments present; preserve enabled/collapsed/mode
dispatch(rasterLayerAdjustmentsReset({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
const onCancel = useCallback(() => {
// Clear out adjustments entirely
dispatch(rasterLayerAdjustmentsCancel({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
const onToggleCollapsed = useCallback(() => {
dispatch(rasterLayerAdjustmentsCollapsedToggled({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
const onClickModeSimple = useCallback(
() => dispatch(rasterLayerAdjustmentsModeChanged({ entityIdentifier, mode: 'simple' })),
[dispatch, entityIdentifier]
);
const onClickModeCurves = useCallback(
() => dispatch(rasterLayerAdjustmentsModeChanged({ entityIdentifier, mode: 'curves' })),
[dispatch, entityIdentifier]
);
const onFinish = useCallback(async () => {
// Bake current visual into layer pixels, then clear adjustments
const adapter = canvasManager.getAdapter(entityIdentifier);
if (!adapter || adapter.type !== 'raster_layer_adapter') {
return;
}
const rect = adapter.transformer.getRelativeRect();
try {
await adapter.renderer.rasterize({ rect, replaceObjects: true, attrs: { opacity: 1 } });
// Clear adjustments after baking
dispatch(rasterLayerAdjustmentsSet({ entityIdentifier, adjustments: null }));
} catch {
// no-op; leave state unchanged on failure
}
}, [canvasManager, entityIdentifier, dispatch]);
// Hide the panel entirely until adjustments are added via context menu
if (!hasAdjustments) {
return null;
}
return (
<>
<Flex px={2} pb={2} alignItems="center" gap={2}>
<IconButton
aria-label={collapsed ? t('controlLayers.adjustments.expand') : t('controlLayers.adjustments.collapse')}
size="sm"
variant="ghost"
onClick={onToggleCollapsed}
icon={
<PiCaretDownBold
style={{ transform: collapsed ? 'rotate(-90deg)' : 'rotate(0deg)', transition: 'transform 0.2s' }}
/>
}
/>
<Text fontWeight={600} flex={1}>
Adjustments
</Text>
<ButtonGroup size="sm" isAttached variant="outline">
<Button onClick={onClickModeSimple} colorScheme={mode === 'simple' ? 'invokeBlue' : undefined}>
{t('controlLayers.adjustments.simple')}
</Button>
<Button onClick={onClickModeCurves} colorScheme={mode === 'curves' ? 'invokeBlue' : undefined}>
{t('controlLayers.adjustments.curves')}
</Button>
</ButtonGroup>
<Switch isChecked={enabled} onChange={onToggleEnabled} />
<IconButton
aria-label={t('controlLayers.adjustments.cancel')}
size="md"
onClick={onCancel}
isDisabled={!hasAdjustments}
colorScheme="red"
icon={<PiTrashBold />}
variant="ghost"
/>
<IconButton
aria-label={t('controlLayers.adjustments.reset')}
size="md"
onClick={onReset}
isDisabled={!hasAdjustments}
icon={<PiArrowCounterClockwiseBold />}
variant="ghost"
/>
<IconButton
aria-label={t('controlLayers.adjustments.finish')}
size="md"
onClick={onFinish}
isDisabled={!hasAdjustments}
colorScheme="green"
icon={<PiCheckBold />}
variant="ghost"
/>
</Flex>
{!collapsed && mode === 'simple' && <RasterLayerSimpleAdjustmentsEditor />}
{!collapsed && mode === 'curves' && <RasterLayerCurvesAdjustmentsEditor />}
</>
);
});
RasterLayerAdjustmentsPanel.displayName = 'RasterLayerAdjustmentsPanel';
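Each piece of adjustment state gets its own memoized selector, rebuilt only when entityIdentifier changes — the standard Reselect per-entity factory pattern. A condensed sketch of the idea (the factory below is illustrative, not a helper that exists in the repo; the CanvasEntityIdentifier type parameter mirrors the context hooks above):

// Illustrative factory: one memoized selector per entity, so the component
// re-renders only when that entity's adjustments object actually changes.
const makeSelectAdjustments = (entityIdentifier: CanvasEntityIdentifier<'raster_layer'>) =>
  createSelector(selectCanvasSlice, (canvas) => selectEntity(canvas, entityIdentifier)?.adjustments ?? null);

// Usage inside a component:
// const selectAdjustments = useMemo(() => makeSelectAdjustments(entityIdentifier), [entityIdentifier]);
// const adjustments = useAppSelector(selectAdjustments);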

View File

@@ -0,0 +1,179 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEntityAdapterContext } from 'features/controlLayers/contexts/EntityAdapterContext';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { rasterLayerAdjustmentsCurvesUpdated } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import type { ChannelName, ChannelPoints, CurvesAdjustmentsConfig } from 'features/controlLayers/store/types';
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { RasterLayerCurvesAdjustmentsGraph } from './RasterLayerCurvesAdjustmentsGraph';
const DEFAULT_POINTS: ChannelPoints = [
[0, 0],
[255, 255],
];
const DEFAULT_CURVES: CurvesAdjustmentsConfig = {
master: DEFAULT_POINTS,
r: DEFAULT_POINTS,
g: DEFAULT_POINTS,
b: DEFAULT_POINTS,
};
type ChannelHistograms = Record<ChannelName, number[] | null>;
const calculateHistogramsFromImageData = (imageData: ImageData): ChannelHistograms | null => {
try {
const data = imageData.data;
const len = data.length / 4;
const master = new Array<number>(256).fill(0);
const r = new Array<number>(256).fill(0);
const g = new Array<number>(256).fill(0);
const b = new Array<number>(256).fill(0);
// sample every 4th pixel to lighten work
for (let i = 0; i < len; i += 4) {
const idx = i * 4;
const rv = data[idx] as number;
const gv = data[idx + 1] as number;
const bv = data[idx + 2] as number;
const m = Math.round(0.2126 * rv + 0.7152 * gv + 0.0722 * bv);
if (m >= 0 && m < 256) {
master[m] = (master[m] ?? 0) + 1;
}
if (rv >= 0 && rv < 256) {
r[rv] = (r[rv] ?? 0) + 1;
}
if (gv >= 0 && gv < 256) {
g[gv] = (g[gv] ?? 0) + 1;
}
if (bv >= 0 && bv < 256) {
b[bv] = (b[bv] ?? 0) + 1;
}
}
return {
master,
r,
g,
b,
};
} catch {
return null;
}
};
export const RasterLayerCurvesAdjustmentsEditor = memo(() => {
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext<'raster_layer'>();
const adapter = useEntityAdapterContext<'raster_layer'>('raster_layer');
const { t } = useTranslation();
const selectCurves = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.curves ?? DEFAULT_CURVES
);
}, [entityIdentifier]);
const curves = useAppSelector(selectCurves);
const selectIsDisabled = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.enabled !== true
);
}, [entityIdentifier]);
const isDisabled = useAppSelector(selectIsDisabled);
// The layer's canvas cache serves as a proxy for layer changes and is used to trigger histogram recalculation
const canvasCache = useStore(adapter.$canvasCache);
const [histMaster, setHistMaster] = useState<number[] | null>(null);
const [histR, setHistR] = useState<number[] | null>(null);
const [histG, setHistG] = useState<number[] | null>(null);
const [histB, setHistB] = useState<number[] | null>(null);
const recalcHistogram = useCallback(() => {
try {
const rect = adapter.transformer.getRelativeRect();
if (rect.width === 0 || rect.height === 0) {
setHistMaster(Array(256).fill(0));
setHistR(Array(256).fill(0));
setHistG(Array(256).fill(0));
setHistB(Array(256).fill(0));
return;
}
const imageData = adapter.renderer.getImageData({ rect });
const h = calculateHistogramsFromImageData(imageData);
if (h) {
setHistMaster(h.master);
setHistR(h.r);
setHistG(h.g);
setHistB(h.b);
}
} catch {
// ignore
}
}, [adapter]);
useEffect(() => {
recalcHistogram();
}, [canvasCache, recalcHistogram]);
const onChangePoints = useCallback(
(channel: ChannelName, pts: ChannelPoints) => {
dispatch(rasterLayerAdjustmentsCurvesUpdated({ entityIdentifier, channel, points: pts }));
},
[dispatch, entityIdentifier]
);
// Memoize per-channel change handlers to avoid inline lambdas in JSX
const onChangeMaster = useCallback((pts: ChannelPoints) => onChangePoints('master', pts), [onChangePoints]);
const onChangeR = useCallback((pts: ChannelPoints) => onChangePoints('r', pts), [onChangePoints]);
const onChangeG = useCallback((pts: ChannelPoints) => onChangePoints('g', pts), [onChangePoints]);
const onChangeB = useCallback((pts: ChannelPoints) => onChangePoints('b', pts), [onChangePoints]);
return (
<Flex
direction="column"
gap={2}
px={3}
pb={3}
opacity={isDisabled ? 0.3 : 1}
pointerEvents={isDisabled ? 'none' : 'auto'}
>
<Box display="grid" gridTemplateColumns="repeat(2, minmax(0, 1fr))" gap={4}>
<RasterLayerCurvesAdjustmentsGraph
title={t('controlLayers.adjustments.master')}
channel="master"
points={curves.master}
histogram={histMaster}
onChange={onChangeMaster}
/>
<RasterLayerCurvesAdjustmentsGraph
title={t('common.red')}
channel="r"
points={curves.r}
histogram={histR}
onChange={onChangeR}
/>
<RasterLayerCurvesAdjustmentsGraph
title={t('common.green')}
channel="g"
points={curves.g}
histogram={histG}
onChange={onChangeG}
/>
<RasterLayerCurvesAdjustmentsGraph
title={t('common.blue')}
channel="b"
points={curves.b}
histogram={histB}
onChange={onChangeB}
/>
</Box>
</Flex>
);
});
RasterLayerCurvesAdjustmentsEditor.displayName = 'RasterLayerCurvesAdjustmentsEditor';
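The master histogram uses the Rec. 709 luma weights (0.2126 R + 0.7152 G + 0.0722 B) and samples every 4th pixel to keep recalculation cheap. A small self-contained usage sketch of the helper above:

// Usage sketch: histograms of a solid mid-gray 64x64 canvas.
const canvas = document.createElement('canvas');
canvas.width = 64;
canvas.height = 64;
const ctx = canvas.getContext('2d');
if (ctx) {
  ctx.fillStyle = '#808080'; // r = g = b = 128, so luma is also 128
  ctx.fillRect(0, 0, 64, 64);
  const imageData = ctx.getImageData(0, 0, 64, 64);
  const h = calculateHistogramsFromImageData(imageData);
  if (h) {
    // Every sampled pixel lands in bin 128; with 1-in-4 sampling that is
    // (64 * 64) / 4 = 1024 counts in h.master[128] (and in each channel's bin 128).
  }
}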

View File

@@ -0,0 +1,432 @@
import { Flex, IconButton, Text } from '@invoke-ai/ui-library';
import type { ChannelName, ChannelPoints } from 'features/controlLayers/store/types';
import React, { memo, useCallback, useEffect, useRef, useState } from 'react';
import { PiArrowCounterClockwiseBold } from 'react-icons/pi';
const DEFAULT_POINTS: ChannelPoints = [
[0, 0],
[255, 255],
];
const channelColor: Record<ChannelName, string> = {
master: '#888',
r: '#e53e3e',
g: '#38a169',
b: '#3182ce',
};
const clamp = (v: number, min: number, max: number) => (v < min ? min : v > max ? max : v);
const sortPoints = (pts: ChannelPoints) =>
[...pts]
.sort((a, b) => {
const xDiff = a[0] - b[0];
if (xDiff) {
return xDiff;
}
if (a[0] === 0 || a[0] === 255) {
return a[1] - b[1];
}
return 0;
})
// Finally, clamp to valid range and round to integers
.map(([x, y]) => [clamp(Math.round(x), 0, 255), clamp(Math.round(y), 0, 255)] satisfies [number, number]);
// Base canvas logical coordinate system (used for aspect ratio & initial sizing)
const CANVAS_WIDTH = 256;
const CANVAS_HEIGHT = 160;
const MARGIN_LEFT = 8;
const MARGIN_RIGHT = 8;
const MARGIN_TOP = 8;
const MARGIN_BOTTOM = 10;
const CANVAS_STYLE: React.CSSProperties = {
width: '100%',
// Maintain aspect ratio while allowing responsive width. Height is set automatically via aspect-ratio.
aspectRatio: `${CANVAS_WIDTH} / ${CANVAS_HEIGHT}`,
height: 'auto',
touchAction: 'none',
borderRadius: 4,
background: '#111',
display: 'block',
};
type CurveGraphProps = {
title: string;
channel: ChannelName;
points: ChannelPoints | undefined;
histogram: number[] | null;
onChange: (pts: ChannelPoints) => void;
};
const drawHistogram = (
c: HTMLCanvasElement,
channel: ChannelName,
histogram: number[] | null,
points: ChannelPoints
) => {
// Use device pixel ratio for crisp rendering on HiDPI displays.
const dpr = window.devicePixelRatio || 1;
const cssWidth = c.clientWidth || CANVAS_WIDTH; // CSS pixels
const cssHeight = (cssWidth * CANVAS_HEIGHT) / CANVAS_WIDTH; // maintain aspect ratio
// Ensure the backing store matches current display size * dpr (only if changed).
const targetWidth = Math.round(cssWidth * dpr);
const targetHeight = Math.round(cssHeight * dpr);
if (c.width !== targetWidth || c.height !== targetHeight) {
c.width = targetWidth;
c.height = targetHeight;
}
// Guarantee the CSS height stays synced (width is 100%).
if (c.style.height !== `${cssHeight}px`) {
c.style.height = `${cssHeight}px`;
}
const ctx = c.getContext('2d');
if (!ctx) {
return;
}
// Reset transform then scale for dpr so we can draw in CSS pixel coordinates.
ctx.setTransform(1, 0, 0, 1, 0, 0);
ctx.scale(dpr, dpr);
// Dynamic inner geometry (CSS pixel space)
const innerWidth = cssWidth - MARGIN_LEFT - MARGIN_RIGHT;
const innerHeight = cssHeight - MARGIN_TOP - MARGIN_BOTTOM;
const valueToCanvasX = (x: number) => MARGIN_LEFT + (clamp(x, 0, 255) / 255) * innerWidth;
const valueToCanvasY = (y: number) => MARGIN_TOP + innerHeight - (clamp(y, 0, 255) / 255) * innerHeight;
// Clear & background
ctx.clearRect(0, 0, cssWidth, cssHeight);
ctx.fillStyle = '#111';
ctx.fillRect(0, 0, cssWidth, cssHeight);
// Grid
ctx.strokeStyle = '#2a2a2a';
ctx.lineWidth = 1;
for (let i = 0; i <= 4; i++) {
const y = MARGIN_TOP + (i * innerHeight) / 4;
ctx.beginPath();
ctx.moveTo(MARGIN_LEFT + 0.5, y + 0.5);
ctx.lineTo(MARGIN_LEFT + innerWidth - 0.5, y + 0.5);
ctx.stroke();
}
for (let i = 0; i <= 4; i++) {
const x = MARGIN_LEFT + (i * innerWidth) / 4;
ctx.beginPath();
ctx.moveTo(x + 0.5, MARGIN_TOP + 0.5);
ctx.lineTo(x + 0.5, MARGIN_TOP + innerHeight - 0.5);
ctx.stroke();
}
// Histogram
if (histogram) {
const logHist = histogram.map((v) => Math.log10((v ?? 0) + 1));
const max = Math.max(1e-6, ...logHist);
ctx.fillStyle = '#5557';
// If there's enough horizontal room, draw each of the 256 bins with exact (possibly fractional) width so they tessellate.
// Otherwise, aggregate multiple bins into per-pixel columns to avoid aliasing.
if (innerWidth >= 256) {
for (let i = 0; i < 256; i++) {
const v = logHist[i] ?? 0;
const h = (v / max) * (innerHeight - 2);
// Exact fractional coordinates for seamless coverage (no gaps as width grows)
const x0 = MARGIN_LEFT + (i / 256) * innerWidth;
const x1 = MARGIN_LEFT + ((i + 1) / 256) * innerWidth;
const w = x1 - x0;
if (w <= 0) {
continue;
} // safety
const y = MARGIN_TOP + innerHeight - h;
ctx.fillRect(x0, y, w, h);
}
} else {
// Aggregate bins per CSS pixel column (similar to previous anti-moire approach)
const columns = Math.max(1, Math.round(innerWidth));
const binsPerCol = 256 / columns;
for (let col = 0; col < columns; col++) {
const startBin = Math.floor(col * binsPerCol);
const endBin = Math.min(255, Math.floor((col + 1) * binsPerCol - 1));
let acc = 0;
let count = 0;
for (let b = startBin; b <= endBin; b++) {
acc += logHist[b] ?? 0;
count++;
}
const v = count > 0 ? acc / count : 0;
const h = (v / max) * (innerHeight - 2);
const x = MARGIN_LEFT + col;
const y = MARGIN_TOP + innerHeight - h;
ctx.fillRect(x, y, 1, h);
}
}
}
// Curve
const pts = sortPoints(points);
ctx.strokeStyle = channelColor[channel];
ctx.lineWidth = 2;
ctx.beginPath();
for (let i = 0; i < pts.length; i++) {
const [x, y] = pts[i]!;
const cx = valueToCanvasX(x);
const cy = valueToCanvasY(y);
if (i === 0) {
ctx.moveTo(cx, cy);
} else {
ctx.lineTo(cx, cy);
}
}
ctx.stroke();
// Control points
for (let i = 0; i < pts.length; i++) {
const [x, y] = pts[i]!;
const cx = valueToCanvasX(x);
const cy = valueToCanvasY(y);
ctx.fillStyle = '#000';
ctx.beginPath();
ctx.arc(cx, cy, 3.5, 0, Math.PI * 2);
ctx.fill();
ctx.strokeStyle = channelColor[channel];
ctx.lineWidth = 1.5;
ctx.stroke();
}
};
const getNearestPointIndex = (c: HTMLCanvasElement, points: ChannelPoints, mx: number, my: number) => {
const cssWidth = c.clientWidth || CANVAS_WIDTH;
const cssHeight = c.clientHeight || CANVAS_HEIGHT;
const innerWidth = cssWidth - MARGIN_LEFT - MARGIN_RIGHT;
const innerHeight = cssHeight - MARGIN_TOP - MARGIN_BOTTOM;
const canvasToValueX = (cx: number) => clamp(Math.round(((cx - MARGIN_LEFT) / innerWidth) * 255), 0, 255);
const canvasToValueY = (cy: number) => clamp(Math.round(255 - ((cy - MARGIN_TOP) / innerHeight) * 255), 0, 255);
const xVal = canvasToValueX(mx);
const yVal = canvasToValueY(my);
let best = -1;
let bestDist = 9999;
for (let i = 0; i < points.length; i++) {
const [px, py] = points[i]!;
const dx = px - xVal;
const dy = py - yVal;
const d = dx * dx + dy * dy;
if (d < bestDist) {
best = i;
bestDist = d;
}
}
if (best !== -1 && bestDist <= 20 * 20) {
return best;
}
return -1;
};
const canvasXToValueX = (c: HTMLCanvasElement, cx: number): number => {
const cssWidth = c.clientWidth || CANVAS_WIDTH;
const innerWidth = cssWidth - MARGIN_LEFT - MARGIN_RIGHT;
return clamp(Math.round(((cx - MARGIN_LEFT) / innerWidth) * 255), 0, 255);
};
const canvasYToValueY = (c: HTMLCanvasElement, cy: number) => {
const cssHeight = c.clientHeight || CANVAS_HEIGHT;
const innerHeight = cssHeight - MARGIN_TOP - MARGIN_BOTTOM;
return clamp(Math.round(255 - ((cy - MARGIN_TOP) / innerHeight) * 255), 0, 255);
};
export const RasterLayerCurvesAdjustmentsGraph = memo((props: CurveGraphProps) => {
const { title, channel, points, histogram, onChange } = props;
const canvasRef = useRef<HTMLCanvasElement | null>(null);
const [localPoints, setLocalPoints] = useState<ChannelPoints>(sortPoints(points ?? DEFAULT_POINTS));
const [dragIndex, setDragIndex] = useState<number | null>(null);
useEffect(() => {
setLocalPoints(sortPoints(points ?? DEFAULT_POINTS));
}, [points]);
useEffect(() => {
const c = canvasRef.current;
if (!c) {
return;
}
drawHistogram(c, channel, histogram, localPoints);
}, [channel, histogram, localPoints]);
const handlePointerDown = useCallback(
(e: React.PointerEvent<HTMLCanvasElement>) => {
e.preventDefault();
e.stopPropagation();
const c = canvasRef.current;
if (!c) {
return;
}
// Capture the pointer so we still get pointerup even if released outside the canvas.
try {
c.setPointerCapture(e.pointerId);
} catch {
/* ignore */
}
const rect = c.getBoundingClientRect();
const mx = e.clientX - rect.left; // CSS pixel coordinates
const my = e.clientY - rect.top;
const idx = getNearestPointIndex(c, localPoints, mx, my);
if (idx !== -1 && idx !== 0 && idx !== localPoints.length - 1) {
setDragIndex(idx);
return;
}
const xVal = canvasXToValueX(c, mx);
const yVal = canvasYToValueY(c, my);
const next = sortPoints([...localPoints, [xVal, yVal]]);
setLocalPoints(next);
setDragIndex(next.findIndex(([x, y]) => x === xVal && y === yVal));
},
[localPoints]
);
const handlePointerMove = useCallback(
(e: React.PointerEvent<HTMLCanvasElement>) => {
e.preventDefault();
e.stopPropagation();
if (dragIndex === null) {
return;
}
const c = canvasRef.current;
if (!c) {
return;
}
const rect = c.getBoundingClientRect();
const mx = e.clientX - rect.left;
const my = e.clientY - rect.top;
const mxVal = canvasXToValueX(c, mx);
const myVal = canvasYToValueY(c, my);
setLocalPoints((prev) => {
// Endpoints are immutable; safety check.
if (dragIndex === 0 || dragIndex === prev.length - 1) {
return prev;
}
const leftX = prev[dragIndex - 1]![0];
const rightX = prev[dragIndex + 1]![0];
// Constrain to strictly between neighbors so ordering is preserved & no crossing.
const minX = Math.min(254, leftX);
const maxX = Math.max(1, rightX);
const clampedX = clamp(mxVal, minX, maxX);
// If neighbors are adjacent (minX > maxX after adjustments), effectively lock X.
const finalX = minX > maxX ? leftX /* lock X at the left neighbor */ : clampedX;
const next = [...prev];
next[dragIndex] = [finalX, myVal];
return next; // already ordered due to constraints
});
},
[dragIndex]
);
const commit = useCallback(
(pts: ChannelPoints) => {
onChange(sortPoints(pts));
},
[onChange]
);
const handlePointerUp = useCallback(
(e: React.PointerEvent<HTMLCanvasElement>) => {
e.preventDefault();
e.stopPropagation();
const c = canvasRef.current;
if (c) {
try {
c.releasePointerCapture(e.pointerId);
} catch {
/* ignore */
}
}
setDragIndex(null);
commit(localPoints);
},
[commit, localPoints]
);
const handlePointerCancel = useCallback(
(e: React.PointerEvent<HTMLCanvasElement>) => {
const c = canvasRef.current;
if (c) {
try {
c.releasePointerCapture(e.pointerId);
} catch {
/* ignore */
}
}
setDragIndex(null);
commit(localPoints);
},
[commit, localPoints]
);
const handleDoubleClick = useCallback(
(e: React.MouseEvent<HTMLCanvasElement>) => {
e.preventDefault();
e.stopPropagation();
const c = canvasRef.current;
if (!c) {
return;
}
const rect = c.getBoundingClientRect();
const mx = e.clientX - rect.left;
const my = e.clientY - rect.top;
const idx = getNearestPointIndex(c, localPoints, mx, my);
if (idx > 0 && idx < localPoints.length - 1) {
const next = localPoints.filter((_, i) => i !== idx);
setLocalPoints(next);
commit(next);
}
},
[commit, localPoints]
);
// Observe size changes to redraw (responsive behavior)
useEffect(() => {
const c = canvasRef.current;
if (!c) {
return;
}
const ro = new ResizeObserver(() => {
drawHistogram(c, channel, histogram, localPoints);
});
ro.observe(c);
return () => ro.disconnect();
}, [channel, histogram, localPoints]);
const resetPoints = useCallback(() => {
setLocalPoints(sortPoints(DEFAULT_POINTS));
commit(DEFAULT_POINTS);
}, [commit]);
return (
<Flex flexDir="column" gap={2}>
<Flex justifyContent="space-between">
<Text fontSize="sm" color={channelColor[channel]} fontWeight="semibold">
{title}
</Text>
<IconButton
icon={<PiArrowCounterClockwiseBold />}
aria-label="Reset"
size="sm"
variant="link"
onClick={resetPoints}
/>
</Flex>
<canvas
ref={canvasRef}
onPointerDown={handlePointerDown}
onPointerMove={handlePointerMove}
onPointerUp={handlePointerUp}
onPointerCancel={handlePointerCancel}
onDoubleClick={handleDoubleClick}
style={CANVAS_STYLE}
/>
</Flex>
);
});
RasterLayerCurvesAdjustmentsGraph.displayName = 'RasterLayerCurvesAdjustmentsGraph';
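The graph only edits a sparse list of control points; turning them into an actual per-pixel adjustment presumably happens in the canvas renderer by expanding the points into a 256-entry lookup table. A hedged sketch of that expansion using linear interpolation, which matches the straight segments drawn by drawHistogram above (the function below is illustrative, not the renderer's real code):

// Sketch: expand sorted control points into a 256-entry LUT via linear
// interpolation between neighboring points. Endpoints at x=0 and x=255 are
// guaranteed by the editor, which never lets them be dragged or deleted.
const pointsToLut = (points: ChannelPoints): Uint8ClampedArray => {
  const lut = new Uint8ClampedArray(256);
  const pts = sortPoints(points);
  let seg = 0;
  for (let x = 0; x < 256; x++) {
    // Advance to the segment whose right endpoint is at or beyond x.
    while (seg < pts.length - 2 && x > pts[seg + 1]![0]) {
      seg++;
    }
    const [x0, y0] = pts[seg]!;
    const [x1, y1] = pts[seg + 1]!;
    const t = x1 === x0 ? 0 : (x - x0) / (x1 - x0);
    lut[x] = Math.round(y0 + t * (y1 - y0)); // Uint8ClampedArray clamps to 0..255
  }
  return lut;
};
// Applying a LUT to ImageData is then data[i] = lut[data[i]]! for each channel byte.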

View File

@@ -9,6 +9,7 @@ import { CanvasEntityMenuItemsMergeDown } from 'features/controlLayers/component
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsSelectObject } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSelectObject';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { RasterLayerMenuItemsAdjustments } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsAdjustments';
import { RasterLayerMenuItemsConvertToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsConvertToSubMenu';
import { RasterLayerMenuItemsCopyToSubMenu } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsCopyToSubMenu';
import { memo } from 'react';
@@ -21,10 +22,10 @@ export const RasterLayerMenuItems = memo(() => {
<CanvasEntityMenuItemsDuplicate />
<CanvasEntityMenuItemsDelete asIcon />
</IconMenuItemGroup>
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<CanvasEntityMenuItemsFilter />
<CanvasEntityMenuItemsSelectObject />
<RasterLayerMenuItemsAdjustments />
<MenuDivider />
<CanvasEntityMenuItemsMergeDown />
<RasterLayerMenuItemsCopyToSubMenu />

View File

@@ -0,0 +1,39 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { rasterLayerAdjustmentsCancel, rasterLayerAdjustmentsSet } from 'features/controlLayers/store/canvasSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { makeDefaultRasterLayerAdjustments } from 'features/controlLayers/store/util';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiSlidersHorizontalBold } from 'react-icons/pi';
export const RasterLayerMenuItemsAdjustments = memo(() => {
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext<'raster_layer'>();
const { t } = useTranslation();
const layer = useAppSelector((s) =>
s.canvas.present.rasterLayers.entities.find((e: CanvasRasterLayerState) => e.id === entityIdentifier.id)
);
const hasAdjustments = Boolean(layer?.adjustments);
const onToggleAdjustmentsPresence = useCallback(() => {
if (hasAdjustments) {
dispatch(rasterLayerAdjustmentsCancel({ entityIdentifier }));
} else {
dispatch(
rasterLayerAdjustmentsSet({
entityIdentifier,
adjustments: makeDefaultRasterLayerAdjustments('simple'),
})
);
}
}, [dispatch, entityIdentifier, hasAdjustments]);
return (
<MenuItem onClick={onToggleAdjustmentsPresence} icon={<PiSlidersHorizontalBold />}>
{hasAdjustments ? t('controlLayers.removeAdjustments') : t('controlLayers.addAdjustments')}
</MenuItem>
);
});
RasterLayerMenuItemsAdjustments.displayName = 'RasterLayerMenuItemsAdjustments';
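makeDefaultRasterLayerAdjustments('simple') comes from the store utils and is not shown in this diff; judging from the panel and editors above, its result plausibly looks like the following (a sketch of the assumed shape, not the real implementation):

// Assumed default adjustments shape, inferred from the components above.
const makeDefaultRasterLayerAdjustmentsSketch = (mode: 'simple' | 'curves') => ({
  mode,
  enabled: true,
  collapsed: false,
  simple: { brightness: 0, contrast: 0, saturation: 0, temperature: 0, tint: 0, sharpness: 0 },
  curves: {
    master: [[0, 0], [255, 255]],
    r: [[0, 0], [255, 255]],
    g: [[0, 0], [255, 255]],
    b: [[0, 0], [255, 255]],
  },
});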

View File

@@ -0,0 +1,118 @@
import { CompositeNumberInput, CompositeSlider, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { rasterLayerAdjustmentsSimpleUpdated } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import type { SimpleAdjustmentsConfig } from 'features/controlLayers/store/types';
import React, { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
type AdjustmentSliderRowProps = {
label: string;
name: keyof SimpleAdjustmentsConfig;
onChange: (v: number) => void;
min?: number;
max?: number;
step?: number;
};
const AdjustmentSliderRow = ({ label, name, onChange, min = -1, max = 1, step = 0.01 }: AdjustmentSliderRowProps) => {
const entityIdentifier = useEntityIdentifierContext<'raster_layer'>();
const selectValue = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) =>
selectEntity(canvas, entityIdentifier)?.adjustments?.simple?.[name] ?? DEFAULT_SIMPLE_ADJUSTMENTS[name]
);
}, [entityIdentifier, name]);
const value = useAppSelector(selectValue);
return (
<FormControl orientation="horizontal" mb={1} w="full">
<FormLabel m={0} minW="90px">
{label}
</FormLabel>
<CompositeSlider value={value} onChange={onChange} defaultValue={0} min={min} max={max} step={step} marks />
<CompositeNumberInput value={value} onChange={onChange} defaultValue={0} min={min} max={max} step={step} />
</FormControl>
);
};
const DEFAULT_SIMPLE_ADJUSTMENTS = {
brightness: 0,
contrast: 0,
saturation: 0,
temperature: 0,
tint: 0,
sharpness: 0,
};
export const RasterLayerSimpleAdjustmentsEditor = memo(() => {
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext<'raster_layer'>();
const { t } = useTranslation();
const selectIsDisabled = useMemo(() => {
return createSelector(
selectCanvasSlice,
(canvas) => selectEntity(canvas, entityIdentifier)?.adjustments?.enabled !== true
);
}, [entityIdentifier]);
const isDisabled = useAppSelector(selectIsDisabled);
const onBrightness = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { brightness: v } })),
[dispatch, entityIdentifier]
);
const onContrast = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { contrast: v } })),
[dispatch, entityIdentifier]
);
const onSaturation = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { saturation: v } })),
[dispatch, entityIdentifier]
);
const onTemperature = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { temperature: v } })),
[dispatch, entityIdentifier]
);
const onTint = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { tint: v } })),
[dispatch, entityIdentifier]
);
const onSharpness = useCallback(
(v: number) => dispatch(rasterLayerAdjustmentsSimpleUpdated({ entityIdentifier, simple: { sharpness: v } })),
[dispatch, entityIdentifier]
);
return (
<Flex px={3} pb={2} direction="column" opacity={isDisabled ? 0.3 : 1} pointerEvents={isDisabled ? 'none' : 'auto'}>
<AdjustmentSliderRow
label={t('controlLayers.adjustments.brightness')}
name="brightness"
onChange={onBrightness}
/>
<AdjustmentSliderRow label={t('controlLayers.adjustments.contrast')} name="contrast" onChange={onContrast} />
<AdjustmentSliderRow
label={t('controlLayers.adjustments.saturation')}
name="saturation"
onChange={onSaturation}
/>
<AdjustmentSliderRow
label={t('controlLayers.adjustments.temperature')}
name="temperature"
onChange={onTemperature}
/>
<AdjustmentSliderRow label={t('controlLayers.adjustments.tint')} name="tint" onChange={onTint} />
<AdjustmentSliderRow
label={t('controlLayers.adjustments.sharpness')}
name="sharpness"
onChange={onSharpness}
min={0}
max={1}
/>
</Flex>
);
});
RasterLayerSimpleAdjustmentsEditor.displayName = 'RasterLayerSimpleAdjustmentsEditor';
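All sliders are normalized so 0 is the identity: brightness through tint span [-1, 1] and sharpness spans [0, 1]. The actual pixel math lives in the canvas renderer; purely as an illustration of what values in this range conventionally do, here is a common brightness/contrast mapping over 8-bit channels:

// Illustration only — one conventional mapping; the renderer's real math may differ.
const applyBrightnessContrast = (data: Uint8ClampedArray, brightness: number, contrast: number) => {
  const offset = brightness * 255; // brightness 0 -> no shift
  const slope = Math.tan(((contrast + 1) * Math.PI) / 4); // contrast 0 -> slope 1 (identity)
  for (let i = 0; i < data.length; i += 4) {
    for (let c = 0; c < 3; c++) {
      // Pivot contrast around mid-gray; Uint8ClampedArray clamps and rounds for us.
      data[i + c] = (data[i + c]! - 128) * slope + 128 + offset;
    }
  }
};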

View File

@@ -1,12 +1,16 @@
import { Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { objectEquals } from '@observ33r/object-equals';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { UploadImageIconButton } from 'common/hooks/useImageUploadButton';
import { bboxSizeOptimized, bboxSizeRecalled } from 'features/controlLayers/store/canvasSlice';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { sizeOptimized, sizeRecalled } from 'features/controlLayers/store/paramsSlice';
import type { ImageWithDims } from 'features/controlLayers/store/types';
import type { CroppableImageWithDims } from 'features/controlLayers/store/types';
import { imageDTOToCroppableImage, imageDTOToImageWithDims } from 'features/controlLayers/store/util';
import { Editor } from 'features/cropper/lib/editor';
import { cropImageModalApi } from 'features/cropper/store';
import type { setGlobalReferenceImageDndTarget, setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { DndImage } from 'features/dnd/DndImage';
@@ -14,14 +18,14 @@ import { DndImageIcon } from 'features/dnd/DndImageIcon';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import { PiArrowCounterClockwiseBold, PiCropBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery, useUploadImageMutation } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { $isConnected } from 'services/events/stores';
type Props<T extends typeof setGlobalReferenceImageDndTarget | typeof setRegionalGuidanceReferenceImageDndTarget> = {
image: ImageWithDims | null;
onChangeImage: (imageDTO: ImageDTO | null) => void;
image: CroppableImageWithDims | null;
onChangeImage: (croppableImage: CroppableImageWithDims | null) => void;
dndTarget: T;
dndTargetData: ReturnType<T['getData']>;
};
@@ -38,20 +42,28 @@ export const RefImageImage = memo(
const isConnected = useStore($isConnected);
const tab = useAppSelector(selectActiveTab);
const isStaging = useCanvasIsStaging();
const { currentData: imageDTO, isError } = useGetImageDTOQuery(image?.image_name ?? skipToken);
const imageWithDims = image?.crop?.image ?? image?.original.image ?? null;
const croppedImageDTOReq = useGetImageDTOQuery(image?.crop?.image?.image_name ?? skipToken);
const originalImageDTOReq = useGetImageDTOQuery(image?.original.image.image_name ?? skipToken);
const [uploadImage] = useUploadImageMutation();
const originalImageDTO = originalImageDTOReq.currentData;
const croppedImageDTO = croppedImageDTOReq.currentData;
const imageDTO = croppedImageDTO ?? originalImageDTO;
const handleResetControlImage = useCallback(() => {
onChangeImage(null);
}, [onChangeImage]);
useEffect(() => {
if (isConnected && isError) {
if (isConnected && (croppedImageDTOReq.isError || originalImageDTOReq.isError)) {
handleResetControlImage();
}
}, [handleResetControlImage, isError, isConnected]);
}, [handleResetControlImage, isConnected, croppedImageDTOReq.isError, originalImageDTOReq.isError]);
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
onChangeImage(imageDTO);
onChangeImage(imageDTOToCroppableImage(imageDTO));
},
[onChangeImage]
);
@@ -70,13 +82,67 @@ export const RefImageImage = memo(
}
}, [imageDTO, isStaging, store, tab]);
const edit = useCallback(() => {
if (!originalImageDTO) {
return;
}
// We will create a new editor instance each time the user wants to edit
const editor = new Editor();
// When the user applies the crop, we will upload the cropped image and store the applied crop box so if the user
// re-opens the editor they see the same crop
const onApplyCrop = async () => {
const box = editor.getCropBox();
if (objectEquals(box, image?.crop?.box)) {
// If the box hasn't changed, don't do anything
return;
}
if (!box || objectEquals(box, { x: 0, y: 0, width: originalImageDTO.width, height: originalImageDTO.height })) {
// There is a crop applied but it is the whole image - revert to the original image
onChangeImage(imageDTOToCroppableImage(originalImageDTO));
return;
}
const blob = await editor.exportImage('blob');
const file = new File([blob], 'image.png', { type: 'image/png' });
const newCroppedImageDTO = await uploadImage({
file,
is_intermediate: true,
image_category: 'user',
}).unwrap();
onChangeImage(
imageDTOToCroppableImage(originalImageDTO, {
image: imageDTOToImageWithDims(newCroppedImageDTO),
box,
ratio: editor.getCropAspectRatio(),
})
);
};
const onReady = async () => {
const initial = image?.crop ? { cropBox: image.crop.box, aspectRatio: image.crop.ratio } : undefined;
// Load the image into the editor and open the modal once it's ready
await editor.loadImage(originalImageDTO.image_url, initial);
};
cropImageModalApi.open({ editor, onApplyCrop, onReady });
}, [image?.crop, onChangeImage, originalImageDTO, uploadImage]);
return (
<Flex position="relative" w="full" h="full" alignItems="center" data-error={!imageDTO && !image?.image_name}>
<Flex
position="relative"
w="full"
h="full"
alignItems="center"
data-error={!imageDTO && !imageWithDims?.image_name}
>
{!imageDTO && (
<UploadImageIconButton
w="full"
h="full"
isError={!imageDTO && !image?.image_name}
isError={!imageDTO && !imageWithDims?.image_name}
onUpload={onUpload}
fontSize={36}
/>
@@ -99,6 +165,15 @@ export const RefImageImage = memo(
isDisabled={!imageDTO || (tab === 'canvas' && isStaging)}
/>
</Flex>
<Flex position="absolute" flexDir="column" top={2} insetInlineStart={2} gap={1}>
<DndImageIcon
onClick={edit}
icon={<PiCropBold size={16} />}
tooltip={t('common.crop')}
isDisabled={!imageDTO}
/>
</Flex>
</>
)}
<DndDropTarget dndTarget={dndTarget} dndTargetData={dndTargetData} label={t('gallery.drop')} />
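This file's refactor replaces the flat ImageWithDims with CroppableImageWithDims, which keeps the original upload alongside an optional applied crop (the uploaded crop result plus the crop box and aspect-ratio lock, so reopening the editor restores the same crop). The real type lives in features/controlLayers/store/types; from its usage above it plausibly reads:

// Assumed shape, inferred from usage (image?.original.image, image?.crop?.image,
// image?.crop?.box, image?.crop?.ratio) — not the actual definition.
type CroppableImageWithDimsSketch = {
  original: { image: ImageWithDims };
  crop?: {
    image: ImageWithDims; // the uploaded, cropped image
    box: { x: number; y: number; width: number; height: number };
    ratio: number | null; // assumed return of editor.getCropAspectRatio(); exact type not shown here
  };
};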

View File

@@ -13,7 +13,7 @@ import {
selectRefImageEntityIds,
selectSelectedRefEntityId,
} from 'features/controlLayers/store/refImagesSlice';
import { imageDTOToImageWithDims } from 'features/controlLayers/store/util';
import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
import { addGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
@@ -92,7 +92,7 @@ const AddRefImageDropTargetAndButton = memo(() => {
({
onUpload: (imageDTO: ImageDTO) => {
const config = getDefaultRefImageConfig(getState);
config.image = imageDTOToImageWithDims(imageDTO);
config.image = imageDTOToCroppableImage(imageDTO);
dispatch(refImageAdded({ overrides: { config } }));
},
allowMultiple: false,

View File

@@ -1,6 +1,5 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, Icon, IconButton, Image, Skeleton, Text, Tooltip } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { round } from 'es-toolkit/compat';
import { useRefImageEntity } from 'features/controlLayers/components/RefImage/useRefImageEntity';
@@ -15,7 +14,7 @@ import { isIPAdapterConfig } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
import { PiExclamationMarkBold, PiEyeSlashBold, PiImageBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import { useImageDTOFromCroppableImage } from 'services/api/endpoints/images';
import { RefImageWarningTooltipContent } from './RefImageWarningTooltipContent';
@@ -72,7 +71,8 @@ export const RefImagePreview = memo(() => {
const selectedEntityId = useAppSelector(selectSelectedRefEntityId);
const isPanelOpen = useAppSelector(selectIsRefImagePanelOpen);
const [showWeightDisplay, setShowWeightDisplay] = useState(false);
const { data: imageDTO } = useGetImageDTOQuery(entity.config.image?.image_name ?? skipToken);
const imageDTO = useImageDTOFromCroppableImage(entity.config.image);
const sx = useMemo(() => {
if (!isIPAdapterConfig(entity.config)) {
@@ -145,7 +145,7 @@ export const RefImagePreview = memo(() => {
overflow="hidden"
>
<Image
src={imageDTO?.thumbnail_url}
src={imageDTO?.image_url}
objectFit="contain"
aspectRatio="1/1"
height={imageDTO?.height}
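useImageDTOFromCroppableImage replaces the direct useGetImageDTOQuery call; given the fallback logic in RefImageImage above, it presumably resolves the cropped image first and falls back to the original. A plausible sketch (not the actual implementation):

// Sketch of the assumed hook: prefer the crop result, fall back to the original.
const useImageDTOFromCroppableImageSketch = (image: CroppableImageWithDims | null) => {
  const cropped = useGetImageDTOQuery(image?.crop?.image.image_name ?? skipToken);
  const original = useGetImageDTOQuery(image?.original.image.image_name ?? skipToken);
  return cropped.currentData ?? original.currentData;
};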

View File

@@ -30,6 +30,7 @@ import {
} from 'features/controlLayers/store/refImagesSlice';
import type {
CLIPVisionModelV2,
CroppableImageWithDims,
FLUXReduxImageInfluence as FLUXReduxImageInfluenceType,
IPMethodV2,
} from 'features/controlLayers/store/types';
@@ -42,7 +43,6 @@ import type {
ChatGPT4oModelConfig,
FLUXKontextModelConfig,
FLUXReduxModelConfig,
ImageDTO,
IPAdapterModelConfig,
} from 'services/api/types';
@@ -104,15 +104,19 @@ const RefImageSettingsContent = memo(() => {
);
const onChangeImage = useCallback(
(imageDTO: ImageDTO | null) => {
dispatch(refImageImageChanged({ id, imageDTO }));
(croppableImage: CroppableImageWithDims | null) => {
dispatch(refImageImageChanged({ id, croppableImage }));
},
[dispatch, id]
);
const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
() => setGlobalReferenceImageDndTarget.getData({ id }, config.image?.image_name),
[id, config.image?.image_name]
() =>
setGlobalReferenceImageDndTarget.getData(
{ id },
config.image?.crop?.image.image_name ?? config.image?.original.image.image_name
),
[id, config.image?.crop?.image.image_name, config.image?.original.image.image_name]
);
const isFLUX = useAppSelector(selectIsFLUX);

View File

@@ -6,7 +6,6 @@ import { FLUXReduxImageInfluence } from 'features/controlLayers/components/commo
import { IPAdapterCLIPVisionModel } from 'features/controlLayers/components/common/IPAdapterCLIPVisionModel';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterMethod } from 'features/controlLayers/components/RefImage/IPAdapterMethod';
import { RefImageImage } from 'features/controlLayers/components/RefImage/RefImageImage';
import { RegionalGuidanceIPAdapterSettingsEmptyState } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettingsEmptyState';
import { RegionalReferenceImageModel } from 'features/controlLayers/components/RegionalGuidance/RegionalReferenceImageModel';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -37,6 +36,8 @@ import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
import type { FLUXReduxModelConfig, ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
import { RegionalGuidanceRefImageImage } from './RegionalGuidanceRefImageImage';
type Props = {
referenceImageId: string;
};
@@ -114,7 +115,7 @@ const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Pro
{ entityIdentifier, referenceImageId },
config.image?.image_name
),
[entityIdentifier, config.image?.image_name, referenceImageId]
[entityIdentifier, config.image, referenceImageId]
);
const pullBboxIntoIPAdapter = usePullBboxIntoRegionalGuidanceReferenceImage(entityIdentifier, referenceImageId);
@@ -170,7 +171,7 @@ const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Pro
</Flex>
)}
<Flex alignItems="center" justifyContent="center" h={32} w={32} aspectRatio="1/1" flexGrow={1}>
<RefImageImage
<RegionalGuidanceRefImageImage
image={config.image}
onChangeImage={onChangeImage}
dndTarget={setRegionalGuidanceReferenceImageDndTarget}

View File

@@ -0,0 +1,103 @@
import { Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { UploadImageIconButton } from 'common/hooks/useImageUploadButton';
import { bboxSizeOptimized, bboxSizeRecalled } from 'features/controlLayers/store/canvasSlice';
import { useCanvasIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { sizeOptimized, sizeRecalled } from 'features/controlLayers/store/paramsSlice';
import type { ImageWithDims } from 'features/controlLayers/store/types';
import type { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { DndImage } from 'features/dnd/DndImage';
import { DndImageIcon } from 'features/dnd/DndImageIcon';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { $isConnected } from 'services/events/stores';
type Props = {
image: ImageWithDims | null;
onChangeImage: (imageDTO: ImageDTO | null) => void;
dndTarget: typeof setRegionalGuidanceReferenceImageDndTarget;
dndTargetData: ReturnType<(typeof setRegionalGuidanceReferenceImageDndTarget)['getData']>;
};
export const RegionalGuidanceRefImageImage = memo(({ image, onChangeImage, dndTarget, dndTargetData }: Props) => {
const { t } = useTranslation();
const store = useAppStore();
const isConnected = useStore($isConnected);
const tab = useAppSelector(selectActiveTab);
const isStaging = useCanvasIsStaging();
const { currentData: imageDTO, isError } = useGetImageDTOQuery(image?.image_name ?? skipToken);
const handleResetControlImage = useCallback(() => {
onChangeImage(null);
}, [onChangeImage]);
useEffect(() => {
if (isConnected && isError) {
handleResetControlImage();
}
}, [handleResetControlImage, isError, isConnected]);
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
onChangeImage(imageDTO);
},
[onChangeImage]
);
const recallSizeAndOptimize = useCallback(() => {
if (!imageDTO || (tab === 'canvas' && isStaging)) {
return;
}
const { width, height } = imageDTO;
if (tab === 'canvas') {
store.dispatch(bboxSizeRecalled({ width, height }));
store.dispatch(bboxSizeOptimized());
} else if (tab === 'generate') {
store.dispatch(sizeRecalled({ width, height }));
store.dispatch(sizeOptimized());
}
}, [imageDTO, isStaging, store, tab]);
return (
<Flex position="relative" w="full" h="full" alignItems="center" data-error={!imageDTO && !image?.image_name}>
{!imageDTO && (
<UploadImageIconButton
w="full"
h="full"
isError={!imageDTO && !image?.image_name}
onUpload={onUpload}
fontSize={36}
/>
)}
{imageDTO && (
<>
<DndImage imageDTO={imageDTO} borderRadius="base" borderWidth={1} borderStyle="solid" w="full" />
<Flex position="absolute" flexDir="column" top={2} insetInlineEnd={2} gap={1}>
<DndImageIcon
onClick={handleResetControlImage}
icon={<PiArrowCounterClockwiseBold size={16} />}
tooltip={t('common.reset')}
/>
</Flex>
<Flex position="absolute" flexDir="column" bottom={2} insetInlineEnd={2} gap={1}>
<DndImageIcon
onClick={recallSizeAndOptimize}
icon={<PiRulerBold size={16} />}
tooltip={t('parameters.useSize')}
isDisabled={!imageDTO || (tab === 'canvas' && isStaging)}
/>
</Flex>
</>
)}
<DndDropTarget dndTarget={dndTarget} dndTargetData={dndTargetData} label={t('gallery.drop')} />
</Flex>
);
});
RegionalGuidanceRefImageImage.displayName = 'RegionalGuidanceRefImageImage';

View File

@@ -1,36 +1,22 @@
import {
Button,
ButtonGroup,
Flex,
Heading,
Icon,
ListItem,
Menu,
MenuButton,
MenuItem,
MenuList,
Spacer,
Spinner,
Text,
Tooltip,
UnorderedList,
} from '@invoke-ai/ui-library';
import { Flex, Heading, Spacer } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import { CanvasAutoProcessSwitch } from 'features/controlLayers/components/CanvasAutoProcessSwitch';
import { CanvasOperationIsolatedLayerPreviewSwitch } from 'features/controlLayers/components/CanvasOperationIsolatedLayerPreviewSwitch';
import { SelectObjectActionButtons } from 'features/controlLayers/components/SelectObject/SelectObjectActionButtons';
import { SelectObjectInfoTooltip } from 'features/controlLayers/components/SelectObject/SelectObjectInfoTooltip';
import { SelectObjectInputTypeButtons } from 'features/controlLayers/components/SelectObject/SelectObjectInputTypeButtons';
import { SelectObjectInvert } from 'features/controlLayers/components/SelectObject/SelectObjectInvert';
import { SelectObjectPointType } from 'features/controlLayers/components/SelectObject/SelectObjectPointType';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import type { PropsWithChildren } from 'react';
import { memo, useCallback, useRef } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiInfoBold } from 'react-icons/pi';
import { memo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { SelectObjectModel } from './SelectObjectModel';
import { SelectObjectPrompt } from './SelectObjectPrompt';
const SelectObjectContent = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
@@ -39,25 +25,7 @@ const SelectObjectContent = memo(
useFocusRegion('canvas', ref, { focusOnMount: true });
const isCanvasFocused = useIsRegionFocused('canvas');
const isProcessing = useStore(adapter.segmentAnything.$isProcessing);
const hasPoints = useStore(adapter.segmentAnything.$hasPoints);
const hasImageState = useStore(adapter.segmentAnything.$hasImageState);
const autoProcess = useAppSelector(selectAutoProcess);
const saveAsInpaintMask = useCallback(() => {
adapter.segmentAnything.saveAs('inpaint_mask');
}, [adapter.segmentAnything]);
const saveAsRegionalGuidance = useCallback(() => {
adapter.segmentAnything.saveAs('regional_guidance');
}, [adapter.segmentAnything]);
const saveAsRasterLayer = useCallback(() => {
adapter.segmentAnything.saveAs('raster_layer');
}, [adapter.segmentAnything]);
const saveAsControlLayer = useCallback(() => {
adapter.segmentAnything.saveAs('control_layer');
}, [adapter.segmentAnything]);
const inputType = useStore(adapter.segmentAnything.$inputType);
useRegisteredHotkeys({
id: 'applySegmentAnything',
@@ -94,11 +62,7 @@ const SelectObjectContent = memo(
<Heading size="md" color="base.300" userSelect="none">
{t('controlLayers.selectObject.selectObject')}
</Heading>
<Tooltip label={<SelectObjectHelpTooltipContent />}>
<Flex alignItems="center">
<Icon as={PiInfoBold} color="base.500" />
</Flex>
</Tooltip>
<SelectObjectInfoTooltip />
</Flex>
<Spacer />
<CanvasAutoProcessSwitch />
@@ -106,71 +70,14 @@ const SelectObjectContent = memo(
</Flex>
<Flex w="full" justifyContent="space-between" py={2}>
<SelectObjectPointType adapter={adapter} />
<SelectObjectInputTypeButtons adapter={adapter} />
<SelectObjectInvert adapter={adapter} />
</Flex>
<ButtonGroup isAttached={false} size="sm" w="full">
<Button
onClick={adapter.segmentAnything.processImmediate}
loadingText={t('controlLayers.selectObject.process')}
variant="ghost"
isDisabled={isProcessing || !hasPoints || (autoProcess && hasImageState)}
>
{t('controlLayers.selectObject.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
</Button>
<Spacer />
<Button
onClick={adapter.segmentAnything.reset}
isDisabled={isProcessing || !hasPoints}
loadingText={t('controlLayers.selectObject.reset')}
variant="ghost"
>
{t('controlLayers.selectObject.reset')}
</Button>
<Button
onClick={adapter.segmentAnything.apply}
loadingText={t('controlLayers.selectObject.apply')}
variant="ghost"
isDisabled={isProcessing || !hasImageState}
>
{t('controlLayers.selectObject.apply')}
</Button>
<Menu>
<MenuButton
as={Button}
loadingText={t('controlLayers.selectObject.saveAs')}
variant="ghost"
isDisabled={isProcessing || !hasImageState}
rightIcon={<PiCaretDownBold />}
>
{t('controlLayers.selectObject.saveAs')}
</MenuButton>
<MenuList>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.newInpaintMask')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.newRegionalGuidance')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.newControlLayer')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.newRasterLayer')}
</MenuItem>
</MenuList>
</Menu>
<Button
onClick={adapter.segmentAnything.cancel}
isDisabled={isProcessing}
loadingText={t('common.cancel')}
variant="ghost"
>
{t('controlLayers.selectObject.cancel')}
</Button>
</ButtonGroup>
{inputType === 'visual' && <SelectObjectPointType adapter={adapter} />}
{inputType === 'prompt' && <SelectObjectPrompt adapter={adapter} />}
<SelectObjectModel adapter={adapter} />
<SelectObjectActionButtons adapter={adapter} />
</Flex>
);
}
@@ -190,34 +97,3 @@ export const SelectObject = memo(() => {
});
SelectObject.displayName = 'SelectObject';
const Bold = (props: PropsWithChildren) => (
<Text as="span" fontWeight="semibold">
{props.children}
</Text>
);
const SelectObjectHelpTooltipContent = memo(() => {
const { t } = useTranslation();
return (
<Flex gap={3} flexDir="column">
<Text>
<Trans i18nKey="controlLayers.selectObject.help1" components={{ Bold: <Bold /> }} />
</Text>
<Text>
<Trans i18nKey="controlLayers.selectObject.help2" components={{ Bold: <Bold /> }} />
</Text>
<Text>
<Trans i18nKey="controlLayers.selectObject.help3" />
</Text>
<UnorderedList>
<ListItem>{t('controlLayers.selectObject.clickToAdd')}</ListItem>
<ListItem>{t('controlLayers.selectObject.dragToMove')}</ListItem>
<ListItem>{t('controlLayers.selectObject.clickToRemove')}</ListItem>
</UnorderedList>
</Flex>
);
});
SelectObjectHelpTooltipContent.displayName = 'SelectObjectHelpTooltipContent';

View File

@@ -0,0 +1,64 @@
import { Button, ButtonGroup, Spacer, Spinner } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { selectAutoProcess } from 'features/controlLayers/store/canvasSettingsSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { SelectObjectSaveAsMenu } from './SelectObjectSaveAsMenu';
interface SelectObjectActionButtonsProps {
adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer;
}
export const SelectObjectActionButtons = memo(({ adapter }: SelectObjectActionButtonsProps) => {
const { t } = useTranslation();
const isProcessing = useStore(adapter.segmentAnything.$isProcessing);
const hasInput = useStore(adapter.segmentAnything.$hasInputData);
const hasImageState = useStore(adapter.segmentAnything.$hasImageState);
const autoProcess = useAppSelector(selectAutoProcess);
return (
<ButtonGroup isAttached={false} size="sm" w="full">
<Button
onClick={adapter.segmentAnything.processImmediate}
loadingText={t('controlLayers.selectObject.process')}
variant="ghost"
isDisabled={isProcessing || !hasInput || (autoProcess && hasImageState)}
>
{t('controlLayers.selectObject.process')}
{isProcessing && <Spinner ms={3} boxSize={5} color="base.600" />}
</Button>
<Spacer />
<Button
onClick={adapter.segmentAnything.reset}
isDisabled={isProcessing || !hasInput}
loadingText={t('controlLayers.selectObject.reset')}
variant="ghost"
>
{t('controlLayers.selectObject.reset')}
</Button>
<Button
onClick={adapter.segmentAnything.apply}
loadingText={t('controlLayers.selectObject.apply')}
variant="ghost"
isDisabled={isProcessing || !hasImageState}
>
{t('controlLayers.selectObject.apply')}
</Button>
<SelectObjectSaveAsMenu adapter={adapter} />
<Button
onClick={adapter.segmentAnything.cancel}
isDisabled={isProcessing}
loadingText={t('common.cancel')}
variant="ghost"
>
{t('controlLayers.selectObject.cancel')}
</Button>
</ButtonGroup>
);
});
SelectObjectActionButtons.displayName = 'SelectObjectActionButtons';

View File

@@ -0,0 +1,59 @@
import { Flex, Icon, ListItem, Text, Tooltip, UnorderedList } from '@invoke-ai/ui-library';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';
import { Trans } from 'react-i18next';
import { PiInfoBold } from 'react-icons/pi';
const Bold = (props: PropsWithChildren) => (
<Text as="span" fontWeight="semibold">
{props.children}
</Text>
);
const components = { Bold: <Bold /> };
const SelectObjectHelpTooltipContent = memo(() => {
return (
<Flex gap={3} flexDir="column">
<Text>
<Trans i18nKey="controlLayers.selectObject.desc" components={components} />
</Text>
<UnorderedList>
<ListItem>
<Trans i18nKey="controlLayers.selectObject.visualMode1" components={components} />
</ListItem>
<ListItem>
<Trans i18nKey="controlLayers.selectObject.visualMode2" components={components} />
</ListItem>
<ListItem>
<Trans i18nKey="controlLayers.selectObject.visualMode3" components={components} />
</ListItem>
</UnorderedList>
<Text>
<Trans i18nKey="controlLayers.selectObject.promptModeDesc" components={components} />
</Text>
<UnorderedList>
<ListItem>
<Trans i18nKey="controlLayers.selectObject.promptMode1" components={components} />
</ListItem>
<ListItem>
<Trans i18nKey="controlLayers.selectObject.promptMode2" components={components} />
</ListItem>
</UnorderedList>
</Flex>
);
});
SelectObjectHelpTooltipContent.displayName = 'SelectObjectHelpTooltipContent';
export const SelectObjectInfoTooltip = memo(() => {
return (
<Tooltip label={<SelectObjectHelpTooltipContent />} minW={420}>
<Flex alignItems="center">
<Icon as={PiInfoBold} color="base.500" />
</Flex>
</Tooltip>
);
});
SelectObjectInfoTooltip.displayName = 'SelectObjectInfoTooltip';

View File

@@ -0,0 +1,34 @@
import { Button, ButtonGroup } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { memo, useCallback } from 'react';
interface SelectObjectInputTypeButtonsProps {
adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer;
}
export const SelectObjectInputTypeButtons = memo(({ adapter }: SelectObjectInputTypeButtonsProps) => {
const inputType = useStore(adapter.segmentAnything.$inputType);
const setInputToVisual = useCallback(() => {
adapter.segmentAnything.setInputType('visual');
}, [adapter.segmentAnything]);
const setInputToPrompt = useCallback(() => {
adapter.segmentAnything.setInputType('prompt');
}, [adapter.segmentAnything]);
return (
<ButtonGroup size="sm" variant="outline">
<Button colorScheme={inputType === 'visual' ? 'invokeBlue' : undefined} onClick={setInputToVisual}>
Visual
</Button>
<Button colorScheme={inputType === 'prompt' ? 'invokeBlue' : undefined} onClick={setInputToPrompt}>
Prompt
</Button>
</ButtonGroup>
);
});
SelectObjectInputTypeButtons.displayName = 'SelectObjectInputTypeButtons';

View File

@@ -0,0 +1,40 @@
import { Flex, FormControl, FormLabel, Radio, RadioGroup, Text } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { zSAMModel } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const SelectObjectModel = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
const { t } = useTranslation();
const model = useStore(adapter.segmentAnything.$model);
const onChange = useCallback(
(v: string) => {
const model = zSAMModel.parse(v);
adapter.segmentAnything.$model.set(model);
},
[adapter.segmentAnything.$model]
);
return (
<FormControl w="full">
<FormLabel m={0}>{t('controlLayers.selectObject.model')}</FormLabel>
<RadioGroup value={model} onChange={onChange} w="full" size="md">
<Flex alignItems="center" w="full" gap={4} color="base.300">
<Radio value="SAM1">
<Text>{t('controlLayers.selectObject.segmentAnything1')}</Text>
</Radio>
<Radio value="SAM2">
<Text>{t('controlLayers.selectObject.segmentAnything2')}</Text>
</Radio>
</Flex>
</RadioGroup>
</FormControl>
);
}
);
SelectObjectModel.displayName = 'SelectObjectModel';

View File

@@ -24,7 +24,7 @@ export const SelectObjectPointType = memo(
<FormControl w="min-content">
<FormLabel m={0}>{t('controlLayers.selectObject.pointType')}</FormLabel>
<RadioGroup value={pointType} onChange={onChange} w="full" size="md">
<Flex alignItems="center" w="full" gap={4} fontWeight="semibold" color="base.300">
<Flex alignItems="center" w="full" gap={4} color="base.300">
<Radio value="foreground">
<Text>{t('controlLayers.selectObject.include')}</Text>
</Radio>

View File

@@ -0,0 +1,34 @@
import { FormControl, FormLabel, Input } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const SelectObjectPrompt = memo(
({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
const { t } = useTranslation();
const inputData = useStore(adapter.segmentAnything.$inputData);
const onChange = useCallback(
(e: ChangeEvent<HTMLInputElement>) => {
adapter.segmentAnything.$inputData.set({ type: 'prompt', prompt: e.target.value });
},
[adapter.segmentAnything.$inputData]
);
if (inputData.type !== 'prompt') {
return null;
}
return (
<FormControl w="full">
<FormLabel m={0}>{t('controlLayers.selectObject.prompt')}</FormLabel>
<Input value={inputData.prompt} onChange={onChange} />
</FormControl>
);
}
);
SelectObjectPrompt.displayName = 'SelectObjectPrompt';

View File

@@ -0,0 +1,63 @@
import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretDownBold } from 'react-icons/pi';
interface SelectObjectSaveAsMenuProps {
adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer;
}
export const SelectObjectSaveAsMenu = memo(({ adapter }: SelectObjectSaveAsMenuProps) => {
const { t } = useTranslation();
const isProcessing = useStore(adapter.segmentAnything.$isProcessing);
const hasImageState = useStore(adapter.segmentAnything.$hasImageState);
const saveAsInpaintMask = useCallback(() => {
adapter.segmentAnything.saveAs('inpaint_mask');
}, [adapter.segmentAnything]);
const saveAsRegionalGuidance = useCallback(() => {
adapter.segmentAnything.saveAs('regional_guidance');
}, [adapter.segmentAnything]);
const saveAsRasterLayer = useCallback(() => {
adapter.segmentAnything.saveAs('raster_layer');
}, [adapter.segmentAnything]);
const saveAsControlLayer = useCallback(() => {
adapter.segmentAnything.saveAs('control_layer');
}, [adapter.segmentAnything]);
return (
<Menu>
<MenuButton
as={Button}
loadingText={t('controlLayers.selectObject.saveAs')}
variant="ghost"
isDisabled={isProcessing || !hasImageState}
rightIcon={<PiCaretDownBold />}
>
{t('controlLayers.selectObject.saveAs')}
</MenuButton>
<MenuList>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsInpaintMask}>
{t('controlLayers.newInpaintMask')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRegionalGuidance}>
{t('controlLayers.newRegionalGuidance')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsControlLayer}>
{t('controlLayers.newControlLayer')}
</MenuItem>
<MenuItem isDisabled={isProcessing || !hasImageState} onClick={saveAsRasterLayer}>
{t('controlLayers.newRasterLayer')}
</MenuItem>
</MenuList>
</Menu>
);
});
SelectObjectSaveAsMenu.displayName = 'SelectObjectSaveAsMenu';

View File

@@ -8,6 +8,7 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
Text,
useShiftModifier,
} from '@invoke-ai/ui-library';
@@ -45,62 +46,64 @@ export const CanvasSettingsPopover = memo(() => {
alignSelf="stretch"
/>
</PopoverTrigger>
<PopoverContent maxW="280px">
<PopoverArrow />
<PopoverBody>
<Flex direction="column" gap={2}>
{/* Behavior Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2}>
<Icon as={PiPencilFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm" color="base.100">
{t('hotkeys.canvas.settings.behavior')}
</Text>
<Portal>
<PopoverContent maxW="280px">
<PopoverArrow />
<PopoverBody>
<Flex direction="column" gap={2}>
{/* Behavior Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2}>
<Icon as={PiPencilFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm" color="base.100">
{t('hotkeys.canvas.settings.behavior')}
</Text>
</Flex>
<CanvasSettingsInvertScrollCheckbox />
<CanvasSettingsPressureSensitivityCheckbox />
<CanvasSettingsPreserveMaskCheckbox />
<CanvasSettingsClipToBboxCheckbox />
<CanvasSettingsOutputOnlyMaskedRegionsCheckbox />
<CanvasSettingsSaveAllImagesToGalleryCheckbox />
</Flex>
<CanvasSettingsInvertScrollCheckbox />
<CanvasSettingsPressureSensitivityCheckbox />
<CanvasSettingsPreserveMaskCheckbox />
<CanvasSettingsClipToBboxCheckbox />
<CanvasSettingsOutputOnlyMaskedRegionsCheckbox />
<CanvasSettingsSaveAllImagesToGalleryCheckbox />
</Flex>
<Divider />
<Divider />
{/* Display Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2} color="base.200">
<Icon as={PiEyeFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm">
{t('hotkeys.canvas.settings.display')}
</Text>
{/* Display Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2} color="base.200">
<Icon as={PiEyeFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm">
{t('hotkeys.canvas.settings.display')}
</Text>
</Flex>
<CanvasSettingsShowProgressOnCanvas />
<CanvasSettingsIsolatedStagingPreviewSwitch />
<CanvasSettingsIsolatedLayerPreviewSwitch />
<CanvasSettingsBboxOverlaySwitch />
<CanvasSettingsShowHUDSwitch />
</Flex>
<CanvasSettingsShowProgressOnCanvas />
<CanvasSettingsIsolatedStagingPreviewSwitch />
<CanvasSettingsIsolatedLayerPreviewSwitch />
<CanvasSettingsBboxOverlaySwitch />
<CanvasSettingsShowHUDSwitch />
</Flex>
<Divider />
<Divider />
{/* Grid Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2} color="base.200">
<Icon as={PiSquaresFourFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm">
{t('hotkeys.canvas.settings.grid')}
</Text>
{/* Grid Settings */}
<Flex direction="column" gap={1}>
<Flex align="center" gap={2} color="base.200">
<Icon as={PiSquaresFourFill} boxSize={4} />
<Text fontWeight="bold" fontSize="sm">
{t('hotkeys.canvas.settings.grid')}
</Text>
</Flex>
<CanvasSettingsSnapToGridCheckbox />
<CanvasSettingsDynamicGridSwitch />
<CanvasSettingsRuleOfThirdsSwitch />
</Flex>
<CanvasSettingsSnapToGridCheckbox />
<CanvasSettingsDynamicGridSwitch />
<CanvasSettingsRuleOfThirdsSwitch />
</Flex>
<DebugSettings />
</Flex>
</PopoverBody>
</PopoverContent>
<DebugSettings />
</Flex>
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
);
});

View File

@@ -707,10 +707,9 @@ describe('StagingAreaApi', () => {
// Should end up with the last set of items
expect(api.$items.get()).toBe(items2);
// The selectedItemId retains the old value (1) but $selectedItem will be null
// because item 1 is no longer in the items list
expect(api.$selectedItemId.get()).toBe(1);
expect(api.$selectedItem.get()).toBe(null);
// We expect the selection to have moved to the next existing item
expect(api.$selectedItemId.get()).toBe(2);
expect(api.$selectedItem.get()?.item.item_id).toBe(2);
});
it('should handle multiple progress events for same item', () => {

View File

@@ -361,6 +361,27 @@ export class StagingAreaApi {
}
}
const selectedItemId = this.$selectedItemId.get();
if (selectedItemId !== null && !items.find(({ item_id }) => item_id === selectedItemId)) {
// If the selected item no longer exists, select the next best item.
// Prefer the item now occupying the removed selection's old position - we must check oldItems to find that index.
const nextItemIndex = oldItems.findIndex(({ item_id }) => item_id === selectedItemId);
if (nextItemIndex !== -1) {
const nextItem = items[nextItemIndex] ?? items[nextItemIndex - 1];
if (nextItem) {
this.$selectedItemId.set(nextItem.item_id);
}
} else {
// Next, if there is an in-progress item, select that.
const inProgressItem = items.find(({ status }) => status === 'in_progress');
if (inProgressItem) {
this.$selectedItemId.set(inProgressItem.item_id);
} else {
// Finally, fall back to the first item (or null if the list is empty).
this.$selectedItemId.set(items[0]?.item_id ?? null);
}
}
}
this.$items.set(items);
};
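For clarity, the selection fallback resolves in this order (item ids below are illustrative):
// 1. Positional: the item now occupying the removed selection's old index, or its left neighbor.
//    e.g. oldItems ids [1, 2, 3], selected id 2, new items [1, 3] -> old index 1 -> item 3 selected.
// 2. In-progress: the first item with status === 'in_progress'.
// 3. First item: items[0]?.item_id ?? null.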

View File

@@ -6,6 +6,7 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
Tooltip,
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
@@ -102,12 +103,14 @@ export const ToolFillColorPicker = memo(() => {
</Tooltip>
</Flex>
</PopoverTrigger>
<PopoverContent>
<PopoverArrow />
<PopoverBody minH={64}>
<RgbaColorPicker color={activeColor} onChange={onColorChange} withNumberInput withSwatches />
</PopoverBody>
</PopoverContent>
<Portal>
<PopoverContent>
<PopoverArrow />
<PopoverBody minH={64}>
<RgbaColorPicker color={activeColor} onChange={onColorChange} withNumberInput withSwatches />
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
);
});

View File

@@ -12,6 +12,7 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
@@ -122,21 +123,23 @@ const DropDownToolWidthPickerComponent = memo(
</NumberInput>
</PopoverAnchor>
</FormControl>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={mapRawValueToSliderValue(localValue)}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
/>
</PopoverBody>
</PopoverContent>
<Portal>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={mapRawValueToSliderValue(localValue)}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
/>
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
);
}

View File

@@ -12,6 +12,7 @@ import {
PopoverBody,
PopoverContent,
PopoverTrigger,
Portal,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { round } from 'es-toolkit/compat';
@@ -153,21 +154,23 @@ export const CanvasToolbarScale = memo(() => {
</PopoverTrigger>
</NumberInput>
</PopoverAnchor>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={mapRawValueToSliderValue(localScale)}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
/>
</PopoverBody>
</PopoverContent>
<Portal>
<PopoverContent w={200} pt={0} pb={2} px={4}>
<PopoverArrow />
<PopoverBody>
<CompositeSlider
min={0}
max={100}
value={mapRawValueToSliderValue(localScale)}
onChange={onChangeSlider}
defaultValue={sliderDefaultValue}
marks={marks}
formatValue={formatSliderValue}
alwaysShowMarks
/>
</PopoverBody>
</PopoverContent>
</Portal>
</Popover>
<ZoomInButton />
</Flex>

View File

@@ -30,6 +30,7 @@ import type {
FluxKontextReferenceImageConfig,
Gemini2_5ReferenceImageConfig,
IPAdapterConfig,
RegionalGuidanceIPAdapterConfig,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
import {
@@ -38,6 +39,7 @@ import {
initialFluxKontextReferenceImage,
initialGemini2_5ReferenceImage,
initialIPAdapter,
initialRegionalGuidanceIPAdapter,
initialT2IAdapter,
} from 'features/controlLayers/store/util';
import { zModelIdentifierField } from 'features/nodes/types/common';
@@ -125,7 +127,7 @@ export const getDefaultRefImageConfig = (
return config;
};
export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState): IPAdapterConfig => {
export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState): RegionalGuidanceIPAdapterConfig => {
// Regional guidance ref images do not support ChatGPT-4o, so we always return the IP Adapter config.
const state = getState();
@@ -138,7 +140,7 @@ export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState):
const modelConfig = ipAdapterModelConfigs.find((m) => m.base === base);
// Clone the initial IP Adapter config and set the model if available.
const config = deepClone(initialIPAdapter);
const config = deepClone(initialRegionalGuidanceIPAdapter);
if (modelConfig) {
config.model = zModelIdentifierField.parse(modelConfig);

View File

@@ -32,7 +32,12 @@ import type {
RefImageState,
RegionalGuidanceRefImageState,
} from 'features/controlLayers/store/types';
import { imageDTOToImageObject, imageDTOToImageWithDims, initialControlNet } from 'features/controlLayers/store/util';
import {
imageDTOToCroppableImage,
imageDTOToImageObject,
imageDTOToImageWithDims,
initialControlNet,
} from 'features/controlLayers/store/util';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import type { BoardId } from 'features/gallery/store/types';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
@@ -209,7 +214,7 @@ export const useNewGlobalReferenceImageFromBbox = () => {
const overrides: Partial<RefImageState> = {
config: {
...getDefaultRefImageConfig(getState),
image: imageDTOToImageWithDims(imageDTO),
image: imageDTOToCroppableImage(imageDTO),
},
};
dispatch(refImageAdded({ overrides }));
@@ -312,7 +317,7 @@ export const usePullBboxIntoGlobalReferenceImage = (id: string) => {
const arg = useMemo<UseSaveCanvasArg>(() => {
const onSave = (imageDTO: ImageDTO, _: Rect) => {
dispatch(refImageImageChanged({ id, imageDTO }));
dispatch(refImageImageChanged({ id, croppableImage: imageDTOToCroppableImage(imageDTO) }));
};
return {

View File

@@ -475,7 +475,7 @@ export abstract class CanvasEntityAdapterBase<T extends CanvasEntityState, U ext
* to hide this entity.
*/
const filteringAdapter = this.manager.stateApi.$filteringAdapter.get();
if (filteringAdapter && filteringAdapter !== this) {
if (filteringAdapter && filteringAdapter.id !== this.id) {
this.setVisibility(false);
return;
}
@@ -492,7 +492,7 @@ export abstract class CanvasEntityAdapterBase<T extends CanvasEntityState, U ext
}
const segmentingAdapter = this.manager.stateApi.$segmentingAdapter.get();
if (segmentingAdapter && segmentingAdapter !== this) {
if (segmentingAdapter && segmentingAdapter.id !== this.id) {
this.setVisibility(false);
return;
}

View File

@@ -72,7 +72,7 @@ export class CanvasEntityAdapterControlLayer extends CanvasEntityAdapterBase<
this.log.trace({ rect }, 'Getting canvas');
// The opacity may have been changed in response to the user selecting a different entity category, so we must
// restore the original opacity before rendering the canvas.
const attrs: GroupConfig = { opacity: this.state.opacity, filters: [] };
const attrs: GroupConfig = { opacity: this.state.opacity };
const canvas = this.renderer.getCanvas({ rect, attrs });
return canvas;
};

View File

@@ -1,4 +1,4 @@
import { omit } from 'es-toolkit/compat';
import { omit, throttle } from 'es-toolkit/compat';
import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
import { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
import { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
@@ -6,6 +6,7 @@ import { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasE
import { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasSegmentAnythingModule } from 'features/controlLayers/konva/CanvasSegmentAnythingModule';
import { AdjustmentsCurvesFilter, AdjustmentsSimpleFilter, buildCurveLUT } from 'features/controlLayers/konva/filters';
import type { CanvasEntityIdentifier, CanvasRasterLayerState, Rect } from 'features/controlLayers/store/types';
import type { GroupConfig } from 'konva/lib/Group';
import type { JsonObject } from 'type-fest';
@@ -59,13 +60,18 @@ export class CanvasEntityAdapterRasterLayer extends CanvasEntityAdapterBase<
if (!prevState || this.state.opacity !== prevState.opacity) {
this.syncOpacity();
}
// Apply per-layer adjustments as a Konva filter
if (!prevState || this.haveAdjustmentsChanged(prevState, this.state)) {
this.syncAdjustmentsFilter();
}
};
getCanvas = (rect?: Rect): HTMLCanvasElement => {
this.log.trace({ rect }, 'Getting canvas');
// The opacity may have been changed in response to the user selecting a different entity category, so we must
// restore the original opacity before rendering the canvas.
const attrs: GroupConfig = { opacity: this.state.opacity, filters: [] };
const attrs: GroupConfig = { opacity: this.state.opacity };
const canvas = this.renderer.getCanvas({ rect, attrs });
return canvas;
};
@@ -74,4 +80,79 @@ export class CanvasEntityAdapterRasterLayer extends CanvasEntityAdapterBase<
const keysToOmit: (keyof CanvasRasterLayerState)[] = ['name', 'isLocked'];
return omit(this.state, keysToOmit);
};
private syncAdjustmentsFilter = () => {
const a = this.state.adjustments;
const apply = !!a && a.enabled;
// The filters operate on the renderer's object group; we set them at the group level via the renderer.
const group = this.renderer.konva.objectGroup;
if (apply) {
const filters = group.filters() ?? [];
let nextFilters = filters.filter((f) => f !== AdjustmentsSimpleFilter && f !== AdjustmentsCurvesFilter);
if (a.mode === 'simple') {
group.setAttr('adjustmentsSimple', a.simple);
group.setAttr('adjustmentsCurves', null);
nextFilters = [...nextFilters, AdjustmentsSimpleFilter];
} else {
// Build LUTs and set curves attr
const master = buildCurveLUT(a.curves.master);
const r = buildCurveLUT(a.curves.r);
const g = buildCurveLUT(a.curves.g);
const b = buildCurveLUT(a.curves.b);
group.setAttr('adjustmentsCurves', { master, r, g, b });
group.setAttr('adjustmentsSimple', null);
nextFilters = [...nextFilters, AdjustmentsCurvesFilter];
}
group.filters(nextFilters);
this._throttledCacheRefresh();
} else {
// Remove our filter if present
const filters = (group.filters() ?? []).filter(
(f) => f !== AdjustmentsSimpleFilter && f !== AdjustmentsCurvesFilter
);
group.filters(filters);
group.setAttr('adjustmentsSimple', null);
group.setAttr('adjustmentsCurves', null);
this._throttledCacheRefresh();
}
};
private _throttledCacheRefresh = throttle(() => this.renderer.syncKonvaCache(true), 50);
private haveAdjustmentsChanged = (prevState: CanvasRasterLayerState, currState: CanvasRasterLayerState): boolean => {
const pa = prevState.adjustments;
const ca = currState.adjustments;
if (pa === ca) {
return false;
}
if (!pa || !ca) {
return true;
}
if (pa.enabled !== ca.enabled) {
return true;
}
if (pa.mode !== ca.mode) {
return true;
}
// simple params
const ps = pa.simple;
const cs = ca.simple;
if (
ps.brightness !== cs.brightness ||
ps.contrast !== cs.contrast ||
ps.saturation !== cs.saturation ||
ps.temperature !== cs.temperature ||
ps.tint !== cs.tint ||
ps.sharpness !== cs.sharpness
) {
return true;
}
// Curves (UI not implemented yet): compared by reference - a new curves object identity counts as a change.
const pc = pa.curves;
const cc = ca.curves;
if (pc !== cc) {
return true;
}
return false;
};
}

View File

@@ -3,6 +3,10 @@
* https://konvajs.org/docs/filters/Custom_Filter.html
*/
import { clamp } from 'es-toolkit/compat';
import { zCurvesAdjustmentsLUTs, zSimpleAdjustmentsConfig } from 'features/controlLayers/store/types';
import type Konva from 'konva';
/**
* Calculates the lightness (HSL) of a given pixel and sets the alpha channel to that value.
* This is useful for edge maps and other masks, to make the black areas transparent.
@@ -20,3 +24,177 @@ export const LightnessToAlphaFilter = (imageData: ImageData): void => {
imageData.data[i * 4 + 3] = Math.min(a, (cMin + cMax) / 2);
}
};
/**
* Per-layer simple adjustments filter (brightness, contrast, saturation, temp, tint, sharpness).
*
* Parameters are read from the Konva node attr `adjustmentsSimple` set by the adapter.
*/
export const AdjustmentsSimpleFilter = function (this: Konva.Node, imageData: ImageData): void {
const paramsRaw = this.getAttr('adjustmentsSimple');
const parseResult = zSimpleAdjustmentsConfig.safeParse(paramsRaw);
if (!parseResult.success) {
return;
}
const params = parseResult.data;
const { brightness, contrast, saturation, temperature, tint, sharpness } = params;
const data = imageData.data;
const len = data.length / 4;
const width = imageData.width;
const height = imageData.height;
// Precompute factors
const brightnessShift = brightness * 255; // additive shift
const contrastFactor = 1 + contrast; // scale around 128
// Temperature/Tint multipliers
const tempK = 0.5;
const tintK = 0.5;
const rTempMul = 1 + temperature * tempK;
const bTempMul = 1 - temperature * tempK;
// Tint: green <-> magenta. Positive = magenta (R/B up, G down). Negative = green (G up, R/B down).
const t = clamp(tint, -1, 1) * tintK;
const mag = Math.abs(t);
const rTintMul = t >= 0 ? 1 + mag : 1 - mag;
const gTintMul = t >= 0 ? 1 - mag : 1 + mag;
const bTintMul = t >= 0 ? 1 + mag : 1 - mag;
// Saturation matrix (HSL-based approximation via luma coefficients)
const lumaR = 0.2126;
const lumaG = 0.7152;
const lumaB = 0.0722;
const S = 1 + saturation; // 0..2
const m00 = lumaR * (1 - S) + S;
const m01 = lumaG * (1 - S);
const m02 = lumaB * (1 - S);
const m10 = lumaR * (1 - S);
const m11 = lumaG * (1 - S) + S;
const m12 = lumaB * (1 - S);
const m20 = lumaR * (1 - S);
const m21 = lumaG * (1 - S);
const m22 = lumaB * (1 - S) + S;
// First pass: apply per-pixel color adjustments (excluding sharpness)
for (let i = 0; i < len; i++) {
const idx = i * 4;
let r = data[idx + 0] as number;
let g = data[idx + 1] as number;
let b = data[idx + 2] as number;
const a = data[idx + 3] as number;
// Brightness (additive)
r = r + brightnessShift;
g = g + brightnessShift;
b = b + brightnessShift;
// Contrast around mid-point 128
r = (r - 128) * contrastFactor + 128;
g = (g - 128) * contrastFactor + 128;
b = (b - 128) * contrastFactor + 128;
// Temperature (R/B axis) and Tint (G vs Magenta)
r = r * rTempMul * rTintMul;
g = g * gTintMul;
b = b * bTempMul * bTintMul;
// Saturation via matrix
const r2 = r * m00 + g * m01 + b * m02;
const g2 = r * m10 + g * m11 + b * m12;
const b2 = r * m20 + g * m21 + b * m22;
data[idx + 0] = clamp(r2, 0, 255);
data[idx + 1] = clamp(g2, 0, 255);
data[idx + 2] = clamp(b2, 0, 255);
data[idx + 3] = a;
}
// Optional sharpen (simple unsharp mask with 3x3 kernel)
if (Math.abs(sharpness) > 1e-3 && width > 2 && height > 2) {
const src = new Uint8ClampedArray(data); // copy of modified data
const a = Math.max(-1, Math.min(1, sharpness)) * 0.5; // amount
const center = 1 + 4 * a;
const neighbor = -a;
for (let y = 1; y < height - 1; y++) {
for (let x = 1; x < width - 1; x++) {
const idx = (y * width + x) * 4;
for (let c = 0; c < 3; c++) {
const centerPx = src[idx + c] ?? 0;
const leftPx = src[idx - 4 + c] ?? 0;
const rightPx = src[idx + 4 + c] ?? 0;
const topPx = src[idx - width * 4 + c] ?? 0;
const bottomPx = src[idx + width * 4 + c] ?? 0;
const v = centerPx * center + leftPx * neighbor + rightPx * neighbor + topPx * neighbor + bottomPx * neighbor;
data[idx + c] = clamp(v, 0, 255);
}
// preserve alpha
}
}
}
};
// Build a 256-length LUT from 0..255 control points (linear interpolation for v1)
export const buildCurveLUT = (points: Array<[number, number]>): number[] => {
if (!points || points.length === 0) {
return Array.from({ length: 256 }, (_, i) => i);
}
const pts = points
.map(([x, y]) => [clamp(Math.round(x), 0, 255), clamp(Math.round(y), 0, 255)] as [number, number])
.sort((a, b) => a[0] - b[0]);
if ((pts[0]?.[0] ?? 0) !== 0) {
pts.unshift([0, pts[0]?.[1] ?? 0]);
}
const last = pts[pts.length - 1];
if ((last?.[0] ?? 255) !== 255) {
pts.push([255, last?.[1] ?? 255]);
}
const lut = new Array<number>(256);
let j = 0;
for (let x = 0; x <= 255; x++) {
while (j < pts.length - 2 && x > (pts[j + 1]?.[0] ?? 255)) {
j++;
}
const p0 = pts[j] ?? [0, 0];
const p1 = pts[j + 1] ?? [255, 255];
const [x0, y0] = p0;
const [x1, y1] = p1;
const t = x1 === x0 ? 0 : (x - x0) / (x1 - x0);
const y = y0 + (y1 - y0) * t;
lut[x] = clamp(Math.round(y), 0, 255);
}
return lut;
};
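// Worked example for buildCurveLUT above (illustrative values):
// buildCurveLUT([[0, 0], [128, 192], [255, 255]]) gives lut[0] = 0, lut[64] = 96
// (halfway along the first segment), lut[128] = 192, and lut[255] = 255.
// An empty or missing points array yields the identity LUT, lut[x] === x.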
/**
* Per-layer curves adjustments filter (master, r, g, b)
*
* Parameters are read from the Konva node attr `adjustmentsCurves` set by the adapter.
*/
export const AdjustmentsCurvesFilter = function (this: Konva.Node, imageData: ImageData): void {
const paramsRaw = this.getAttr('adjustmentsCurves');
const parseResult = zCurvesAdjustmentsLUTs.safeParse(paramsRaw);
if (!parseResult.success) {
return;
}
const params = parseResult.data;
const { master, r, g, b } = params;
if (!master || !r || !g || !b) {
return;
}
const data = imageData.data;
const len = data.length / 4;
for (let i = 0; i < len; i++) {
const idx = i * 4;
const r0 = data[idx + 0] as number;
const g0 = data[idx + 1] as number;
const b0 = data[idx + 2] as number;
const rm = master[r0] ?? r0;
const gm = master[g0] ?? g0;
const bm = master[b0] ?? b0;
data[idx + 0] = clamp(r[rm] ?? rm, 0, 255);
data[idx + 1] = clamp(g[gm] ?? gm, 0, 255);
data[idx + 2] = clamp(b[bm] ?? bm, 0, 255);
}
};
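For reference, a minimal sketch of how a Konva node consumes these filters outside the adapter (generic Konva API; the adapter itself refreshes its cache via the renderer, as shown earlier):

import Konva from 'konva';
import { AdjustmentsCurvesFilter, buildCurveLUT } from 'features/controlLayers/konva/filters';

const group = new Konva.Group();
// Shape must satisfy zCurvesAdjustmentsLUTs: one 256-entry LUT per channel.
const identity = buildCurveLUT([[0, 0], [255, 255]]);
group.setAttr('adjustmentsCurves', { master: identity, r: identity, g: identity, b: identity });
group.filters([AdjustmentsCurvesFilter]);
group.cache(); // Konva only applies filters to cached nodes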

View File

@@ -19,12 +19,16 @@ import type {
CanvasEntityType,
CanvasInpaintMaskState,
CanvasMetadata,
ChannelName,
ChannelPoints,
ControlLoRAConfig,
EntityMovedByPayload,
FillStyle,
FLUXReduxImageInfluence,
RasterLayerAdjustments,
RegionalGuidanceRefImageState,
RgbColor,
SimpleAdjustmentsConfig,
} from 'features/controlLayers/store/types';
import {
calculateNewSize,
@@ -78,10 +82,10 @@ import {
IMAGEN_ASPECT_RATIOS,
isChatGPT4oAspectRatioID,
isFluxKontextAspectRatioID,
isFLUXReduxConfig,
isGemini2_5AspectRatioID,
isImagenAspectRatioID,
isIPAdapterConfig,
isRegionalGuidanceFLUXReduxConfig,
isRegionalGuidanceIPAdapterConfig,
zCanvasState,
} from './types';
import {
@@ -95,7 +99,9 @@ import {
initialControlNet,
initialFLUXRedux,
initialIPAdapter,
initialRegionalGuidanceIPAdapter,
initialT2IAdapter,
makeDefaultRasterLayerAdjustments,
} from './util';
const slice = createSlice({
@@ -104,6 +110,96 @@ const slice = createSlice({
reducers: {
// undoable canvas state
//#region Raster layers
rasterLayerAdjustmentsSet: (
state,
action: PayloadAction<EntityIdentifierPayload<{ adjustments: RasterLayerAdjustments | null }, 'raster_layer'>>
) => {
const { entityIdentifier, adjustments } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}
if (adjustments === null) {
delete layer.adjustments;
return;
}
if (!layer.adjustments) {
layer.adjustments = makeDefaultRasterLayerAdjustments(adjustments.mode ?? 'simple');
}
layer.adjustments = merge(layer.adjustments, adjustments);
},
rasterLayerAdjustmentsReset: (state, action: PayloadAction<EntityIdentifierPayload<void, 'raster_layer'>>) => {
const { entityIdentifier } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.simple = makeDefaultRasterLayerAdjustments('simple').simple;
layer.adjustments.curves = makeDefaultRasterLayerAdjustments('curves').curves;
},
rasterLayerAdjustmentsCancel: (state, action: PayloadAction<EntityIdentifierPayload<void, 'raster_layer'>>) => {
const { entityIdentifier } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer) {
return;
}
delete layer.adjustments;
},
rasterLayerAdjustmentsModeChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ mode: 'simple' | 'curves' }, 'raster_layer'>>
) => {
const { entityIdentifier, mode } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.mode = mode;
},
rasterLayerAdjustmentsSimpleUpdated: (
state,
action: PayloadAction<EntityIdentifierPayload<{ simple: Partial<SimpleAdjustmentsConfig> }, 'raster_layer'>>
) => {
const { entityIdentifier, simple } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.simple = merge(layer.adjustments.simple, simple);
},
rasterLayerAdjustmentsCurvesUpdated: (
state,
action: PayloadAction<EntityIdentifierPayload<{ channel: ChannelName; points: ChannelPoints }, 'raster_layer'>>
) => {
const { entityIdentifier, channel, points } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.curves[channel] = points;
},
rasterLayerAdjustmentsEnabledToggled: (
state,
action: PayloadAction<EntityIdentifierPayload<void, 'raster_layer'>>
) => {
const { entityIdentifier } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.enabled = !layer.adjustments.enabled;
},
rasterLayerAdjustmentsCollapsedToggled: (
state,
action: PayloadAction<EntityIdentifierPayload<void, 'raster_layer'>>
) => {
const { entityIdentifier } = action.payload;
const layer = selectEntity(state, entityIdentifier);
if (!layer?.adjustments) {
return;
}
layer.adjustments.collapsed = !layer.adjustments.collapsed;
},
rasterLayerAdded: {
reducer: (
state,
@@ -709,7 +805,7 @@ const slice = createSlice({
if (!entity) {
return;
}
const config = { id: referenceImageId, config: deepClone(initialIPAdapter) };
const config = { id: referenceImageId, config: deepClone(initialRegionalGuidanceIPAdapter) };
merge(config, overrides);
entity.referenceImages.push(config);
},
@@ -752,7 +848,7 @@ const slice = createSlice({
if (!referenceImage) {
return;
}
if (!isIPAdapterConfig(referenceImage.config)) {
if (!isRegionalGuidanceIPAdapterConfig(referenceImage.config)) {
return;
}
@@ -769,7 +865,7 @@ const slice = createSlice({
if (!referenceImage) {
return;
}
if (!isIPAdapterConfig(referenceImage.config)) {
if (!isRegionalGuidanceIPAdapterConfig(referenceImage.config)) {
return;
}
referenceImage.config.beginEndStepPct = beginEndStepPct;
@@ -785,7 +881,7 @@ const slice = createSlice({
if (!referenceImage) {
return;
}
if (!isIPAdapterConfig(referenceImage.config)) {
if (!isRegionalGuidanceIPAdapterConfig(referenceImage.config)) {
return;
}
referenceImage.config.method = method;
@@ -804,7 +900,7 @@ const slice = createSlice({
if (!referenceImage) {
return;
}
if (!isFLUXReduxConfig(referenceImage.config)) {
if (!isRegionalGuidanceFLUXReduxConfig(referenceImage.config)) {
return;
}
@@ -833,7 +929,7 @@ const slice = createSlice({
return;
}
if (isIPAdapterConfig(referenceImage.config) && isFluxReduxModelConfig(modelConfig)) {
if (isRegionalGuidanceIPAdapterConfig(referenceImage.config) && isFluxReduxModelConfig(modelConfig)) {
// Switching from ip_adapter to flux_redux
referenceImage.config = {
...initialFLUXRedux,
@@ -843,7 +939,7 @@ const slice = createSlice({
return;
}
if (isFLUXReduxConfig(referenceImage.config) && isIPAdapterModelConfig(modelConfig)) {
if (isRegionalGuidanceFLUXReduxConfig(referenceImage.config) && isIPAdapterModelConfig(modelConfig)) {
// Switching from flux_redux to ip_adapter
referenceImage.config = {
...initialIPAdapter,
@@ -853,7 +949,7 @@ const slice = createSlice({
return;
}
if (isIPAdapterConfig(referenceImage.config)) {
if (isRegionalGuidanceIPAdapterConfig(referenceImage.config)) {
referenceImage.config.model = zModelIdentifierField.parse(modelConfig);
// Ensure that the IP Adapter model is compatible with the CLIP Vision model
@@ -876,7 +972,7 @@ const slice = createSlice({
if (!referenceImage) {
return;
}
if (!isIPAdapterConfig(referenceImage.config)) {
if (!isRegionalGuidanceIPAdapterConfig(referenceImage.config)) {
return;
}
referenceImage.config.clipVisionModel = clipVisionModel;
@@ -1658,6 +1754,15 @@ export const {
entityBrushLineAdded,
entityEraserLineAdded,
entityRectAdded,
// Raster layer adjustments
rasterLayerAdjustmentsSet,
rasterLayerAdjustmentsCancel,
rasterLayerAdjustmentsReset,
rasterLayerAdjustmentsModeChanged,
rasterLayerAdjustmentsEnabledToggled,
rasterLayerAdjustmentsCollapsedToggled,
rasterLayerAdjustmentsSimpleUpdated,
rasterLayerAdjustmentsCurvesUpdated,
entityDeleted,
entityArrangedForwardOne,
entityArrangedToFront,

View File

@@ -1,14 +1,24 @@
import { createSelector, createSlice, type PayloadAction } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
import type { SliceConfig } from 'app/store/types';
import type { NumericalParameterConfig } from 'app/types/invokeai';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { type LoRA, zLoRA } from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { DEFAULT_LORA_WEIGHT_CONFIG } from 'features/system/store/configSlice';
import type { LoRAModelConfig } from 'services/api/types';
import { v4 as uuidv4 } from 'uuid';
import z from 'zod';
export const DEFAULT_LORA_WEIGHT_CONFIG: NumericalParameterConfig = {
initial: 0.75,
sliderMin: -1,
sliderMax: 2,
numberInputMin: -10,
numberInputMax: 10,
fineStep: 0.01,
coarseStep: 0.05,
};
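// Note: the number-input bounds here (-10..10) appear to line up with the widened zLoRA weight
// schema later in this diff (z.number().gte(-10).lte(10)), while the slider keeps the narrower
// -1..2 working range.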
const zLoRAsState = z.object({
loras: z.array(zLoRA),
});

View File

@@ -19,6 +19,7 @@ import {
isFluxKontextAspectRatioID,
isGemini2_5AspectRatioID,
isImagenAspectRatioID,
MAX_POSITIVE_PROMPT_HISTORY,
zParamsState,
} from 'features/controlLayers/store/types';
import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
@@ -192,6 +193,24 @@ const slice = createSlice({
positivePromptChanged: (state, action: PayloadAction<ParameterPositivePrompt>) => {
state.positivePrompt = action.payload;
},
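// Dedup + most-recent-first: e.g. (illustrative) history ['sunset', 'castle'] + added 'castle'
// -> ['castle', 'sunset']; the list is then capped at MAX_POSITIVE_PROMPT_HISTORY entries.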
positivePromptAddedToHistory: (state, action: PayloadAction<ParameterPositivePrompt>) => {
const prompt = action.payload.trim();
if (prompt.length === 0) {
return;
}
state.positivePromptHistory = [prompt, ...state.positivePromptHistory.filter((p) => p !== prompt)];
if (state.positivePromptHistory.length > MAX_POSITIVE_PROMPT_HISTORY) {
state.positivePromptHistory = state.positivePromptHistory.slice(0, MAX_POSITIVE_PROMPT_HISTORY);
}
},
promptRemovedFromHistory: (state, action: PayloadAction<string>) => {
state.positivePromptHistory = state.positivePromptHistory.filter((p) => p !== action.payload);
},
promptHistoryCleared: (state) => {
state.positivePromptHistory = [];
},
negativePromptChanged: (state, action: PayloadAction<ParameterNegativePrompt>) => {
state.negativePrompt = action.payload;
},
@@ -462,6 +481,9 @@ export const {
setClipSkip,
shouldUseCpuNoiseChanged,
positivePromptChanged,
positivePromptAddedToHistory,
promptRemovedFromHistory,
promptHistoryCleared,
negativePromptChanged,
refinerModelChanged,
setRefinerSteps,
@@ -500,6 +522,12 @@ export const paramsSliceConfig: SliceConfig<typeof slice> = {
state.dimensions.height = state.dimensions.rect.height;
}
if (state._version === 1) {
// v1 -> v2, add positive prompt history
state._version = 2;
state.positivePromptHistory = [];
}
return zParamsState.parse(state);
},
},
@@ -600,6 +628,7 @@ export const selectShouldUseCPUNoise = createParamsSelector((params) => params.s
export const selectUpscaleScheduler = createParamsSelector((params) => params.upscaleScheduler);
export const selectUpscaleCfgScale = createParamsSelector((params) => params.upscaleCfgScale);
export const selectPositivePromptHistory = createParamsSelector((params) => params.positivePromptHistory);
export const selectRefinerCFGScale = createParamsSelector((params) => params.refinerCFGScale);
export const selectRefinerModel = createParamsSelector((params) => params.refinerModel);
export const selectIsRefinerModelSelected = createParamsSelector((params) => Boolean(params.refinerModel));

View File

@@ -6,13 +6,16 @@ import type { RootState } from 'app/store/store';
import type { SliceConfig } from 'app/store/types';
import { clamp } from 'es-toolkit/compat';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { FLUXReduxImageInfluence, RefImagesState } from 'features/controlLayers/store/types';
import type {
CroppableImageWithDims,
FLUXReduxImageInfluence,
RefImagesState,
} from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import type {
ChatGPT4oModelConfig,
FLUXKontextModelConfig,
FLUXReduxModelConfig,
ImageDTO,
IPAdapterModelConfig,
} from 'services/api/types';
import { assert } from 'tsafe';
@@ -22,7 +25,6 @@ import type { CLIPVisionModelV2, IPMethodV2, RefImageState } from './types';
import { getInitialRefImagesState, isFLUXReduxConfig, isIPAdapterConfig, zRefImagesState } from './types';
import {
getReferenceImageState,
imageDTOToImageWithDims,
initialChatGPT4oReferenceImage,
initialFluxKontextReferenceImage,
initialFLUXRedux,
@@ -65,13 +67,13 @@ const slice = createSlice({
state.entities.push(...entities);
}
},
refImageImageChanged: (state, action: PayloadActionWithId<{ imageDTO: ImageDTO | null }>) => {
const { id, imageDTO } = action.payload;
refImageImageChanged: (state, action: PayloadActionWithId<{ croppableImage: CroppableImageWithDims | null }>) => {
const { id, croppableImage } = action.payload;
const entity = selectRefImageEntity(state, id);
if (!entity) {
return;
}
entity.config.image = imageDTO ? imageDTOToImageWithDims(imageDTO) : null;
entity.config.image = croppableImage;
},
refImageIPAdapterMethodChanged: (state, action: PayloadActionWithId<{ method: IPMethodV2 }>) => {
const { id, method } = action.payload;

View File

@@ -37,6 +37,45 @@ export const zImageWithDims = z.object({
});
export type ImageWithDims = z.infer<typeof zImageWithDims>;
const zCropBox = z.object({
x: z.number().min(0),
y: z.number().min(0),
width: z.number().positive(),
height: z.number().positive(),
});
// This new schema is an extension of zImageWithDims, with an optional crop field.
//
// When we added cropping support to certain entities (e.g. Ref Images, video Starting Frame Image), we changed
// their schemas from using zImageWithDims to this new schema. To support loading pre-existing entities that
// were created before cropping was supported, we can use zod's preprocess to transform old data into the new format.
// It's essentially a data migration step.
//
// Currently, this parsing happens in two places:
// - Recalling metadata.
// - Loading/rehydrating persisted client state from storage.
export const zCroppableImageWithDims = z.preprocess(
(val) => {
try {
const imageWithDims = zImageWithDims.parse(val);
const migrated = { original: { image: deepClone(imageWithDims) } };
return migrated;
} catch {
return val;
}
},
z.object({
original: z.object({ image: zImageWithDims }),
crop: z
.object({
box: zCropBox,
ratio: z.number().gt(0).nullable(),
image: zImageWithDims,
})
.optional(),
})
);
export type CroppableImageWithDims = z.infer<typeof zCroppableImageWithDims>;
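A quick illustration of the preprocess migration (hypothetical image data):

// Legacy shape, persisted before cropping existed:
const legacy = { image_name: 'abc.png', width: 512, height: 512 };
zCroppableImageWithDims.parse(legacy);
// -> { original: { image: { image_name: 'abc.png', width: 512, height: 512 } } }

// Already-migrated data fails the inner zImageWithDims.parse, so preprocess passes it through unchanged:
zCroppableImageWithDims.parse({ original: { image: legacy } }); // parses as-is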
const zImageWithDimsDataURL = z.object({
dataURL: z.string(),
width: z.number().int().positive(),
@@ -116,6 +155,9 @@ export type SAMPointLabel = z.infer<typeof zSAMPointLabel>;
export const zSAMPointLabelString = z.enum(['background', 'neutral', 'foreground']);
export type SAMPointLabelString = z.infer<typeof zSAMPointLabelString>;
export const zSAMModel = z.enum(['SAM1', 'SAM2']);
export type SAMModel = z.infer<typeof zSAMModel>;
/**
* A mapping of SAM point labels (as numbers) to their string representations.
*/
@@ -232,7 +274,7 @@ export type CanvasObjectState = z.infer<typeof zCanvasObjectState>;
const zIPAdapterConfig = z.object({
type: z.literal('ip_adapter'),
image: zImageWithDims.nullable(),
image: zCroppableImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
weight: z.number().gte(-1).lte(2),
beginEndStepPct: zBeginEndStepPct,
@@ -241,21 +283,39 @@ const zIPAdapterConfig = z.object({
});
export type IPAdapterConfig = z.infer<typeof zIPAdapterConfig>;
const zRegionalGuidanceIPAdapterConfig = z.object({
type: z.literal('ip_adapter'),
image: zImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
weight: z.number().gte(-1).lte(2),
beginEndStepPct: zBeginEndStepPct,
method: zIPMethodV2,
clipVisionModel: zCLIPVisionModelV2,
});
export type RegionalGuidanceIPAdapterConfig = z.infer<typeof zRegionalGuidanceIPAdapterConfig>;
const zFLUXReduxImageInfluence = z.enum(['lowest', 'low', 'medium', 'high', 'highest']);
export const isFLUXReduxImageInfluence = (v: unknown): v is FLUXReduxImageInfluence =>
zFLUXReduxImageInfluence.safeParse(v).success;
export type FLUXReduxImageInfluence = z.infer<typeof zFLUXReduxImageInfluence>;
const zFLUXReduxConfig = z.object({
type: z.literal('flux_redux'),
image: zImageWithDims.nullable(),
image: zCroppableImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
imageInfluence: zFLUXReduxImageInfluence.default('highest'),
});
export type FLUXReduxConfig = z.infer<typeof zFLUXReduxConfig>;
const zRegionalGuidanceFLUXReduxConfig = z.object({
type: z.literal('flux_redux'),
image: zImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
imageInfluence: zFLUXReduxImageInfluence.default('highest'),
});
type RegionalGuidanceFLUXReduxConfig = z.infer<typeof zRegionalGuidanceFLUXReduxConfig>;
const zChatGPT4oReferenceImageConfig = z.object({
type: z.literal('chatgpt_4o_reference_image'),
image: zImageWithDims.nullable(),
image: zCroppableImageWithDims.nullable(),
/**
* TODO(psyche): Technically there is no model for ChatGPT 4o reference images - it's just a field in the API call.
* But we use a model drop down to switch between different ref image types, so there needs to be a model here else
@@ -267,14 +327,14 @@ export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceIm
const zGemini2_5ReferenceImageConfig = z.object({
type: z.literal('gemini_2_5_reference_image'),
image: zImageWithDims.nullable(),
image: zCroppableImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
});
export type Gemini2_5ReferenceImageConfig = z.infer<typeof zGemini2_5ReferenceImageConfig>;
const zFluxKontextReferenceImageConfig = z.object({
type: z.literal('flux_kontext_reference_image'),
image: zImageWithDims.nullable(),
image: zCroppableImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
});
export type FluxKontextReferenceImageConfig = z.infer<typeof zFluxKontextReferenceImageConfig>;
@@ -304,6 +364,7 @@ export const isIPAdapterConfig = (config: RefImageState['config']): config is IP
export const isFLUXReduxConfig = (config: RefImageState['config']): config is FLUXReduxConfig =>
config.type === 'flux_redux';
export const isChatGPT4oReferenceImageConfig = (
config: RefImageState['config']
): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
@@ -323,10 +384,18 @@ const zFill = z.object({ style: zFillStyle, color: zRgbColor });
const zRegionalGuidanceRefImageState = z.object({
id: zId,
config: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig]),
config: z.discriminatedUnion('type', [zRegionalGuidanceIPAdapterConfig, zRegionalGuidanceFLUXReduxConfig]),
});
export type RegionalGuidanceRefImageState = z.infer<typeof zRegionalGuidanceRefImageState>;
export const isRegionalGuidanceIPAdapterConfig = (
config: RegionalGuidanceRefImageState['config']
): config is RegionalGuidanceIPAdapterConfig => config.type === 'ip_adapter';
export const isRegionalGuidanceFLUXReduxConfig = (
config: RegionalGuidanceRefImageState['config']
): config is RegionalGuidanceFLUXReduxConfig => config.type === 'flux_redux';
const zCanvasRegionalGuidanceState = zCanvasEntityBase.extend({
type: z.literal('regional_guidance'),
position: zCoordinate,
@@ -375,11 +444,57 @@ const zControlLoRAConfig = z.object({
});
export type ControlLoRAConfig = z.infer<typeof zControlLoRAConfig>;
/**
* All simple params normalized to `[-1, 1]` except sharpness `[0, 1]`.
*
* - Brightness: -1 (darken) to 1 (brighten)
* - Contrast: -1 (decrease contrast) to 1 (increase contrast)
* - Saturation: -1 (desaturate) to 1 (saturate)
* - Temperature: -1 (cooler/blue) to 1 (warmer/yellow)
* - Tint: -1 (greener) to 1 (more magenta)
* - Sharpness: 0 (no sharpening) to 1 (maximum sharpening)
*/
export const zSimpleAdjustmentsConfig = z.object({
brightness: z.number().gte(-1).lte(1),
contrast: z.number().gte(-1).lte(1),
saturation: z.number().gte(-1).lte(1),
temperature: z.number().gte(-1).lte(1),
tint: z.number().gte(-1).lte(1),
sharpness: z.number().gte(0).lte(1),
});
export type SimpleAdjustmentsConfig = z.infer<typeof zSimpleAdjustmentsConfig>;
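// A valid config under the schema above (hypothetical values) - a mild warm, saturated look with light sharpening:
// { brightness: 0.05, contrast: 0.1, saturation: 0.2, temperature: 0.3, tint: 0, sharpness: 0.25 }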
const zUint8 = z.number().int().min(0).max(255);
const zChannelPoints = z.array(z.tuple([zUint8, zUint8])).min(2);
const zChannelName = z.enum(['master', 'r', 'g', 'b']);
const zCurvesAdjustmentsConfig = z.record(zChannelName, zChannelPoints);
export type ChannelName = z.infer<typeof zChannelName>;
export type ChannelPoints = z.infer<typeof zChannelPoints>;
export type CurvesAdjustmentsConfig = z.infer<typeof zCurvesAdjustmentsConfig>;
/**
* The curves adjustments are stored as LUTs in the Konva node attributes. Konva will use these values when applying
* the filter.
*/
export const zCurvesAdjustmentsLUTs = z.record(zChannelName, z.array(zUint8));
const zRasterLayerAdjustments = z.object({
version: z.literal(1),
enabled: z.boolean(),
collapsed: z.boolean(),
mode: z.enum(['simple', 'curves']),
simple: zSimpleAdjustmentsConfig,
curves: zCurvesAdjustmentsConfig,
});
export type RasterLayerAdjustments = z.infer<typeof zRasterLayerAdjustments>;
const zCanvasRasterLayerState = zCanvasEntityBase.extend({
type: z.literal('raster_layer'),
position: zCoordinate,
opacity: zOpacity,
objects: z.array(zCanvasObjectState),
// Optional per-layer color adjustments (simple + curves). When undefined, no adjustments are applied.
adjustments: zRasterLayerAdjustments.optional(),
});
export type CanvasRasterLayerState = z.infer<typeof zCanvasRasterLayerState>;
@@ -421,7 +536,7 @@ export const zLoRA = z.object({
id: z.string(),
isEnabled: z.boolean(),
model: zModelIdentifierField,
weight: z.number().gte(-1).lte(2),
weight: z.number().gte(-10).lte(10),
});
export type LoRA = z.infer<typeof zLoRA>;
@@ -563,8 +678,13 @@ const zDimensionsState = z.object({
aspectRatio: zAspectRatioConfig,
});
export const MAX_POSITIVE_PROMPT_HISTORY = 100;
const zPositivePromptHistory = z
.array(zParameterPositivePrompt)
.transform((arr) => arr.slice(0, MAX_POSITIVE_PROMPT_HISTORY));
export const zParamsState = z.object({
_version: z.literal(1),
_version: z.literal(2),
maskBlur: z.number(),
maskBlurMethod: zParameterMaskBlurMethod,
canvasCoherenceMode: zParameterCanvasCoherenceMode,
@@ -595,6 +715,7 @@ export const zParamsState = z.object({
clipSkip: z.number(),
shouldUseCpuNoise: z.boolean(),
positivePrompt: zParameterPositivePrompt,
positivePromptHistory: zPositivePromptHistory,
negativePrompt: zParameterNegativePrompt,
refinerModel: zParameterSDXLRefinerModel.nullable(),
refinerSteps: z.number(),
@@ -612,7 +733,7 @@ export const zParamsState = z.object({
});
export type ParamsState = z.infer<typeof zParamsState>;
export const getInitialParamsState = (): ParamsState => ({
_version: 1,
_version: 2,
maskBlur: 16,
maskBlurMethod: 'box',
canvasCoherenceMode: 'Gaussian Blur',
@@ -643,6 +764,7 @@ export const getInitialParamsState = (): ParamsState => ({
clipSkip: 0,
shouldUseCpuNoise: true,
positivePrompt: '',
positivePromptHistory: [],
negativePrompt: null,
refinerModel: null,
refinerSteps: 20,

View File

@@ -10,12 +10,15 @@ import type {
ChatGPT4oReferenceImageConfig,
ControlLoRAConfig,
ControlNetConfig,
CroppableImageWithDims,
FluxKontextReferenceImageConfig,
FLUXReduxConfig,
Gemini2_5ReferenceImageConfig,
ImageWithDims,
IPAdapterConfig,
RasterLayerAdjustments,
RefImageState,
RegionalGuidanceIPAdapterConfig,
RgbColor,
T2IAdapterConfig,
} from 'features/controlLayers/store/types';
@@ -44,6 +47,21 @@ export const imageDTOToImageWithDims = ({ image_name, width, height }: ImageDTO)
height,
});
export const imageDTOToCroppableImage = (
originalImageDTO: ImageDTO,
crop?: CroppableImageWithDims['crop']
): CroppableImageWithDims => {
const { image_name, width, height } = originalImageDTO;
const val: CroppableImageWithDims = {
original: { image: { image_name, width, height } },
};
if (crop) {
val.crop = deepClone(crop);
}
return val;
};
export const imageDTOToImageField = ({ image_name }: ImageDTO): ImageField => ({ image_name });
const DEFAULT_RG_MASK_FILL_COLORS: RgbColor[] = [
@@ -78,6 +96,15 @@ export const initialIPAdapter: IPAdapterConfig = {
clipVisionModel: 'ViT-H',
weight: 1,
};
export const initialRegionalGuidanceIPAdapter: RegionalGuidanceIPAdapterConfig = {
type: 'ip_adapter',
image: null,
model: null,
beginEndStepPct: [0, 1],
method: 'full',
clipVisionModel: 'ViT-H',
weight: 1,
};
export const initialFLUXRedux: FLUXReduxConfig = {
type: 'flux_redux',
image: null,
@@ -118,6 +145,32 @@ export const initialControlLoRA: ControlLoRAConfig = {
weight: 0.75,
};
export const makeDefaultRasterLayerAdjustments = (mode: 'simple' | 'curves' = 'simple'): RasterLayerAdjustments => ({
version: 1,
enabled: true,
collapsed: false,
mode,
simple: { brightness: 0, contrast: 0, saturation: 0, temperature: 0, tint: 0, sharpness: 0 },
curves: {
master: [
[0, 0],
[255, 255],
],
r: [
[0, 0],
[255, 255],
],
g: [
[0, 0],
[255, 255],
],
b: [
[0, 0],
[255, 255],
],
},
});
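// The [[0, 0], [255, 255]] pairs above are identity curves (buildCurveLUT maps them to
// lut[x] === x), so freshly-created adjustments are a visual no-op until the user edits them.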
export const getReferenceImageState = (id: string, overrides?: PartialDeep<RefImageState>): RefImageState => {
const entityState: RefImageState = {
id,
@@ -187,6 +240,7 @@ export const getRasterLayerState = (
objects: [],
opacity: 1,
position: { x: 0, y: 0 },
adjustments: undefined,
};
merge(entityState, overrides);
return entityState;

View File

@@ -0,0 +1,215 @@
import {
  Button,
  ButtonGroup,
  Divider,
  Flex,
  FormControl,
  FormLabel,
  Select,
  Spacer,
  Text,
} from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import type { AspectRatioID } from 'features/controlLayers/store/types';
import { ASPECT_RATIO_MAP, isAspectRatioID } from 'features/controlLayers/store/types';
import type { CropBox } from 'features/cropper/lib/editor';
import { cropImageModalApi, type CropImageModalState } from 'features/cropper/store';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import React, { memo, useCallback, useEffect, useRef, useState } from 'react';
import { useUploadImageMutation } from 'services/api/endpoints/images';
import { objectEntries } from 'tsafe';

type Props = {
  editor: CropImageModalState['editor'];
  onApplyCrop: CropImageModalState['onApplyCrop'];
  onReady: CropImageModalState['onReady'];
};

const getAspectRatioString = (ratio: number | null): AspectRatioID => {
  if (!ratio) {
    return 'Free';
  }
  const entries = objectEntries(ASPECT_RATIO_MAP);
  for (const [key, value] of entries) {
    if (value.ratio === ratio) {
      return key;
    }
  }
  return 'Free';
};

export const CropImageEditor = memo(({ editor, onApplyCrop, onReady }: Props) => {
  const containerRef = useRef<HTMLDivElement>(null);
  const [zoom, setZoom] = useState(1);
  const [cropBox, setCropBox] = useState<CropBox | null>(null);
  const [aspectRatio, setAspectRatio] = useState<string>('Free');
  const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
  const [uploadImage] = useUploadImageMutation({ fixedCacheKey: 'editorContainer' });

  const setup = useCallback(
    async (container: HTMLDivElement) => {
      editor.init(container);
      editor.onZoomChange((zoom) => {
        setZoom(zoom);
      });
      editor.onCropBoxChange((crop) => {
        setCropBox(crop);
      });
      editor.onAspectRatioChange((ratio) => {
        setAspectRatio(getAspectRatioString(ratio));
      });
      await onReady();
      editor.fitToContainer();
    },
    [editor, onReady]
  );

  useEffect(() => {
    const container = containerRef.current;
    if (!container) {
      return;
    }
    setup(container);
    const handleResize = () => {
      editor.resize(container.clientWidth, container.clientHeight);
    };
    const resizeObserver = new ResizeObserver(handleResize);
    resizeObserver.observe(container);
    return () => {
      resizeObserver.disconnect();
    };
  }, [editor, setup]);

  const handleAspectRatioChange = useCallback(
    (e: React.ChangeEvent<HTMLSelectElement>) => {
      const newRatio = e.target.value;
      if (!isAspectRatioID(newRatio)) {
        return;
      }
      setAspectRatio(newRatio);
      if (newRatio === 'Free') {
        editor.setCropAspectRatio(null);
      } else {
        editor.setCropAspectRatio(ASPECT_RATIO_MAP[newRatio]?.ratio ?? null);
      }
    },
    [editor]
  );

  const handleResetCrop = useCallback(() => {
    editor.resetCrop();
  }, [editor]);

  const handleApplyCrop = useCallback(async () => {
    await onApplyCrop();
    cropImageModalApi.close();
  }, [onApplyCrop]);

  const handleCancelCrop = useCallback(() => {
    cropImageModalApi.close();
  }, []);

  const handleExport = useCallback(async () => {
    try {
      const blob = await editor.exportImage('blob');
      const file = new File([blob], 'image.png', { type: 'image/png' });
      await uploadImage({
        file,
        is_intermediate: false,
        image_category: 'user',
        board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
      }).unwrap();
    } catch (err) {
      if (err instanceof Error && err.message.includes('tainted')) {
        alert(
          'Cannot export image: The image is from a different domain (CORS issue). To fix this:\n\n1. Load images from the same domain\n2. Use images from CORS-enabled sources\n3. Upload a local image file instead'
        );
      } else {
        alert(`Export failed: ${err instanceof Error ? err.message : String(err)}`);
      }
    }
  }, [autoAddBoardId, editor, uploadImage]);

  const zoomIn = useCallback(() => {
    editor.zoomIn();
  }, [editor]);

  const zoomOut = useCallback(() => {
    editor.zoomOut();
  }, [editor]);

  const fitToContainer = useCallback(() => {
    editor.fitToContainer();
  }, [editor]);

  const resetView = useCallback(() => {
    editor.resetView();
  }, [editor]);

  return (
    <Flex w="full" h="full" flexDir="column" gap={4}>
      <Flex gap={2} alignItems="center">
        <FormControl flex={1}>
          <FormLabel>Aspect Ratio:</FormLabel>
          <Select size="sm" value={aspectRatio} onChange={handleAspectRatioChange} w={32}>
            <option value="Free">Free</option>
            <option value="16:9">16:9</option>
            <option value="3:2">3:2</option>
            <option value="4:3">4:3</option>
            <option value="1:1">1:1</option>
            <option value="3:4">3:4</option>
            <option value="2:3">2:3</option>
            <option value="9:16">9:16</option>
          </Select>
        </FormControl>
        <Spacer />
        <ButtonGroup size="sm" isAttached={false}>
          <Button onClick={fitToContainer}>Fit View</Button>
          <Button onClick={resetView}>Reset View</Button>
          <Button onClick={zoomIn}>Zoom In</Button>
          <Button onClick={zoomOut}>Zoom Out</Button>
        </ButtonGroup>
        <Spacer />
        <ButtonGroup size="sm" isAttached={false}>
          <Button onClick={handleApplyCrop}>Apply</Button>
          <Button onClick={handleResetCrop}>Reset</Button>
          <Button onClick={handleCancelCrop}>Cancel</Button>
          <Button onClick={handleExport}>Save to Assets</Button>
        </ButtonGroup>
      </Flex>
      <Flex position="relative" w="full" h="full" bg="base.900">
        <Flex position="absolute" inset={0} ref={containerRef} />
      </Flex>
      <Flex gap={2} color="base.300">
        <Text>Mouse wheel: Zoom</Text>
        <Divider orientation="vertical" />
        <Text>Space + Drag: Pan</Text>
        <Divider orientation="vertical" />
        <Text>Drag crop box or handles to adjust</Text>
        {cropBox && (
          <>
            <Divider orientation="vertical" />
            <Text>
              X: {Math.round(cropBox.x)}, Y: {Math.round(cropBox.y)}, Width: {Math.round(cropBox.width)}, Height:{' '}
              {Math.round(cropBox.height)}
            </Text>
          </>
        )}
        <Spacer key="help-spacer" />
        <Text key="help-zoom">Zoom: {Math.round(zoom * 100)}%</Text>
      </Flex>
    </Flex>
  );
});

CropImageEditor.displayName = 'CropImageEditor';


@@ -0,0 +1,29 @@
import { Modal, ModalBody, ModalContent, ModalHeader, ModalOverlay } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { cropImageModalApi } from 'features/cropper/store';
import { memo } from 'react';

import { CropImageEditor } from './CropImageEditor';

export const CropImageModal = memo(() => {
  const state = useStore(cropImageModalApi.$state);

  if (!state) {
    return null;
  }

  return (
    // This modal is always open when this component is rendered
    <Modal isOpen={true} onClose={cropImageModalApi.close} isCentered useInert={false} size="full">
      <ModalOverlay />
      <ModalContent minH="unset" minW="unset" maxH="90vh" maxW="90vw" w="full" h="full" borderRadius="base">
        <ModalHeader>Crop Image</ModalHeader>
        <ModalBody px={4} pb={4} pt={0}>
          <CropImageEditor editor={state.editor} onApplyCrop={state.onApplyCrop} onReady={state.onReady} />
        </ModalBody>
      </ModalContent>
    </Modal>
  );
});

CropImageModal.displayName = 'CropImageModal';

File diff suppressed because it is too large
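The suppressed file is presumably features/cropper/lib/editor.ts — both the component above and the store below import Editor and CropBox from it. Its surface, as inferred from the call sites in this diff (the real signatures may differ):

```ts
// Inferred from call sites only; not the actual source of lib/editor.ts.
export type CropBox = { x: number; y: number; width: number; height: number };

export interface Editor {
  init(container: HTMLDivElement): void;
  destroy(): void;
  resize(width: number, height: number): void;
  // Subscriptions the React layer uses to mirror editor state.
  onZoomChange(cb: (zoom: number) => void): void;
  onCropBoxChange(cb: (cropBox: CropBox | null) => void): void;
  onAspectRatioChange(cb: (ratio: number | null) => void): void;
  // Viewport controls.
  zoomIn(): void;
  zoomOut(): void;
  fitToContainer(): void;
  resetView(): void;
  // Crop controls; a null aspect ratio means free-form.
  setCropAspectRatio(ratio: number | null): void;
  resetCrop(): void;
  // Export the current image, e.g. for upload to assets.
  exportImage(format: 'blob'): Promise<Blob>;
}
```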


@@ -0,0 +1,26 @@
import type { Editor } from 'features/cropper/lib/editor';
import { atom } from 'nanostores';

export type CropImageModalState = {
  editor: Editor;
  onApplyCrop: () => Promise<void> | void;
  onReady: () => Promise<void> | void;
};

const $state = atom<CropImageModalState | null>(null);

const open = (state: CropImageModalState) => {
  $state.set(state);
};

const close = () => {
  const state = $state.get();
  state?.editor.destroy();
  $state.set(null);
};

export const cropImageModalApi = {
  $state,
  open,
  close,
};
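cropImageModalApi is a minimal nanostores-based modal controller: open stashes the editor plus callbacks, close tears the editor down. A hedged usage sketch (createEditor is a hypothetical stand-in for however callers construct an Editor):

```ts
import type { Editor } from 'features/cropper/lib/editor';
import { cropImageModalApi } from 'features/cropper/store';

declare function createEditor(): Editor; // hypothetical factory

cropImageModalApi.open({
  editor: createEditor(),
  // Runs after the modal mounts and editor.init() has been called.
  onReady: async () => {
    // e.g. load the image to crop into the editor
  },
  // Runs when the user clicks Apply; the component then closes the
  // modal, which also calls editor.destroy().
  onApplyCrop: async () => {
    // e.g. upload the cropped blob and update app state
  },
});
```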


@@ -236,8 +236,11 @@ const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, image
 const deleteReferenceImages = (state: RootState, dispatch: AppDispatch, image_name: string) => {
   selectReferenceImageEntities(state).forEach((entity) => {
-    if (entity.config.image?.image_name === image_name) {
-      dispatch(refImageImageChanged({ id: entity.id, imageDTO: null }));
+    if (
+      entity.config.image?.original.image.image_name === image_name ||
+      entity.config.image?.crop?.image.image_name === image_name
+    ) {
+      dispatch(refImageImageChanged({ id: entity.id, croppableImage: null }));
     }
   });
 };
@@ -284,7 +287,10 @@ export const getImageUsage = (
   const isUpscaleImage = upscale.upscaleInitialImage?.image_name === image_name;
-  const isReferenceImage = refImages.entities.some(({ config }) => config.image?.image_name === image_name);
+  const isReferenceImage = refImages.entities.some(
+    ({ config }) =>
+      config.image?.original.image.image_name === image_name || config.image?.crop?.image.image_name === image_name
+  );
   const isRasterLayerImage = canvas.rasterLayers.entities.some(({ objects }) =>
     objects.some((obj) => obj.type === 'image' && 'image_name' in obj.image && obj.image.image_name === image_name)
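Both call sites now match an image name against either side of a croppable image. The rule they share, as a small predicate (a sketch, not a helper that exists in this PR):

```ts
import type { CroppableImageWithDims } from 'features/controlLayers/store/types';

// True if image_name refers to the original upload or the cropped output.
// Mirrors the checks in deleteReferenceImages and getImageUsage above.
const croppableImageUses = (image: CroppableImageWithDims | null | undefined, image_name: string): boolean =>
  image?.original.image.image_name === image_name || image?.crop?.image.image_name === image_name;
```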


@@ -3,7 +3,7 @@ import { IconButton } from '@invoke-ai/ui-library';
 import type { MouseEvent } from 'react';
 import { memo } from 'react';
 
-const sx: SystemStyleObject = {
+export const imageButtonSx: SystemStyleObject = {
   minW: 0,
   svg: {
     transitionProperty: 'common',
@@ -31,7 +31,7 @@ export const DndImageIcon = memo((props: Props) => {
       aria-label={tooltip}
       icon={icon}
       variant="link"
-      sx={sx}
+      sx={imageButtonSx}
       data-testid={tooltip}
       {...rest}
     />


@@ -4,7 +4,7 @@ import { getDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerH
 import { getPrefixedId } from 'features/controlLayers/konva/util';
 import { refImageAdded } from 'features/controlLayers/store/refImagesSlice';
 import type { CanvasEntityIdentifier, CanvasEntityType } from 'features/controlLayers/store/types';
-import { imageDTOToImageWithDims } from 'features/controlLayers/store/util';
+import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
 import { selectComparisonImages } from 'features/gallery/components/ImageViewer/common';
 import type { BoardId } from 'features/gallery/store/types';
 import {
@@ -211,7 +211,7 @@ export const addGlobalReferenceImageDndTarget: DndTarget<
   handler: ({ sourceData, dispatch, getState }) => {
     const { imageDTO } = sourceData.payload;
     const config = getDefaultRefImageConfig(getState);
-    config.image = imageDTOToImageWithDims(imageDTO);
+    config.image = imageDTOToCroppableImage(imageDTO);
     dispatch(refImageAdded({ overrides: { config } }));
   },
 };
@@ -641,7 +641,7 @@ export const videoFrameFromImageDndTarget: DndTarget<VideoFrameFromImageDndTarge
   },
   handler: ({ sourceData, dispatch }) => {
     const { imageDTO } = sourceData.payload;
-    dispatch(startingFrameImageChanged(imageDTOToImageWithDims(imageDTO)));
+    dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO)));
   },
 };
 //#endregion
//#endregion


@@ -1,4 +1,5 @@
 import { MenuItem } from '@invoke-ai/ui-library';
+import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
 import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
 import { startingFrameImageChanged } from 'features/parameters/store/videoSlice';
 import { navigationApi } from 'features/ui/layouts/navigation-api';
@@ -13,7 +14,7 @@ export const ContextMenuItemSendToVideo = memo(() => {
   const dispatch = useDispatch();
   const onClick = useCallback(() => {
-    dispatch(startingFrameImageChanged(imageDTO));
+    dispatch(startingFrameImageChanged(imageDTOToCroppableImage(imageDTO)));
     navigationApi.switchToTab('video');
   }, [imageDTO, dispatch]);


@@ -2,7 +2,7 @@ import { MenuItem } from '@invoke-ai/ui-library';
 import { useAppStore } from 'app/store/storeHooks';
 import { getDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
 import { refImageAdded } from 'features/controlLayers/store/refImagesSlice';
-import { imageDTOToImageWithDims } from 'features/controlLayers/store/util';
+import { imageDTOToCroppableImage } from 'features/controlLayers/store/util';
 import { useItemDTOContextImageOnly } from 'features/gallery/contexts/ItemDTOContext';
 import { toast } from 'features/toast/toast';
 import { memo, useCallback } from 'react';
@@ -17,7 +17,7 @@ export const ContextMenuItemUseAsRefImage = memo(() => {
   const onClickNewGlobalReferenceImageFromImage = useCallback(() => {
     const { dispatch, getState } = store;
     const config = getDefaultRefImageConfig(getState);
-    config.image = imageDTOToImageWithDims(imageDTO);
+    config.image = imageDTOToCroppableImage(imageDTO);
     dispatch(refImageAdded({ overrides: { config } }));
     toast({
       id: 'SENT_TO_CANVAS',


@@ -26,7 +26,12 @@ import type {
   CanvasRasterLayerState,
   CanvasRegionalGuidanceState,
 } from 'features/controlLayers/store/types';
-import { imageDTOToImageObject, imageDTOToImageWithDims, initialControlNet } from 'features/controlLayers/store/util';
+import {
+  imageDTOToCroppableImage,
+  imageDTOToImageObject,
+  imageDTOToImageWithDims,
+  initialControlNet,
+} from 'features/controlLayers/store/util';
 import { calculateNewSize } from 'features/controlLayers/util/getScaledBoundingBoxDimensions';
 import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
 import type { BoardId } from 'features/gallery/store/types';
@@ -44,7 +49,7 @@ import { assert } from 'tsafe';
 export const setGlobalReferenceImage = (arg: { imageDTO: ImageDTO; id: string; dispatch: AppDispatch }) => {
   const { imageDTO, id, dispatch } = arg;
-  dispatch(refImageImageChanged({ id, imageDTO }));
+  dispatch(refImageImageChanged({ id, croppableImage: imageDTOToCroppableImage(imageDTO) }));
 };
 
 export const setRegionalGuidanceReferenceImage = (arg: {
export const setRegionalGuidanceReferenceImage = (arg: {


@@ -13,16 +13,18 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
 import {
   buildSelectLoRA,
+  DEFAULT_LORA_WEIGHT_CONFIG,
   loraDeleted,
   loraIsEnabledChanged,
   loraWeightChanged,
 } from 'features/controlLayers/store/lorasSlice';
 import type { LoRA } from 'features/controlLayers/store/types';
-import { DEFAULT_LORA_WEIGHT_CONFIG } from 'features/system/store/configSlice';
 import { memo, useCallback, useMemo } from 'react';
 import { PiTrashSimpleBold } from 'react-icons/pi';
 import { useGetModelConfigQuery } from 'services/api/endpoints/models';
 
+const MARKS = [-1, 0, 1, 2];
+
 export const LoRACard = memo((props: { id: string }) => {
   const selectLoRA = useMemo(() => buildSelectLoRA(props.id), [props.id]);
   const lora = useAppSelector(selectLoRA);
@@ -81,7 +83,8 @@ const LoRAContent = memo(({ lora }: { lora: LoRA }) => {
         min={DEFAULT_LORA_WEIGHT_CONFIG.sliderMin}
         max={DEFAULT_LORA_WEIGHT_CONFIG.sliderMax}
         step={DEFAULT_LORA_WEIGHT_CONFIG.coarseStep}
-        marks={DEFAULT_LORA_WEIGHT_CONFIG.marks.slice()}
+        fineStep={DEFAULT_LORA_WEIGHT_CONFIG.fineStep}
+        marks={MARKS}
         defaultValue={DEFAULT_LORA_WEIGHT_CONFIG.initial}
         isDisabled={!lora.isEnabled}
       />
@@ -91,6 +94,7 @@ const LoRAContent = memo(({ lora }: { lora: LoRA }) => {
         min={DEFAULT_LORA_WEIGHT_CONFIG.numberInputMin}
         max={DEFAULT_LORA_WEIGHT_CONFIG.numberInputMax}
         step={DEFAULT_LORA_WEIGHT_CONFIG.coarseStep}
+        fineStep={DEFAULT_LORA_WEIGHT_CONFIG.fineStep}
         w={20}
         flexShrink={0}
         defaultValue={DEFAULT_LORA_WEIGHT_CONFIG.initial}
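For reference, the properties LoRACard reads from DEFAULT_LORA_WEIGHT_CONFIG (now exported by lorasSlice) imply a shape roughly like this sketch; the values shown are illustrative, not taken from the source:

```ts
// Shape inferred from the property accesses above; example values only.
type LoraWeightConfig = {
  initial: number; // default slider / number-input value
  sliderMin: number;
  sliderMax: number;
  numberInputMin: number; // the number input may allow a wider range
  numberInputMax: number;
  coarseStep: number; // default step
  fineStep: number; // finer step, e.g. with a modifier key held
};

const exampleLoraWeightConfig: LoraWeightConfig = {
  initial: 0.75,
  sliderMin: -1,
  sliderMax: 2,
  numberInputMin: -10,
  numberInputMax: 10,
  coarseStep: 0.05,
  fineStep: 0.01,
};
```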

Some files were not shown because too many files have changed in this diff.