Compare commits

...

624 Commits

Author SHA1 Message Date
psychedelicious
3707c3b034 fix(ui): do not bake opacity when rasterizing layer adjustments 2025-09-22 11:43:08 +10:00
Mary Hipp
5885db4ab5 ruff 2025-09-19 11:07:36 -04:00
Mary Hipp
36ed9b750d restore list_queue_items method 2025-09-19 11:07:36 -04:00
psychedelicious
3cec06f86e chore(ui): typegen 2025-09-19 22:13:12 +10:00
psychedelicious
28b5f7a1c5 feat(nodes): better deprecation handling for ui_type
- Move migration of model-specific ui_types into BaseInvocation. This
gives us access to the node and field names, so the warnings are more
useful to the end user.
- Ensure we serialize the fields' json_schema_extra with enum values.
This wasn't a problem until now, when it interferes with migrating
ui_type cleanly. It's a transparent change.
- Improve warnings when validating fields (which includes the ui_type
migration logic)
2025-09-19 22:13:12 +10:00
psychedelicious
22cbb23ae0 fix(ui): ref images for flux kontext & api models not parsed correctly 2025-09-19 21:40:17 +10:00
Riccardo Giovanetti
4d585e3eec translationBot(ui): update translation (Italian)
Currently translated at 98.4% (2130 of 2163 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.4% (2127 of 2161 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-18 14:01:31 +10:00
psychedelicious
006b4356bb chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
da947866f2 fix(nodes): ensure SD2 models are pickable in loader/cnet nodes 2025-09-18 12:39:27 +10:00
psychedelicious
84a2cc6fc9 chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
b50534bb49 revert(nodes): do not deprecate ui_type for output fields! only deprecate the model ui types 2025-09-18 12:39:27 +10:00
psychedelicious
c305e79fee tests(ui): update tests to reflect new model parsing logic 2025-09-18 12:39:27 +10:00
psychedelicious
c32949d113 tidy(nodes): mark all UIType.*ModelField as deprecated 2025-09-18 12:39:27 +10:00
psychedelicious
87a98902da tidy(nodes): remove unused UIType.Video 2025-09-18 12:39:27 +10:00
psychedelicious
2857a446c9 docs(nodes): update docstrings for InputField 2025-09-18 12:39:27 +10:00
psychedelicious
035d9432bd feat(ui): support filtering on model format 2025-09-18 12:39:27 +10:00
psychedelicious
bdeb9fb1cf chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
dadff57061 feat(nodes): add ui_model_format filter for nodes 2025-09-18 12:39:27 +10:00
psychedelicious
480857ae4e fix(nodes): add base to SD1 model loader 2025-09-18 12:39:27 +10:00
psychedelicious
eaf0624004 feat(ui): remove explicit model type handling from workflow editor 2025-09-18 12:39:27 +10:00
psychedelicious
58bca1b9f4 feat(nodes): use new ui_model_[base|type|variant] on all core nodes 2025-09-18 12:39:27 +10:00
psychedelicious
54aa6908fa feat(ui): update invocation parsing to handle new ui_model_[base|type|variant] attrs 2025-09-18 12:39:27 +10:00
psychedelicious
e6d9daca96 chore(ui): typegen 2025-09-18 12:39:27 +10:00
psychedelicious
6e5a529cb7 feat(nodes): add ui_model_[base|type|variant] to InputField args for dynamic UI generation 2025-09-18 12:39:27 +10:00
Iq1pl
8c742a6e38 ruff format 2025-09-18 11:05:32 +10:00
Iq1pl
693373f1c1 Update ip_adapter.py
added support for NOOB-IPA-MARK1
2025-09-18 11:05:32 +10:00
Josh Corbett
4809080fd9 fix(ui): allow scrolling in ModelPane 2025-09-18 10:33:22 +10:00
psychedelicious
efcb1bea7f chore: bump version to v6.8.0rc1 2025-09-17 13:57:43 +10:00
psychedelicious
e0d7a401f3 feat(ui): make ref images croppable 2025-09-17 13:43:13 +10:00
psychedelicious
aac979e9a4 fix(ui): issue w/ setting initial aspect ratio in cropper 2025-09-17 13:43:13 +10:00
psychedelicious
3b0d7f076d tidy(ui): rename from "editor" to "cropper", minor cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
e1acbcdbd5 fix(ui): store floats for box 2025-09-17 13:43:13 +10:00
psychedelicious
7d9b81550b feat(ui): revert to original image when crop discarded 2025-09-17 13:43:13 +10:00
psychedelicious
6a447dd1fe refactor(ui): remove "apply", "start" and "cancel" concepts from editor 2025-09-17 13:43:13 +10:00
psychedelicious
c2dc63ddbc fix(ui): video graphs 2025-09-17 13:43:13 +10:00
psychedelicious
1bc689d531 docs(ui): add comments to startingframeimage 2025-09-17 13:43:13 +10:00
psychedelicious
4829975827 feat(ui): make the editor components not care about the image 2025-09-17 13:43:13 +10:00
psychedelicious
49da4e00c3 feat(ui): add concept for editable image state 2025-09-17 13:43:13 +10:00
psychedelicious
89dfe5e729 docs(ui): add comments to editor 2025-09-17 13:43:13 +10:00
psychedelicious
6816d366df tidy(ui): editor misc 2025-09-17 13:43:13 +10:00
psychedelicious
9d3d2a36c9 tidy(ui): editor listeners 2025-09-17 13:43:13 +10:00
psychedelicious
ed231044c8 refactor(ui): simplify crop constraints 2025-09-17 13:43:13 +10:00
psychedelicious
b51a232794 feat(ui): extract config to own obj 2025-09-17 13:43:13 +10:00
psychedelicious
4412143a6e feat(ui): clean up editor 2025-09-17 13:43:13 +10:00
psychedelicious
de11cafdb3 refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
4d9114aa7d refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
67e2da1ebf refactor(ui): editor (wip) 2025-09-17 13:43:13 +10:00
psychedelicious
33ecc591c3 refactor(ui): editor init 2025-09-17 13:43:13 +10:00
psychedelicious
b57459a226 chore(ui): lint 2025-09-17 13:43:13 +10:00
psychedelicious
01282b1c90 feat(ui): do not clear crop when canceling 2025-09-17 13:43:13 +10:00
psychedelicious
3f302906dc feat(ui): crop doesn't hide outside cropped region 2025-09-17 13:43:13 +10:00
psychedelicious
81d56596fb tidy(ui): cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
b536b0df0c feat(ui): misc iterate on editor 2025-09-17 13:43:13 +10:00
psychedelicious
692af1d93d feat(ui): type narrowing for editor output types 2025-09-17 13:43:13 +10:00
psychedelicious
bb7ef77b50 tidy(ui): lint/react conventions for editor component 2025-09-17 13:43:13 +10:00
psychedelicious
1862548573 feat(ui): image editor bg checkerboard pattern 2025-09-17 13:43:13 +10:00
psychedelicious
242c1b6350 feat(ui): tweak editor konva styles 2025-09-17 13:43:13 +10:00
psychedelicious
fc6e4bb04e tidy(ui): editor component cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
20841abca6 tidy(ui): editor cleanup 2025-09-17 13:43:13 +10:00
psychedelicious
e8b69d99a4 chore(ui): lint 2025-09-17 13:43:13 +10:00
Mary Hipp
d6eaff8237 create editImageModal that takes an imageDTO, loads blob onto canvas, and allows cropping. cropped blob is uploaded as new asset 2025-09-17 13:43:13 +10:00
Mary Hipp
068b095956 show warning state with tooltip if starting frame image aspect ratio does not match the video output aspect ratio 2025-09-17 13:43:13 +10:00
psychedelicious
f795a47340 tidy(ui): remove unused translation string 2025-09-16 15:04:03 +10:00
psychedelicious
df47345eb0 feat(ui): add translation strings for prompt history 2025-09-16 15:04:03 +10:00
psychedelicious
def04095a4 feat(ui): tweak prompt history styling 2025-09-16 15:04:03 +10:00
psychedelicious
28be8f0911 refactor(ui): simplify prompt history shortcuts 2025-09-16 15:04:03 +10:00
Kent Keirsey
b50c44bac0 handle potential for invalid list item 2025-09-16 15:04:03 +10:00
Kent Keirsey
b4ce0e02fc lint 2025-09-16 15:04:03 +10:00
Kent Keirsey
d6442d9a34 Prompt history shortcuts 2025-09-16 15:04:03 +10:00
Josh Corbett
4528bcafaf feat(model manager): add ModelFooter component and reusable ModelDeleteButton 2025-09-16 12:29:57 +10:00
Josh Corbett
8b82b81ee2 fix(ModelImage): change MODEL_IMAGE_THUMBNAIL_SIZE to a local constant 2025-09-16 12:29:57 +10:00
Josh Corbett
757acdd49e feat(model manager): 💄 update model manager ui, initial commit 2025-09-16 12:29:57 +10:00
psychedelicious
94b7cc583a fix(ui): do not reset params state on studio init nav to generate tab 2025-09-16 12:25:25 +10:00
psychedelicious
b663a6bac4 chore: bump version to v6.7.0 2025-09-15 14:37:56 +10:00
psychedelicious
65d40153fb chore(ui): update whatsnew 2025-09-15 14:37:56 +10:00
Riccardo Giovanetti
c8b741a514 translationBot(ui): update translation (Italian)
Currently translated at 98.4% (2120 of 2153 strings)

translationBot(ui): update translation (Italian)

Currently translated at 97.3% (2097 of 2153 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-15 14:25:41 +10:00
psychedelicious
6d3aeffed9 fix(ui): dedupe prompt history 2025-09-15 14:22:44 +10:00
psychedelicious
203be96910 fix(ui): render popovers in portals to ensure they are on top of other ui elements 2025-09-15 14:19:54 +10:00
psychedelicious
b0aa48ddb8 feat(ui): simple prompt history 2025-09-12 10:19:48 -04:00
psychedelicious
867dbe51e5 fix(ui): extend lora weight schema to accept full range of weights
This could cause a failure to rehydrate LoRA state, or failure to recall
a LoRA.

Closes #8551
2025-09-12 11:50:10 +10:00
psychedelicious
ff8948b6f1 chore(ui): update whatsnew 2025-09-11 18:09:31 +10:00
psychedelicious
fa3a6425a6 tests(ui): update staging area test to reflect new behaviour 2025-09-11 18:09:31 +10:00
psychedelicious
c5992ece89 fix(ui): better logic in staging area when canceling the selected item 2025-09-11 18:09:31 +10:00
psychedelicious
12a6239929 chore: bump version to v6.7.0rc1 2025-09-11 18:09:31 +10:00
Riccardo Giovanetti
e9238c59f4 translationBot(ui): update translation (Italian)
Currently translated at 96.5% (2053 of 2127 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
Linos
c1cbbe51d6 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2127 of 2127 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
Hosted Weblate
4219b4a288 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

translationBot(ui): update translation files

Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-09-11 17:42:41 +10:00
psychedelicious
48c8a9c09d chore(ui): lint 2025-09-11 17:25:57 +10:00
psychedelicious
a67efdf4ad perf(ui): optimize curves graph component
Do not use whole layer as trigger for histo recalc; use the canvas cache
of the layer - it more reliably indicates when the layer pixel data has
changed, and fixes an issue where we can miss the first histo calc due
to race conditions with async layer bbox calculation.
2025-09-11 17:25:57 +10:00
psychedelicious
d6ff9c2e49 tidy(ui): split curves graph into own component 2025-09-11 17:25:57 +10:00
psychedelicious
e768a3bc7b perf(ui): use narrow selectors in adjustments to reduce rerenders
dramatically improves the feel of the sliders
2025-09-11 17:25:57 +10:00
psychedelicious
7273700f61 fix(ui): sharpness range 2025-09-11 17:25:57 +10:00
psychedelicious
f909e81d91 feat(ui): better types & runtime guarantees for filter data stored in konva node attrs 2025-09-11 17:25:57 +10:00
psychedelicious
8c85f168f6 refactor(ui): make layer adjustments schemas/types composable 2025-09-11 17:25:57 +10:00
psychedelicious
263d86d46f fix(ui): points where x=255 sorted incorrectly 2025-09-11 17:25:57 +10:00
psychedelicious
0921805160 feat(ui): tweak adjustments panel styling 2025-09-11 17:25:57 +10:00
psychedelicious
517f4811e7 feat(ui): single action to reset adjustments 2025-09-11 17:25:57 +10:00
psychedelicious
0dc73c8803 tidy(ui): move some histogram drawing logic out of components and into callbacks 2025-09-11 17:25:57 +10:00
psychedelicious
26702b54c0 feat(ui): tweak layouts, use react conventions, disabled state 2025-09-11 17:25:57 +10:00
dunkeroni
2d65e4543f minor padding changes 2025-09-11 17:25:57 +10:00
dunkeroni
309113956b remove unknown type annotations 2025-09-11 17:25:57 +10:00
dunkeroni
0ac4099bc6 allow negative sharpness to soften 2025-09-11 17:25:57 +10:00
dunkeroni
899dc739fa defaultValue on adjusters 2025-09-11 17:25:57 +10:00
dunkeroni
4e2439fc8e remove extra edit comments 2025-09-11 17:25:57 +10:00
dunkeroni
00864c24e0 layout fixes 2025-09-11 17:25:57 +10:00
dunkeroni
b73aaa7d6f fix several points of curve editor jank 2025-09-11 17:25:57 +10:00
dunkeroni
85057ae704 splitup adjustment panel objects 2025-09-11 17:25:57 +10:00
dunkeroni
c3fb3a43a2 blue mode switch indicator 2025-09-11 17:25:57 +10:00
dunkeroni
51d0a15a1b use default factory on reset 2025-09-11 17:25:57 +10:00
dunkeroni
5991067fd9 simplify adjustments type to optional not null 2025-09-11 17:25:57 +10:00
dunkeroni
32c2d3f740 remove extra casts and types from filters.ts 2025-09-11 17:25:57 +10:00
dunkeroni
c661f86b34 fix: crop to bbox doubles adjustment filters 2025-09-11 17:25:57 +10:00
dunkeroni
cc72d8eab4 curves editor syntax and structure fixes 2025-09-11 17:25:57 +10:00
dunkeroni
e8550f9355 move constants in curves editor 2025-09-11 17:25:57 +10:00
dunkeroni
a1d0386ca4 move memoized slider to component 2025-09-11 17:25:57 +10:00
dunkeroni
495d089f85 clean up right click menu 2025-09-11 17:25:57 +10:00
dunkeroni
913b91e9dd remove redundant en.json colors 2025-09-11 17:25:57 +10:00
dunkeroni
3e907f4e14 remove extra title 2025-09-11 17:25:57 +10:00
dunkeroni
756df6ebe4 Finish button on adjustments 2025-09-11 17:25:57 +10:00
dunkeroni
2a6be99152 Fix tint not shifting green in negative direction 2025-09-11 17:25:57 +10:00
dunkeroni
3099e2bf9d fix disable toggle reverts to simple view 2025-09-11 17:25:57 +10:00
dunkeroni
6921f0412a log scale and panel width compatibility 2025-09-11 17:25:57 +10:00
dunkeroni
022d5a8863 curves editor 2025-09-11 17:25:57 +10:00
dunkeroni
af99beedc5 apply filters to operations 2025-09-11 17:25:57 +10:00
dunkeroni
f3d83dc6b7 visual adjustment filters 2025-09-11 17:25:57 +10:00
psychedelicious
ebc3f18a1a ai(ui): add CLAUDE.md to frontend 2025-09-11 13:26:39 +10:00
Mary Hipp
aeb512f8d9 ruff 2025-09-11 12:41:56 +10:00
Mary Hipp
a1810acb93 accidental commit 2025-09-11 12:41:56 +10:00
Mary Hipp
aa35a5083b remove completed_at from queue list so that created_at is only sort option, restore field values in UI 2025-09-11 12:41:56 +10:00
psychedelicious
4f17de0b32 fix(ui): ensure mask image is deleted when no more inputs to select object 2025-09-11 12:15:41 +10:00
psychedelicious
370c3cd59b feat(ui): update select object info tooltip 2025-09-11 12:15:41 +10:00
psychedelicious
67214e16c0 tidy(ui): organize select object components 2025-09-11 12:15:41 +10:00
psychedelicious
4880a1d946 feat(nodes): accept neg coords for bbox
This actually works fine for SAM.
2025-09-11 12:15:41 +10:00
psychedelicious
0f0988610f feat(ui): spruce up UI a bit 2025-09-11 12:15:41 +10:00
psychedelicious
6805d28b7a feat(ui): increase hit area for bbox anchors 2025-09-11 12:15:41 +10:00
psychedelicious
9b45a24136 fix(ui): respect selected point type 2025-09-11 12:15:41 +10:00
psychedelicious
4e9d66a64b tidy(ui): clean up CanvasSegmentAnythingModule 2025-09-11 12:15:41 +10:00
psychedelicious
8fec530b0f fix(ui): restore old tooltip for select object
need to add translation strings for new functionality
2025-09-11 12:15:41 +10:00
psychedelicious
50c66f8671 fix(ui): select obj box moving on mmb pan 2025-09-11 12:15:41 +10:00
psychedelicious
f0aa39ea81 fix(ui): prevent bbox from following cursor after middle mouse pan
Added button checks to bbox rect and transformer mousedown/touchstart handlers to only process left clicks. Also added stage dragging check in onBboxDragMove to clear bbox drag state when middle mouse panning is active.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
faac814a3d fix(ui): prevent middle mouse from creating points in segmentation module
When middle mouse button is used for canvas panning, the pointerup event was still creating points in the segmentation module. Added button check to onBboxDragEnd handler to only process left clicks.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
fb9545bb90 fix(ui): bbox no shrinkies 2025-09-11 12:15:41 +10:00
psychedelicious
8ad2ee83b6 fix(ui): prevent bbox scale accumulation in SAM module
Fixed an issue where bounding boxes could grow exponentially when created at small sizes. The problem occurred because Konva Transformer modifies scaleX/scaleY rather than width/height directly, and the scale values weren't consistently reset after being applied to dimensions.

Changes:
- Ensure scale values are always reset to 1 after applying to dimensions
- Add minimum size constraints to prevent zero/negative dimensions
- Fix scale handling in transformend, dragend, and initial bbox creation

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
f8ad62b5eb tidy(backend) cleanup sam pipelines 2025-09-11 12:15:41 +10:00
psychedelicious
03ae78bc7c tidy(nodes): clean up sam node 2025-09-11 12:15:41 +10:00
psychedelicious
ec1a058dbe fix(backend): issue w/ multiple bbox and sam1 2025-09-11 12:15:41 +10:00
psychedelicious
9e4d441e2e feat(ui): allow adding point inside bbox 2025-09-11 12:15:41 +10:00
psychedelicious
3770fd22f8 tidy(ui): ts issues 2025-09-11 12:15:41 +10:00
psychedelicious
a0232b0e63 feat(ui): combine points and bbox in visual mode for SAM
Revised the Select Object feature to support two input modes:
- Visual mode: Combined points and bounding box input for paired SAM inputs
- Prompt mode: Text-based object selection (unchanged)

Key changes:
- Replaced three input types (points, prompt, bbox) with two (visual, prompt)
- Visual mode supports both point and bbox inputs simultaneously
- Click to add include points, Shift+click for exclude points
- Click and drag to draw bounding box
- Fixed bbox visibility issues when adding points
- Fixed coordinate system issues for proper bbox positioning
- Added proper event handling and interaction controls

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-11 12:15:41 +10:00
psychedelicious
e1e964bf0e experiment(ui): support bboxes in select object 2025-09-11 12:15:41 +10:00
psychedelicious
1b1759cffc feat(ui): support prompt-based selection for object selection 2025-09-11 12:15:41 +10:00
psychedelicious
d828502bc8 refactor(backend): simplify segment anything APIs
There was a really confusing aspect of the SAM pipeline classes where
they accepted deeply nested lists of different dimensions (bbox, points,
and labels).

The lengths of the lists are related; each point must have a
corresponding label, and if bboxes are provided with points, they must
be same length.

I've refactored the backend API to take a single list of SAMInput
objects. This class has a bbox and/or a list of points, making it much
simpler to provide the right shape of inputs.

Internally, the pipeline classes rejigger these input classes to
have the correct nesting.

The Nodes still have an awkward API where you can provide both bboxes
and points of different lengths, so I added a pydantic validator that
enforces correct lengths.
2025-09-11 12:15:41 +10:00
psychedelicious
7a073b6de7 feat(ui): hold shift to add inverse point type 2025-09-11 12:15:41 +10:00
psychedelicious
338ff8d588 chore: typegen 2025-09-11 12:15:41 +10:00
psychedelicious
a3625efd3a chore: ruff 2025-09-11 12:15:41 +10:00
Kent Keirsey
5efb37fe63 consolidate into one node. 2025-09-11 12:15:41 +10:00
Kent Keirsey
aef0b81d5b fix models 2025-09-11 12:15:41 +10:00
Kent Keirsey
544edff507 update uv.lock 2025-09-11 12:15:41 +10:00
Kent Keirsey
42b1adab22 init Sam2 2025-09-11 12:15:41 +10:00
Attila Cseh
a2b9d12e88 prettier errors fixed 2025-09-10 11:28:50 +10:00
Attila Cseh
7a94fb6c04 maths enabled on numeric input fields in workflow editor 2025-09-10 11:28:50 +10:00
psychedelicious
efcd159704 fix(app): path traversal via bulk downloads paths 2025-09-10 11:18:12 +10:00
psychedelicious
997e619a9d feat(ui): address feedback 2025-09-09 14:42:30 +10:00
Attila Cseh
4bc184ff16 LoRA number input min/max restored 2025-09-09 14:42:30 +10:00
psychedelicious
0b605a745b fix(ui): route metadata to gemini node 2025-09-09 14:31:07 +10:00
Attila Cseh
22b038ce3b unused translations removed 2025-09-08 20:41:36 +10:00
psychedelicious
0bb5d647b5 tidy(app): method naming snake case 2025-09-08 20:41:36 +10:00
psychedelicious
4a3599929b fix(ui): do not pass scroll seek props to DOM in queue list 2025-09-08 20:41:36 +10:00
psychedelicious
f959ce8323 feat(ui): reduce overscan for queue
makes it a bit less sluggish
2025-09-08 20:41:36 +10:00
Attila Cseh
74e1047870 build errors fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
732881c51b createdAt column fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
107be8e166 queueSlice cleaned up 2025-09-08 20:41:36 +10:00
Attila Cseh
3c2f654da8 queue api listQueueItems removed 2025-09-08 20:41:36 +10:00
Attila Cseh
474fd44e50 status column not sortable 2025-09-08 20:41:36 +10:00
Attila Cseh
0dc5f8fd65 getQueueItemIds cache invalidation added 2025-09-08 20:41:36 +10:00
Attila Cseh
d4215fb460 isOpen refactored 2025-09-08 20:41:36 +10:00
Attila Cseh
0cd05ee9fd ListContext reverted with queryArgs 2025-09-08 20:41:36 +10:00
Attila Cseh
9fcb3af1d8 ListContext removed 2025-09-08 20:41:36 +10:00
Attila Cseh
c9da7e2172 typegen fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
9788735d6b code review fixes 2025-09-08 20:41:36 +10:00
Attila Cseh
d6139748e2 Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
602dfb1e5d Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
5bb3a78f56 Update invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemComponent.tsx
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-09-08 20:41:36 +10:00
Attila Cseh
d58df1e17b schema re-generated 2025-09-08 20:41:36 +10:00
Attila Cseh
5d0e37eb2f lint errors fixed 2025-09-08 20:41:36 +10:00
Attila Cseh
486b333cef queue list virtualized 2025-09-08 20:41:36 +10:00
Attila Cseh
6fa437af03 get_queue_itemIds endpoint created 2025-09-08 20:41:36 +10:00
Attila Cseh
787ef6fa27 ColumnSortIcon refactored 2025-09-08 20:41:36 +10:00
Attila Cseh
7f0571c229 QueueListHeaderColumnProps.field turned into SortBy 2025-09-08 20:41:36 +10:00
Attila Cseh
f5a58c0ceb QueueListHeaderColumn created 2025-09-08 20:41:36 +10:00
psychedelicious
d16eef4e66 chore: bump version to v6.6.0 2025-09-08 14:01:02 +10:00
psychedelicious
681ff2b2b3 chore(ui): update whatsnew 2025-09-08 14:01:02 +10:00
psychedelicious
0d81b4ce98 tidy(ui): make names a bit clearer 2025-09-08 13:54:23 +10:00
psychedelicious
99f1667ced tidy(ui): remove unused dependency 2025-09-08 13:54:23 +10:00
psychedelicious
aa5597ab4d feat(ui): use resize observer directly in component 2025-09-08 13:54:23 +10:00
psychedelicious
9bbb8e8a5e feat(ui): simpler strategy to conditionally render slider brush width 2025-09-08 13:54:23 +10:00
psychedelicious
f284d282c1 feat(ui): color picker number input outline styling 2025-09-08 13:54:23 +10:00
Attila Cseh
4231488da6 number input height set 2025-09-08 13:54:23 +10:00
Attila Cseh
a014867e68 slider number input height set 2025-09-08 13:54:23 +10:00
Attila Cseh
22654fbc9c redundant translations removed 2025-09-08 13:54:23 +10:00
Attila Cseh
daa4fd751c ToolWidthPicker refactored 2025-09-08 13:54:23 +10:00
Attila Cseh
3fd265c333 slider for brush and eraser tool 2025-09-08 13:54:23 +10:00
psychedelicious
26a3a9130c Revert "build(ui): port clean translations script to js"
This reverts commit 8a00d855b4.
2025-09-08 11:20:55 +10:00
psychedelicious
3dfeaab4b2 Revert "build(ui): add package script to check and clean translatoins"
This reverts commit 9610f34dd4.
2025-09-08 11:20:55 +10:00
psychedelicious
a33707cc76 Revert "ci: add translation string check to frontend checks"
This reverts commit 98945a4560.
2025-09-08 11:20:55 +10:00
psychedelicious
21e13daf6e Revert "chore(ui): clean translations"
This reverts commit a0dceecab9.
2025-09-08 11:20:55 +10:00
psychedelicious
fa2614ee02 Revert "tidy(ui): remove python clean translations script"
This reverts commit 8a81c05caf.
2025-09-08 11:20:55 +10:00
Hosted Weblate
4be6ddb23d translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-09-05 12:28:33 +10:00
Riccardo Giovanetti
bba0e01926 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2093 of 2122 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-05 12:28:33 +10:00
psychedelicious
20d57d5ccf gh: update pr template 2025-09-05 11:27:02 +10:00
psychedelicious
d9121271a2 fix(ui): rehydration + redux migration design issue
Certain items in redux are ephemeral and omitted from persisted slices.
On rehydration, we need to inject these values back into the slice.

But there was an issue that could prevent slice migrations from running
during rehydration.

The migrations look for the `_version` key in state and migrate the
slice accordingly.

The logic that merged in the ephemeral values accidentally _also_ merged
in the `_version` key if it didn't already exist. This happened _before_
migrations are run.

This causes problems for slices that didn't have a `_version` key and
then have one added via migration.

For example, the params slice didn't have a `_version` key until the
previous commit, which added `_version` and changed some other parts of
state in a migration.

On first load of the updated code, we have a catch-22 kinda situation:
- The persisted params slice is the old version. It needs to have both
`_version` and some other data added to it.
- We deserialize the state and then merge in ephemeral values. This
inadvertently also merged in the `_version` key.
- We run the slice migration. It sees there is a `_version` key and
thinks it doesn't need to run. The extra data isn't added to the slice.
The slice is parsed against its zod schema and fails because the new
data is missing.
- Because the parse failed, we treat the user's persisted data as
invalid and overwrite it with initial state, potentially causing data
loss.

The fix is to be more selective when merging in the ephemeral state
before migration - this is now done by checking which keys are on the
persist denylist and only adding those keys.
2025-09-05 11:27:02 +10:00
psychedelicious
30b487c71c tidy(ui): remove unused x/y coords from params slice 2025-09-05 11:27:02 +10:00
psychedelicious
8a81c05caf tidy(ui): remove python clean translations script 2025-09-05 11:02:37 +10:00
psychedelicious
a0dceecab9 chore(ui): clean translations 2025-09-05 11:02:37 +10:00
psychedelicious
98945a4560 ci: add translation string check to frontend checks 2025-09-05 11:02:37 +10:00
psychedelicious
9610f34dd4 build(ui): add package script to check and clean translatoins 2025-09-05 11:02:37 +10:00
psychedelicious
8a00d855b4 build(ui): port clean translations script to js 2025-09-05 11:02:37 +10:00
psychedelicious
25430f04c5 chore: bump version to v6.6.0rc2 2025-09-04 16:43:41 +10:00
psychedelicious
b2b53c4481 fix(ui): set a react key on the current image viewer's components
This tells react that the component is a new instance each time we
change the image. Which, in turn, prevents a flash of the
previously-selected image during image switching and
progress-image-to-output-image-ing.
2025-09-04 16:35:40 +10:00
psychedelicious
c6696d7913 fix(ui): ensure origin is set correctly for generate tab batches
This prevents an issue in the image viewer's logic for simulating the
progress image "resolving" to a completed image
2025-09-04 16:35:40 +10:00
psychedelicious
8bcb6648f1 fix(ui): stop dragging when user clicks mmb once
This has been an issue for a long time. I suspect it wasn't noticed
until now because it's finicky to trigger - you have to click and
release very quickly, without moving the mouse at all.
2025-09-04 16:16:04 +10:00
psychedelicious
0ee360ba6c fix(ui): show fallback when no image is selected 2025-09-04 16:13:01 +10:00
psychedelicious
09bbe3eef9 fix(ui): clear gallery selection when switching boards and there are no items in the new board 2025-09-04 16:13:01 +10:00
psychedelicious
d14b7a48f5 fix(ui): clear gallery selection when last image on selected board is deleted 2025-09-04 16:13:01 +10:00
Mary Hipp
1db55b0ffa cleanup 2025-09-03 10:11:32 -04:00
Mary Hipp
3104a1baa6 remove crossOrigin for thumbnail loading 2025-09-03 10:11:32 -04:00
psychedelicious
0e523ca2c1 fix(ui): browser image caching cors race condition
Must set cross origin whenever we load an image from a URL to prevent
race conditions where browser caches an image with no CORS, then canvas
attempts to load it with CORS, resulting in browser rejecting the
request before it is made
2025-09-03 10:11:32 -04:00
psychedelicious
75daef2aba fix(ui): fix situation where progress images are super tiny
Missed a spot
2025-09-03 22:56:55 +10:00
psychedelicious
b036b18986 chore: bump version to v6.6.0rc1 2025-09-03 18:02:37 +10:00
Hosted Weblate
93535fa3c2 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-09-03 17:57:27 +10:00
Riccardo Giovanetti
dcafb44f8a translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2088 of 2117 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-03 17:57:27 +10:00
Mary Hipp
44b1d8d1fc remove video base models from image aspect/ratio logic 2025-09-03 10:22:14 +10:00
Attila Cseh
6f70a6bd10 prettier fix 2025-09-02 19:23:24 +10:00
Attila Cseh
0546aeed1d code review changes 2025-09-02 19:23:24 +10:00
Attila Cseh
8933f3f5dd LoRA weight default values turned into constant 2025-09-02 19:23:24 +10:00
Attila Cseh
29cdefe873 type conversion fixed 2025-09-02 19:23:24 +10:00
Attila Cseh
df299bb37f python source code reformatted 2025-09-02 19:23:24 +10:00
Attila Cseh
481fb42371 lint errors fixed 2025-09-02 19:23:24 +10:00
Attila Cseh
631a04b48c LoRA default weight 2025-09-02 19:23:24 +10:00
Attila Cseh
547e1941f4 code review changes 2025-09-02 19:16:26 +10:00
Attila Cseh
031d25ed63 switchable foreground/background colors 2025-09-02 19:16:26 +10:00
Riccardo Giovanetti
27f4af0eb4 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2087 of 2116 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-09-02 15:05:51 +10:00
psychedelicious
e0a0617093 chore(ui): bump dockview
This brings in a fix for Chrome that allowed you to drag tabs and split
the panels.

Closes #8449
2025-09-02 11:05:41 +10:00
psychedelicious
e6a763b887 fix(ui): move getItemsPerRow to frontend src dir
Not sure how but it was in repo root

Closes #8509
2025-09-02 11:02:56 +10:00
psychedelicious
3c9c49f7d9 feat(ui): add readiness checks for LoRAs
If incompatible LoRAs are added, prevent Invoking.

The logic to prevent adding incompatible LoRAs to graphs already
existed. This does not fix any generation bugs; just a visual
inconsistency where it looks like Invoke would use an incompatible LoRA.
2025-09-01 14:41:03 +10:00
Attila Cseh
26690d47b7 lint errors fixed 2025-09-01 14:34:35 +10:00
Attila Cseh
fcaff6ce09 remove LoRAs for recall use all 2025-09-01 14:34:35 +10:00
Damian
afd7296cb2 Add 'sd-2' to supported negative prompt base models
add back negative prompt support for sd2
2025-08-31 10:20:31 -04:00
psychedelicious
d6f42c76d5 fix(app): board count queries not getting categories as params 2025-08-29 11:07:52 +10:00
Mary Hipp
68f39fe907 cleanup 2025-08-28 16:38:48 -04:00
Mary Hipp
23a528545f match screen capture button to the others 2025-08-28 16:38:48 -04:00
Mary Hipp
c69d04a7f0 handle large videos 2025-08-28 15:29:47 -04:00
Mary Hipp
60f1e2d7ad do not show negative prompt for video 2025-08-28 12:59:23 -04:00
Mary Hipp
cb386bec28 do not show reference images on video tab 2025-08-28 12:59:23 -04:00
Mary Hipp
f29ceb3f12 add translations 2025-08-28 10:17:00 -04:00
Mary Hipp
4f51bc9421 add credit estimate for video generation 2025-08-28 10:17:00 -04:00
Mary Hipp
0c41abab79 add label for starting image field 2025-08-28 10:17:00 -04:00
Mary Hipp
cb457c3402 default resolution to 1080p 2025-08-28 10:17:00 -04:00
Mary Hipp
606ad73814 use first video model if none selected 2025-08-28 10:17:00 -04:00
psychedelicious
fe70bd538a fix(ui): hide unused queue actions menu item category 2025-08-28 10:17:00 -04:00
psychedelicious
b5c7316c0a chore(ui): lint 2025-08-28 10:17:00 -04:00
psychedelicious
460aec03ea fix(ui): more video translations 2025-08-28 10:17:00 -04:00
psychedelicious
6730d86a13 fix(ui): make ctx menu star label not refer to iamges 2025-08-28 10:17:00 -04:00
psychedelicious
c4bc03cb1f fix(ui): make ctx menu download tooltip not refer to iamges 2025-08-28 10:17:00 -04:00
psychedelicious
136ee28199 feat(ui): remove unimplemented context menu items for video 2025-08-28 10:17:00 -04:00
psychedelicious
2c6d266c0a fix(ui): metadata viewer translations 2025-08-28 10:17:00 -04:00
psychedelicious
f779920eaa chore(ui): lint 2025-08-28 10:17:00 -04:00
psychedelicious
01bef5d165 fix(ui): do not highlight starting frame image in red when it is not required 2025-08-28 10:17:00 -04:00
psychedelicious
72851d3e84 feat(ui): tweak video settings padding 2025-08-28 10:17:00 -04:00
psychedelicious
4ba85c62ca feat(ui): add border around starting frame image 2025-08-28 10:17:00 -04:00
psychedelicious
313aedb00a fix(ui): graph builder check for veo 2025-08-28 10:17:00 -04:00
psychedelicious
85bd324d74 tweak(ui): nav bar divider not so bright 2025-08-28 10:17:00 -04:00
psychedelicious
4a04411e74 fix(ui): tab hotkeys for video 2025-08-28 10:17:00 -04:00
psychedelicious
299a4db3bb chore(ui): lint 2025-08-28 10:17:00 -04:00
psychedelicious
390faa592c chore: ruff 2025-08-28 10:17:00 -04:00
Mary Hipp
2463aeb84a studio init action for video tab 2025-08-28 10:17:00 -04:00
Mary Hipp
ec8df163d1 launchpad cleanup 2025-08-28 10:17:00 -04:00
Mary Hipp
a198b7da78 fix view on large screens, restore auth for screen capture 2025-08-28 10:17:00 -04:00
Mary Hipp
fb11770852 rearrange image | video | asset for boards 2025-08-28 10:17:00 -04:00
Mary Hipp
6b6f3d56f7 add option for video upsell, rearrange navigation bar and gallery tabs 2025-08-28 10:17:00 -04:00
Mary Hipp
29d00eef9a hide video features if video is disabled 2025-08-28 10:17:00 -04:00
psychedelicious
6972cd708d feat(ui): delete confirmation for videos 2025-08-28 10:17:00 -04:00
psychedelicious
82893804ff feat(ui): metadata recall for videos 2025-08-28 10:17:00 -04:00
psychedelicious
47ffe365bc fix(ui): do not store whole model config in state 2025-08-28 10:17:00 -04:00
psychedelicious
f7b03b1e63 fix(ui): do not change canvas bbox on video model change 2025-08-28 10:17:00 -04:00
psychedelicious
356e38e82a feat(ui): use correct model config object in video graph builders 2025-08-28 10:17:00 -04:00
psychedelicious
5ea077bb8c feat(ui): add selector to get model config for current video model 2025-08-28 10:17:00 -04:00
psychedelicious
3c4b303555 feat(ui): simplify and consolidate video capture logic 2025-08-28 10:17:00 -04:00
psychedelicious
b8651cb1a2 fix(ui): rebase conflict 2025-08-28 10:17:00 -04:00
Mary Hipp
a6527c0ba1 lint again 2025-08-28 10:17:00 -04:00
Mary Hipp
6e40eca754 lint 2025-08-28 10:17:00 -04:00
Mary Hipp
53fab17c33 use context to track video ref so that toolbar can also save current frame 2025-08-28 10:17:00 -04:00
Mary Hipp
3876d88b3c add save frame functionality 2025-08-28 10:17:00 -04:00
Mary Hipp
82b4526691 add video_count and asset_count to boards UI 2025-08-28 10:17:00 -04:00
Mary Hipp
f56ba11394 add asset_count to BoardDTO and split it out from image count 2025-08-28 10:17:00 -04:00
Mary Hipp
32eb5190f2 add video_count to boardDTO 2025-08-28 10:17:00 -04:00
Mary Hipp
72e378789d video metadata support 2025-08-28 10:17:00 -04:00
Mary Hipp
f10ddb0cab split out video aspect/ratio into its own components 2025-08-28 10:17:00 -04:00
Mary Hipp
286127077d updates for new model type 2025-08-28 10:17:00 -04:00
Mary Hipp
36278bc044 add UI support for new model type Video 2025-08-28 10:17:00 -04:00
Mary Hipp
7a1c7ca43a add Video as new model type 2025-08-28 10:17:00 -04:00
psychedelicious
8303d567d5 docs(ui): add note about visual jank in gallery 2025-08-28 10:17:00 -04:00
psychedelicious
1fe19c1242 fix(ui): use correct placeholder for vidoes 2025-08-28 10:17:00 -04:00
psychedelicious
127a43865c fix(ui): locate in gallery, galleryview when selecting image/video 2025-08-28 10:17:00 -04:00
psychedelicious
24a48884cb chore(ui): lint 2025-08-28 10:17:00 -04:00
psychedelicious
47cee816fd chore(ui): dpdm 2025-08-28 10:17:00 -04:00
psychedelicious
90bacaddda feat(ui): video dnd 2025-08-28 10:17:00 -04:00
psychedelicious
c0cc9f421e fix(ui): generate tab graph builder 2025-08-28 10:17:00 -04:00
psychedelicious
dbb9032648 fix(ui): iterations works for video models 2025-08-28 10:17:00 -04:00
psychedelicious
b9e32e59a2 fix(ui): missing tranlsation 2025-08-28 10:17:00 -04:00
psychedelicious
545a1d8737 fix(ui): fetching imageDTO for video 2025-08-28 10:17:00 -04:00
psychedelicious
c4718403a2 tidy(ui): remove unused VideoAtPosition component 2025-08-28 10:17:00 -04:00
psychedelicious
eb308b1ff7 feat(ui): simpler layout for video player 2025-08-28 10:17:00 -04:00
Mary Hipp
a277bea804 fix video styling 2025-08-28 10:17:00 -04:00
Mary Hipp
30619c0420 add runway back as a model and allow runway and veo3 to live together in peace and harmony 2025-08-28 10:17:00 -04:00
Mary Hipp
504d8e32be add runway to backend 2025-08-28 10:17:00 -04:00
Mary Hipp
f21229cd14 update redux selection to have a list of images and/or videos, update image viewer to show either image or video depending on what is selected 2025-08-28 10:17:00 -04:00
Mary Hipp
640ec676c3 lint 2025-08-28 10:17:00 -04:00
Mary Hipp
6370412e9c tsc 2025-08-28 10:17:00 -04:00
Mary Hipp
edec2c2775 lint the dang thing 2025-08-28 10:17:00 -04:00
psychedelicious
bd38be31d8 gallery 2025-08-28 10:17:00 -04:00
psychedelicious
b938ae0a7e Revert "feat(ui): consolidated gallery (wip)"
This reverts commit 12b70bca67.
2025-08-28 10:17:00 -04:00
Mary Hipp
6e5b1ed55f add videos to change board modal 2025-08-28 10:17:00 -04:00
Mary Hipp
5970bd38c2 add resolution as a generation setting 2025-08-28 10:17:00 -04:00
Mary Hipp
e046417cf5 replace runway with veo, build out veo3 model support 2025-08-28 10:17:00 -04:00
Mary Hipp
27a2cd19bd add Veo3 model support to backend 2025-08-28 10:17:00 -04:00
psychedelicious
0df631b802 feat(ui): consolidated gallery (wip) 2025-08-28 10:17:00 -04:00
psychedelicious
5bb7cd168d feat(ui): gallery optimistic updates for video 2025-08-28 10:17:00 -04:00
psychedelicious
b4ba84ad35 fix(ui): panel names on video tab 2025-08-28 10:17:00 -04:00
Mary Hipp
d1628f51c9 stubbing out change board functionality 2025-08-28 10:17:00 -04:00
Mary Hipp
17c1304ce2 hook up starring, unstarring, and deleting single videos (no multiselect yet), adapt context menus to work for both images and videos and start on video context menu 2025-08-28 10:17:00 -04:00
Mary Hipp
cc9a85f7d0 add readiness logic to video tab 2025-08-28 10:17:00 -04:00
psychedelicious
7e2999649a feat(ui): more video stuff 2025-08-28 10:17:00 -04:00
psychedelicious
1473142f73 feat(ui): fiddle w/ video stuff 2025-08-28 10:17:00 -04:00
psychedelicious
49343546e7 feat(ui): fiddle w/ video stuff 2025-08-28 10:17:00 -04:00
psychedelicious
39d5879405 chore: ruff 2025-08-28 10:17:00 -04:00
psychedelicious
4b4ec29a09 feat(nodes): update VideoField & VideoOutput 2025-08-28 10:17:00 -04:00
psychedelicious
dc6811076f feat(ui): add dnd target for video start frame 2025-08-28 10:17:00 -04:00
Mary Hipp
0568784ee9 add duration and aspect ratio to video settings 2025-08-28 10:17:00 -04:00
Mary Hipp
895eac6bcd integrating video into gallery - thinking maybe a new category of image would make more senes 2025-08-28 10:17:00 -04:00
Mary Hipp
fe0efa9bdf add noop video router 2025-08-28 10:17:00 -04:00
Mary Hipp
acabc8bd54 add video models 2025-08-28 10:17:00 -04:00
Mary Hipp
89f999af08 combine nodes that generate and save videos 2025-08-28 10:17:00 -04:00
Mary Hipp
9ae76bef51 build out adhoc video saving graph 2025-08-28 10:17:00 -04:00
Mary Hipp
0999b43616 push up updates for VideoField 2025-08-28 10:17:00 -04:00
Mary Hipp
e6e4f58163 update VideoField 2025-08-28 10:17:00 -04:00
Mary Hipp
b371930e02 split out RunwayVideoOutput from VideoOutput 2025-08-28 10:17:00 -04:00
Mary Hipp
9b50e2303b rough rough POC of video tab 2025-08-28 10:17:00 -04:00
Mary Hipp
49d1810991 video_output support 2025-08-28 10:17:00 -04:00
psychedelicious
b1b009f7b8 chore: bump version to v6.5.1 2025-08-28 22:57:14 +10:00
psychedelicious
3431e6385c chore: uv lock 2025-08-28 22:57:14 +10:00
psychedelicious
5db1027d32 Pin sentencepiece version in pyproject.toml
Pin sentencepiece version to 0.2.0 to avoid coredump issues.
2025-08-28 22:57:14 +10:00
Hosted Weblate
579f182fe9 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-08-28 22:51:40 +10:00
Riccardo Giovanetti
55bf41f63f translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2053 of 2082 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-28 22:51:40 +10:00
psychedelicious
fc32fd2d2e fix(ui): progress image renders at physical size 2025-08-28 22:47:52 +10:00
psychedelicious
a2b6536078 fix(ui): konva caching opt-out doesn't do what i thought it would 2025-08-28 22:45:03 +10:00
Mary Hipp Rogers
144c54a6c8 Revert "video_output support"
This reverts commit 453ef1a220.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ca40daeb97 Revert "rough rough POC of video tab"
This reverts commit e89266bfe3.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e600cdc826 Revert "split out RunwayVideoOutput from VideoOutput"
This reverts commit 97719b0aab.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b7c52f33dc Revert "update VideoField"
This reverts commit bd251f8cce.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e78157fcf0 Revert "push up updates for VideoField"
This reverts commit 94ba840948.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7d7b98249f Revert "build out adhoc video saving graph"
This reverts commit 07565d4015.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f5bf84f304 Revert "combine nodes that generate and save videos"
This reverts commit eff9c7b92f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c30d5bece2 Revert "add video models"
This reverts commit 295b5a20a8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
27845b2f1b Revert "add noop video router"
This reverts commit e9c4e12454.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
bad6eea077 Revert "integrating video into gallery - thinking maybe a new category of image would make more senes"
This reverts commit 5c93e53195.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9c26ac5ce3 Revert "add duration and aspect ratio to video settings"
This reverts commit 4d8bcad15b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b7306bb5c9 Revert "feat(ui): add dnd target for video start frame"
This reverts commit 530d20c1be.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0c115177b2 Revert "feat(nodes): update VideoField & VideoOutput"
This reverts commit 67de3f2d9b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5aae41b5bb Revert "chore: ruff"
This reverts commit 9380d8901c.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7ad09a2f79 Revert "feat(ui): fiddle w/ video stuff"
This reverts commit f98bbc32dd.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5a6d3639b7 Revert "feat(ui): fiddle w/ video stuff"
This reverts commit 79e8482b27.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
84617d3df2 Revert "feat(ui): more video stuff"
This reverts commit 963c2ec60c.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e05f30749e Revert "add readiness logic to video tab"
This reverts commit 288ac0a293.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
88a2e27338 Revert "hook up starring, unstarring, and deleting single videos (no multiselect yet), adapt context menus to work for both images and videos and start on video context menu"
This reverts commit a918198d4f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
15a6fd76c8 Revert "stubbing out change board functionality"
This reverts commit 67042e6dec.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6adb46a86c Revert "fix(ui): panel names on video tab"
This reverts commit 64dfa125d2.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e8a74eb79d Revert "feat(ui): gallery optimistic updates for video"
This reverts commit 0ec6d33086.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
dcd716c384 Revert "feat(ui): consolidated gallery (wip)"
This reverts commit 6ef1c2a5e1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
56697635dd Revert "add Veo3 model support to backend"
This reverts commit 49d569ec59.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5b5657e292 Revert "replace runway with veo, build out veo3 model support"
This reverts commit d95a698ebd.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ad3dfbe1ed Revert "add resolution as a generation setting"
This reverts commit b71829a827.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
59ddc4f7b0 Revert "add videos to change board modal"
This reverts commit 45b4432833.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4653b79f12 Revert "Revert "feat(ui): consolidated gallery (wip)""
This reverts commit 637d19c22b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
778d6f167f Revert "gallery"
This reverts commit aa4e3adadb.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
05c71f50f1 Revert "lint the dang thing"
This reverts commit 1b0d599dc2.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
406e0be39c Revert "tsc"
This reverts commit 7828102b67.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0d71234a12 Revert "lint"
This reverts commit b377b80446.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e38019bb70 Revert "update redux selection to have a list of images and/or videos, update image viewer to show either image or video depending on what is selected"
This reverts commit 8df3067599.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
a879880b42 Revert "add runway to backend"
This reverts commit f631b5178f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
71c8accbfe Revert "add runway back as a model and allow runway and veo3 to live together in peace and harmony"
This reverts commit b2026d9c00.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
154fb99daf Revert "fix video styling"
This reverts commit 3d9889e272.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0df476ce13 Revert "feat(ui): simpler layout for video player"
This reverts commit 3a1cedbced.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e7ad830fa9 Revert "tidy(ui): remove unused VideoAtPosition component"
This reverts commit e55d39a20b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e81e0a8286 Revert "fix(ui): fetching imageDTO for video"
This reverts commit fbf8aa17c8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
d0f7e72cbb Revert "fix(ui): missing tranlsation"
This reverts commit 89efe9c2b1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
fdead4fb8c Revert "fix(ui): iterations works for video models"
This reverts commit 24f22d539f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
31c9945b32 Revert "fix(ui): generate tab graph builder"
This reverts commit 84dc4e4ea9.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
22de8a4b12 Revert "feat(ui): video dnd"
This reverts commit f5fdba795a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
89cb3c3230 Revert "chore(ui): dpdm"
This reverts commit 6a7fe6668b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
7bb99ece4e Revert "chore(ui): lint"
This reverts commit 55139bb169.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
28f040123f Revert "fix(ui): locate in gallery, galleryview when selecting image/video"
This reverts commit 26fe937d97.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
1be3a4db64 Revert "fix(ui): use correct placeholder for vidoes"
This reverts commit 7e031e9c01.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
cb44c995d2 Revert "docs(ui): add note about visual jank in gallery"
This reverts commit 2d9c82da85.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9b9b35c315 Revert "add Video as new model type"
This reverts commit fb0a924918.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f6edab6032 Revert "add UI support for new model type Video"
This reverts commit c6f2d127ef.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f79665b023 Revert "updates for new model type"
This reverts commit 23cde86bc4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6b1bc7a87d Revert "split out video aspect/ratio into its own components"
This reverts commit 6c375b228e.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c6f2994c84 Revert "video metadata support"
This reverts commit b16d1a943d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0cff67ff23 Revert "add video_count to boardDTO"
This reverts commit 1cc6893d0d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
e957c11c9a Revert "add asset_count to BoardDTO and split it out from image count"
This reverts commit d4378d9f2a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4baa685c7a Revert "add video_count and asset_count to boards UI"
This reverts commit e36490c2ec.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
1bd5907a12 Revert "add save frame functionality"
This reverts commit 6a20271dba.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
2fd56e6029 Revert "use context to track video ref so that toolbar can also save current frame"
This reverts commit 1bf25fadb3.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b0548edc8c Revert "lint"
This reverts commit 378f33bc92.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
41d781176f Revert "lint again"
This reverts commit 41e1697e79.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
8709de0b33 Revert "fix(ui): rebase conflict"
This reverts commit bc6dd12083.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
af43fe2fd4 Revert "feat(ui): simplify and consolidate video capture logic"
This reverts commit c5a76806c1.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ebbb11c3b1 Revert "feat(ui): add selector to get model config for current video model"
This reverts commit 5cabc37a87.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0fc8c08da3 Revert "feat(ui): use correct model config object in video graph builders"
This reverts commit 9fcba3b876.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
bfadcffe3c Revert "fix(ui): do not change canvas bbox on video model change"
This reverts commit 8eb3f40e1b.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
49c2332c13 Revert "fix(ui): do not store whole model config in state"
This reverts commit b2ed3c99d4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
dacef158c4 Revert "feat(ui): metadata recall for videos"
This reverts commit 4c32b2a123.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0c34d8201e Revert "feat(ui): delete confirmation for videos"
This reverts commit 505c75a5ab.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
77132075ff Revert "hide video features if video is disabled"
This reverts commit 0de5097207.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
f008d3b0b2 Revert "add option for video upsell, rearrange navigation bar and gallery tabs"
This reverts commit 4845d31857.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4e66ccefe8 Revert "rearrange image | video | asset for boards"
This reverts commit 8a60def51f.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
5d0ed45326 Revert "fix view on large screens, restore auth for screen capture"
This reverts commit 1f526a1c27.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
379d633ac6 Revert "launchpad cleanup"
This reverts commit ab41f71a36.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
93bba1b692 Revert "studio init action for video tab"
This reverts commit 431fd83a43.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
667e175ab7 Revert "chore: ruff"
This reverts commit 3ae99df091.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
de146aa4aa Revert "chore(ui): lint"
This reverts commit 36c16d2781.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
ed9c2c8208 Revert "fix(ui): tab hotkeys for video"
This reverts commit 20813b5615.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
9d984878f3 Revert "tweak(ui): nav bar divider not so bright"
This reverts commit 269d4fe670.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
585eb8c69d Revert "fix(ui): graph builder check for veo"
This reverts commit 239fb86a46.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c105bae127 Revert "feat(ui): add border around starting frame image"
This reverts commit 8642e8881d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
c39f26266f Revert "feat(ui): tweak video settings padding"
This reverts commit 842d729ec8.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
47dffd123a Revert "fix(ui): do not highlight starting frame image in red when it is not required"
This reverts commit 0b05b24e9a.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
b946ec3172 Revert "chore(ui): lint"
This reverts commit 8c2e6a3988.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
024c02329d Revert "fix(ui): metadata viewer translations"
This reverts commit 2a6cfde488.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
4b43b59472 Revert "feat(ui): remove unimplemented context menu items for video"
This reverts commit a6b0581939.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
d11f115e1a Revert "fix(ui): make ctx menu download tooltip not refer to iamges"
This reverts commit e4f24c4dc4.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
92253ce854 Revert "fix(ui): make ctx menu star label not refer to iamges"
This reverts commit ec793cb636.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
0ebbfa90c9 Revert "fix(ui): more video translations"
This reverts commit 0d827d8306.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
fdfee11e37 Revert "chore(ui): lint"
This reverts commit 3971382a6d.
2025-08-28 08:32:47 -04:00
Mary Hipp Rogers
6091bf4f60 Revert "fix(ui): hide unused queue actions menu item category"
This reverts commit 07271ca468.
2025-08-28 08:32:47 -04:00
psychedelicious
07271ca468 fix(ui): hide unused queue actions menu item category 2025-08-28 08:23:58 -04:00
psychedelicious
3971382a6d chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
0d827d8306 fix(ui): more video translations 2025-08-28 08:23:58 -04:00
psychedelicious
ec793cb636 fix(ui): make ctx menu star label not refer to iamges 2025-08-28 08:23:58 -04:00
psychedelicious
e4f24c4dc4 fix(ui): make ctx menu download tooltip not refer to iamges 2025-08-28 08:23:58 -04:00
psychedelicious
a6b0581939 feat(ui): remove unimplemented context menu items for video 2025-08-28 08:23:58 -04:00
psychedelicious
2a6cfde488 fix(ui): metadata viewer translations 2025-08-28 08:23:58 -04:00
psychedelicious
8c2e6a3988 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
0b05b24e9a fix(ui): do not highlight starting frame image in red when it is not required 2025-08-28 08:23:58 -04:00
psychedelicious
842d729ec8 feat(ui): tweak video settings padding 2025-08-28 08:23:58 -04:00
psychedelicious
8642e8881d feat(ui): add border around starting frame image 2025-08-28 08:23:58 -04:00
psychedelicious
239fb86a46 fix(ui): graph builder check for veo 2025-08-28 08:23:58 -04:00
psychedelicious
269d4fe670 tweak(ui): nav bar divider not so bright 2025-08-28 08:23:58 -04:00
psychedelicious
20813b5615 fix(ui): tab hotkeys for video 2025-08-28 08:23:58 -04:00
psychedelicious
36c16d2781 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
3ae99df091 chore: ruff 2025-08-28 08:23:58 -04:00
Mary Hipp
431fd83a43 studio init action for video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
ab41f71a36 launchpad cleanup 2025-08-28 08:23:58 -04:00
Mary Hipp
1f526a1c27 fix view on large screens, restore auth for screen capture 2025-08-28 08:23:58 -04:00
Mary Hipp
8a60def51f rearrange image | video | asset for boards 2025-08-28 08:23:58 -04:00
Mary Hipp
4845d31857 add option for video upsell, rearrange navigation bar and gallery tabs 2025-08-28 08:23:58 -04:00
Mary Hipp
0de5097207 hide video features if video is disabled 2025-08-28 08:23:58 -04:00
psychedelicious
505c75a5ab feat(ui): delete confirmation for videos 2025-08-28 08:23:58 -04:00
psychedelicious
4c32b2a123 feat(ui): metadata recall for videos 2025-08-28 08:23:58 -04:00
psychedelicious
b2ed3c99d4 fix(ui): do not store whole model config in state 2025-08-28 08:23:58 -04:00
psychedelicious
8eb3f40e1b fix(ui): do not change canvas bbox on video model change 2025-08-28 08:23:58 -04:00
psychedelicious
9fcba3b876 feat(ui): use correct model config object in video graph builders 2025-08-28 08:23:58 -04:00
psychedelicious
5cabc37a87 feat(ui): add selector to get model config for current video model 2025-08-28 08:23:58 -04:00
psychedelicious
c5a76806c1 feat(ui): simplify and consolidate video capture logic 2025-08-28 08:23:58 -04:00
psychedelicious
bc6dd12083 fix(ui): rebase conflict 2025-08-28 08:23:58 -04:00
Mary Hipp
41e1697e79 lint again 2025-08-28 08:23:58 -04:00
Mary Hipp
378f33bc92 lint 2025-08-28 08:23:58 -04:00
Mary Hipp
1bf25fadb3 use context to track video ref so that toolbar can also save current frame 2025-08-28 08:23:58 -04:00
Mary Hipp
6a20271dba add save frame functionality 2025-08-28 08:23:58 -04:00
Mary Hipp
e36490c2ec add video_count and asset_count to boards UI 2025-08-28 08:23:58 -04:00
Mary Hipp
d4378d9f2a add asset_count to BoardDTO and split it out from image count 2025-08-28 08:23:58 -04:00
Mary Hipp
1cc6893d0d add video_count to boardDTO 2025-08-28 08:23:58 -04:00
Mary Hipp
b16d1a943d video metadata support 2025-08-28 08:23:58 -04:00
Mary Hipp
6c375b228e split out video aspect/ratio into its own components 2025-08-28 08:23:58 -04:00
Mary Hipp
23cde86bc4 updates for new model type 2025-08-28 08:23:58 -04:00
Mary Hipp
c6f2d127ef add UI support for new model type Video 2025-08-28 08:23:58 -04:00
Mary Hipp
fb0a924918 add Video as new model type 2025-08-28 08:23:58 -04:00
psychedelicious
2d9c82da85 docs(ui): add note about visual jank in gallery 2025-08-28 08:23:58 -04:00
psychedelicious
7e031e9c01 fix(ui): use correct placeholder for vidoes 2025-08-28 08:23:58 -04:00
psychedelicious
26fe937d97 fix(ui): locate in gallery, galleryview when selecting image/video 2025-08-28 08:23:58 -04:00
psychedelicious
55139bb169 chore(ui): lint 2025-08-28 08:23:58 -04:00
psychedelicious
6a7fe6668b chore(ui): dpdm 2025-08-28 08:23:58 -04:00
psychedelicious
f5fdba795a feat(ui): video dnd 2025-08-28 08:23:58 -04:00
psychedelicious
84dc4e4ea9 fix(ui): generate tab graph builder 2025-08-28 08:23:58 -04:00
psychedelicious
24f22d539f fix(ui): iterations works for video models 2025-08-28 08:23:58 -04:00
psychedelicious
89efe9c2b1 fix(ui): missing tranlsation 2025-08-28 08:23:58 -04:00
psychedelicious
fbf8aa17c8 fix(ui): fetching imageDTO for video 2025-08-28 08:23:58 -04:00
psychedelicious
e55d39a20b tidy(ui): remove unused VideoAtPosition component 2025-08-28 08:23:58 -04:00
psychedelicious
3a1cedbced feat(ui): simpler layout for video player 2025-08-28 08:23:58 -04:00
Mary Hipp
3d9889e272 fix video styling 2025-08-28 08:23:58 -04:00
Mary Hipp
b2026d9c00 add runway back as a model and allow runway and veo3 to live together in peace and harmony 2025-08-28 08:23:58 -04:00
Mary Hipp
f631b5178f add runway to backend 2025-08-28 08:23:58 -04:00
Mary Hipp
8df3067599 update redux selection to have a list of images and/or videos, update image viewer to show either image or video depending on what is selected 2025-08-28 08:23:58 -04:00
Mary Hipp
b377b80446 lint 2025-08-28 08:23:58 -04:00
Mary Hipp
7828102b67 tsc 2025-08-28 08:23:58 -04:00
Mary Hipp
1b0d599dc2 lint the dang thing 2025-08-28 08:23:58 -04:00
psychedelicious
aa4e3adadb gallery 2025-08-28 08:23:58 -04:00
psychedelicious
637d19c22b Revert "feat(ui): consolidated gallery (wip)"
This reverts commit 12b70bca67.
2025-08-28 08:23:58 -04:00
Mary Hipp
45b4432833 add videos to change board modal 2025-08-28 08:23:58 -04:00
Mary Hipp
b71829a827 add resolution as a generation setting 2025-08-28 08:23:58 -04:00
Mary Hipp
d95a698ebd replace runway with veo, build out veo3 model support 2025-08-28 08:23:58 -04:00
Mary Hipp
49d569ec59 add Veo3 model support to backend 2025-08-28 08:23:58 -04:00
psychedelicious
6ef1c2a5e1 feat(ui): consolidated gallery (wip) 2025-08-28 08:23:58 -04:00
psychedelicious
0ec6d33086 feat(ui): gallery optimistic updates for video 2025-08-28 08:23:58 -04:00
psychedelicious
64dfa125d2 fix(ui): panel names on video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
67042e6dec stubbing out change board functionality 2025-08-28 08:23:58 -04:00
Mary Hipp
a918198d4f hook up starring, unstarring, and deleting single videos (no multiselect yet), adapt context menus to work for both images and videos and start on video context menu 2025-08-28 08:23:58 -04:00
Mary Hipp
288ac0a293 add readiness logic to video tab 2025-08-28 08:23:58 -04:00
psychedelicious
963c2ec60c feat(ui): more video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
79e8482b27 feat(ui): fiddle w/ video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
f98bbc32dd feat(ui): fiddle w/ video stuff 2025-08-28 08:23:58 -04:00
psychedelicious
9380d8901c chore: ruff 2025-08-28 08:23:58 -04:00
psychedelicious
67de3f2d9b feat(nodes): update VideoField & VideoOutput 2025-08-28 08:23:58 -04:00
psychedelicious
530d20c1be feat(ui): add dnd target for video start frame 2025-08-28 08:23:58 -04:00
Mary Hipp
4d8bcad15b add duration and aspect ratio to video settings 2025-08-28 08:23:58 -04:00
Mary Hipp
5c93e53195 integrating video into gallery - thinking maybe a new category of image would make more senes 2025-08-28 08:23:58 -04:00
Mary Hipp
e9c4e12454 add noop video router 2025-08-28 08:23:58 -04:00
Mary Hipp
295b5a20a8 add video models 2025-08-28 08:23:58 -04:00
Mary Hipp
eff9c7b92f combine nodes that generate and save videos 2025-08-28 08:23:58 -04:00
Mary Hipp
07565d4015 build out adhoc video saving graph 2025-08-28 08:23:58 -04:00
Mary Hipp
94ba840948 push up updates for VideoField 2025-08-28 08:23:58 -04:00
Mary Hipp
bd251f8cce update VideoField 2025-08-28 08:23:58 -04:00
Mary Hipp
97719b0aab split out RunwayVideoOutput from VideoOutput 2025-08-28 08:23:58 -04:00
Mary Hipp
e89266bfe3 rough rough POC of video tab 2025-08-28 08:23:58 -04:00
Mary Hipp
453ef1a220 video_output support 2025-08-28 08:23:58 -04:00
psychedelicious
faf8f0f291 chore: bump version to v6.5.0 2025-08-28 13:32:37 +10:00
psychedelicious
5d36499982 chore: update whatsnew 2025-08-28 13:32:37 +10:00
Linos
151d67a0cc translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2082 of 2082 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-28 13:02:16 +10:00
Riccardo Giovanetti
72431ff197 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2053 of 2082 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-28 13:02:16 +10:00
psychedelicious
0de1feed76 chore(ui): lint 2025-08-28 12:59:35 +10:00
psychedelicious
7ffb626dbe feat(ui): add image load errors to logging 2025-08-28 12:59:35 +10:00
psychedelicious
79753289b1 feat(ui): log image failed to load errors at error level 2025-08-28 12:59:35 +10:00
psychedelicious
bac4c05fd9 feat(ui): log "destroying module" at debug level 2025-08-28 12:59:35 +10:00
psychedelicious
8a3b5d2c6f fix(ui): do not cache canvas entities when they have no w/h 2025-08-28 12:59:35 +10:00
psychedelicious
309578c19a fix(ui): progress image gets stuck on viewer when generating on canvas 2025-08-28 12:55:36 +10:00
Mary Hipp
fd58e1d0f2 update copy for API models without w/h controls 2025-08-27 09:24:22 -04:00
psychedelicious
04ffb979ce fix(ui): deny the pull of the square 2025-08-27 08:56:15 -04:00
psychedelicious
35c00d5a83 chore(ui): lint 2025-08-27 08:56:15 -04:00
psychedelicious
c2b49d58f5 fix(ui): gemini 2.5 unsupported gen mode error message 2025-08-27 08:56:15 -04:00
psychedelicious
6ff6b40a35 feat(ui): support unknown output image dimensions on canvas
Gemini 2.5 Flash makes no guarantees about output image sizes. Our
existing logic always rendered staged images on Canvas at the bbox dims
- not the image's physical dimensions. When Gemini returns an image that
doesn't match the bbox, it would get squished.

To rectify this, the canvas staging area renderer is updated to render
its images using their physical dimensions, as opposed to their
configured dimensions (i.e. bbox).

A flag on CanvasObjectImage enables this rendering behaviour.

Then, when saving the image as a layer from staging area, we use the
physical dimensions.

When the bbox and physical dimensions do not match, the bbox is not
touched, so it won't exactly encompass the staged image. No point in
resizing the bbox if the dimensions don't match - the next image could
be a different size, and the sizes might not be valid (it's an external
resource, after all).
2025-08-27 08:56:15 -04:00
psychedelicious
1f1beda567 fix(ui): remove gemini aspect ratio checking in graph builder 2025-08-27 08:56:15 -04:00
psychedelicious
91d62eb242 fix(ui): update ref image type when switching to gemini 2025-08-27 08:56:15 -04:00
psychedelicious
013e02d08b feat(ui): show w/h, scaled bbox settings only when relevant 2025-08-27 08:56:15 -04:00
psychedelicious
115053972c feat(ui): handle api model determination in a clearer way w/ list of base models; use it in dimensions component 2025-08-27 08:56:15 -04:00
psychedelicious
bcab754ac2 docs(ui): add note about reactflow types 2025-08-27 08:56:15 -04:00
psychedelicious
f1a542aca2 docs(ui): add note about extraneous coordinates in paramsSlice 2025-08-27 08:56:15 -04:00
psychedelicious
0701cc63a1 feat(ui): hide width/height sliders for api models
These models only support aspect ratio inputs; not pixel dimensions
2025-08-27 08:56:15 -04:00
psychedelicious
9337710b45 chore(ui): lint 2025-08-27 08:56:15 -04:00
psychedelicious
592ef5a9ee feat(ui): improved support model handling when switching models
- Disable LoRAs instead of deleting them when base model changes
- Update toast message to indicate that we may have _updated_ a model
(prev just said cleared or disabled)
- Do not change ref image models if the new base model doesn't support
them. For example, changing from SDXL to Imagen does not update the ref
image model or alert the user, because Imagen does not support ref
images. Switching from Imagen to FLUX does update the ref image model
and alert the user. Just a bit less noisy.
2025-08-27 08:56:15 -04:00
psychedelicious
5fe39a3ae9 fix(ui): add gemini 2.5 to ref image supporting models 2025-08-27 08:56:15 -04:00
psychedelicious
1888c586ca feat(ui): do not prevent invoking when ref images are added but model does not support ref images 2025-08-27 08:56:15 -04:00
psychedelicious
88922a467e feat(ui): hide ref images UI when selected models does not support ref images 2025-08-27 08:56:15 -04:00
psychedelicious
84115e598c fix(ui): lock height slider when using api model 2025-08-27 08:56:15 -04:00
Mary Hipp
370fc67777 UI support for gemini 2.5 API model 2025-08-27 08:56:15 -04:00
Mary Hipp
fa810e1d02 add gemini 2.5 to base model 2025-08-27 08:56:15 -04:00
Attila Cseh
ec5043aa83 useNodeFieldElementExists turned private 2025-08-26 11:39:16 +10:00
Attila Cseh
9a2a0cef74 node field dnd logic updated to prevent duplicates 2025-08-26 11:39:16 +10:00
Attila Cseh
c205c1d19e current board removed from options 2025-08-26 11:33:39 +10:00
Attila Cseh
ae1a815453 change board - sorting order of boards alphabetical 2025-08-26 11:33:39 +10:00
psychedelicious
687bc281e5 chore: prep for v6.5.0rc1 (#8479)
## Summary

Bump version

## Related Issues / Discussions

n/a

## QA Instructions

n/a

## Merge Plan

This is already released.

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2025-08-26 11:25:01 +10:00
psychedelicious
567316d753 chore: bump version to v6.5.0rc1 2025-08-25 18:10:18 +10:00
psychedelicious
53ac7c9d2c feat(ui): bbox aspect ratio lock is always inverted by shift 2025-08-25 17:59:20 +10:00
Riccardo Giovanetti
90be2a0cdf translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2050 of 2079 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-25 17:57:54 +10:00
Attila Cseh
c7fb8f69ae code review fixes 2025-08-25 17:53:59 +10:00
Attila Cseh
7fecb8e88b formatting fixed 2025-08-25 17:53:59 +10:00
Attila Cseh
ee6a2a6603 respect direction of selection in Gallery 2025-08-25 17:53:59 +10:00
Attila Cseh
2496ac19c4 remove input field from form 2025-08-25 16:33:09 +10:00
psychedelicious
e34ed199c9 feat(ui): respect aspect ratio when resizing bbox on canvas 2025-08-25 15:30:01 +10:00
psychedelicious
569533ef80 fix(ui): toggle bbox visibility translation 2025-08-25 14:51:34 +10:00
psychedelicious
dfac73f9f0 fix(ui): disable color picker while middle-mouse panning canvas 2025-08-25 14:47:42 +10:00
psychedelicious
f4219d5db3 chore: uv lock 2025-08-23 14:17:56 +10:00
psychedelicious
04d1958e93 feat(app): vendor in invisible-watermark
Fixes errors like `AttributeError: module 'cv2.ximgproc' has no
attribute 'thinning'` which occur because there is a conflict between
our own `opencv-contrib-python` dependency and the `invisible-watermark`
library's `opencv-python`.
2025-08-23 14:17:56 +10:00
psychedelicious
47d7d93e78 fix(ui): float input precision
Determine the "base" step for floats. If no `multipleOf` is provided,
the "base" step is `undefined`, meaning the float can have any number of
decimal places.

The UI library does its own step constraints though and is rounding to 3
decimal places. Probably need to update the logic in the UI library to
have truly arbitrary precision for float fields.
2025-08-22 13:35:59 +10:00
psychedelicious
0e17950949 fix(ui): race condition when setting hf token and downloading model
I ran into a race condition where I set a HF token and it was valid, but
somehow this error toast still appeared. The conditional fell through to
an assertion that we never expected to get to, which crashed the UI.

Handled the unexpected case gracefully now.
2025-08-22 13:30:38 +10:00
psychedelicious
b0cfdc94b5 feat(ui): do not sample alpha in Canvas color picker
Closes #7897
2025-08-21 21:38:03 +10:00
psychedelicious
bb153b55d3 docs: update quick start 2025-08-21 21:26:09 +10:00
psychedelicious
93ef637d59 docs: update latest release links 2025-08-21 21:26:09 +10:00
Attila Cseh
c5689ca1a7 code review changes 2025-08-21 19:42:38 +10:00
Attila Cseh
008e421ad4 shuffle button on workflows 2025-08-21 19:42:38 +10:00
psychedelicious
28a77ab06c Revert "experiment: add non-lfs-tracked file to lfs-tracked dir"
This reverts commit 4f4b7ddfb0.
2025-08-21 15:49:20 +10:00
psychedelicious
be48d3c12d ci: give workflow perms to label/comment on pr 2025-08-21 15:49:20 +10:00
psychedelicious
518b21a49a experiment: add non-lfs-tracked file to lfs-tracked dir 2025-08-21 15:49:20 +10:00
psychedelicious
68825ca9eb ci: add workflow to catch incorrect usage of git-lfs 2025-08-21 15:49:20 +10:00
psychedelicious
73c5f0b479 chore: bump version to v6.4.0 2025-08-19 12:19:02 +10:00
psychedelicious
7b4e04cd7c git: move test LoRA to LFS 2025-08-19 11:56:59 +10:00
Linos
ae4368fabe translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2073 of 2073 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-19 10:28:35 +10:00
psychedelicious
df8e39a9e1 chore: bump version to v6.4.0rc2 2025-08-19 00:01:48 +10:00
psychedelicious
45b43de571 fix(ui): prevent node drag when editing title
Closes #8435
2025-08-18 23:20:28 +10:00
psychedelicious
6d18a72a05 fix(ui): fit to bbox when bbox is not aligned to 64px grid 2025-08-18 23:17:45 +10:00
Kent Keirsey
af58a75e97 Support PEFT Loras with Base_Model.model prefix (#8433)
* Support PEFT Loras with Base_Model.model prefix

* update tests

* ruff

* fix python complaints

* update keys

* format keys

* remove unneeded test
2025-08-18 09:14:46 -04:00
psychedelicious
fd4c3bd27a refactor: estimate working vae memory during encode/decode
- Move the estimation logic to utility functions
- Estimate memory _within_ the encode and decode methods, ensuring we
_always_ estimate working memory when running a VAE
2025-08-18 21:43:14 +10:00
psychedelicious
1f8a60ded2 fix(ui): export NumericalParameterConfig type 2025-08-18 21:38:17 +10:00
psychedelicious
b1b677997d chore: bump version to v6.4.0rc1 2025-08-18 21:34:09 +10:00
psychedelicious
f17b43d736 chore(ui): update whatsnew 2025-08-18 21:34:09 +10:00
psychedelicious
c009a50489 feat(ui): reduce storage persist debounce to 300ms
matches pre-server-backed-state-persistence value
2025-08-18 21:34:09 +10:00
psychedelicious
97a16c455c fix(ui): update board totals when generation completes 2025-08-18 21:34:09 +10:00
psychedelicious
a8a07598c8 chore: ruff 2025-08-18 21:14:00 +10:00
psychedelicious
23206e22e8 tests: skip excessively flaky MPS-specific tests in CI 2025-08-18 21:14:00 +10:00
psychedelicious
f4aba52b90 feat(ui): use flushSync for locateInGallery to ensure panel api calls finish before selecting image 2025-08-18 19:55:06 +10:00
psychedelicious
d17c273939 feat(ui): add locate in gallery button to current image buttons toolbar 2025-08-18 19:55:06 +10:00
psychedelicious
aeb5e7d50a feat(ui): hide locate in gallery from context when unable to actually locate
e.g. when on a tab that doesn't have a gallery, or the image is
intermediate
2025-08-18 19:55:06 +10:00
psychedelicious
580ad30832 feat(ui): use bold icon for locate in gallery 2025-08-18 19:55:06 +10:00
psychedelicious
6390f7d734 fix(ui): more reliable scrollIntoView/"Locate in Gallery"
Three changes needed to make scrollIntoView and "Locate in Gallery" work
reliably.

1. Use setTimeout to work around race condition with scrollIntoView in
gallery.

It was possible to call scrollIntoView before react-virtuoso was ready.
I think react-virtuoso was initialized but hadn't rendered/measured its
items yet, so when we scroll to e.g. index 742, the items have a zero
height, so it doesn't actually scroll down. Then the items render.

Setting a timeout here defers the scroll until after the next event loop
cycle, by which time we expect react-virtuoso to be ready.

2. Ensure the scrollIntoView effect in gallery triggers any time the
selection is touched by making its dependency the array of selected
images, not just the last selected image name.

The "locate in gallery" functionality works by selecting an image.
There's a reactive effect in the gallery that runs when the last
selected image changes and scrolls it into view.

But if you already have an image selected, selecting it again will not
change the image name bc it is a string primitive. The useEffect ignores
the selection.

So, if you clicked "locate in gallery" on an image that was already
selected, it wouldn't be scrolled into view - even if you had already
scrolled away from it.

To work around this, the effect now uses the whole selection array as
its dependency. Whenever the selection changes, we get a new array,
which triggers the effect.

3. Gallery slice had some checks to avoid creating a new array of
selected image names in state when the selected images didn't change.

For example, if image "abc" was selected, and we selected "abc" again,
instead of creating a new array with the same "abc" image, we bailed
early. IIRC this optimization addressed a rerender issue long ago.

This optimization needs to be removed in order for fix #2 above to work.
We now _want_ a new array whenever selection is set - even if it didn't
actually change.
2025-08-18 19:55:06 +10:00
psychedelicious
5ddbfefb6a feat(ui): add trace logging to scrollIntoView 2025-08-18 19:55:06 +10:00
psychedelicious
bbf5ed7956 fix(ui): use is_intermediate to determine if image is gallery image 2025-08-18 19:55:06 +10:00
Attila Cseh
19cd6eed08 locate in gallery image context menu 2025-08-18 19:55:06 +10:00
Attila Cseh
9c1eb263a8 new entity added above the currently selected one 2025-08-18 18:46:40 +10:00
Attila Cseh
75755189a7 prettier fixes 2025-08-18 18:46:40 +10:00
Attila Cseh
a9ab72d27d new layers created on the top of the existing layers 2025-08-18 18:46:40 +10:00
Attila Cseh
678eb34995 duplicate layer appear above original one 2025-08-18 18:46:40 +10:00
Attila Cseh
ef7050f560 merged layers order retained 2025-08-18 18:46:40 +10:00
Attila Cseh
9787d9de74 prettier fix 2025-08-18 18:30:08 +10:00
Attila Cseh
bb4a50bab2 confirmation before downloading starter bundle 2025-08-18 18:30:08 +10:00
482 changed files with 20071 additions and 7735 deletions

View File

@@ -18,5 +18,6 @@
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _❗Changes to a redux slice have a corresponding migration_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_

30
.github/workflows/lfs-checks.yml vendored Normal file
View File

@@ -0,0 +1,30 @@
# Checks that large files and LFS-tracked files are properly checked in with pointer format.
# Uses https://github.com/ppremk/lfs-warning to detect LFS issues.
name: 'lfs checks'
on:
push:
branches:
- 'main'
pull_request:
types:
- 'ready_for_review'
- 'opened'
- 'synchronize'
merge_group:
workflow_dispatch:
jobs:
lfs-check:
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
# Required to label and comment on the PRs
pull-requests: write
steps:
- name: checkout
uses: actions/checkout@v4
- name: check lfs files
uses: ppremk/lfs-warning@v3.3

View File

@@ -33,30 +33,45 @@ Hardware requirements vary significantly depending on model and image output siz
More detail on system requirements can be found [here](./requirements.md).
## Step 2: Download
## Step 2: Download and Set Up the Launcher
Download the most recent launcher for your operating system:
The Launcher manages your Invoke install. Follow these instructions to download and set up the Launcher.
- [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
- [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)
- [Download for Linux](https://download.invoke.ai/Invoke%20Community%20Edition.AppImage)
!!! info "Instructions for each OS"
## Step 3: Install or Update
=== "Windows"
Run the launcher you just downloaded, click **Install** and follow the instructions to get set up.
- [Download for Windows](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition.Setup.latest.exe)
- Run the `EXE` to install the Launcher and start it.
- A desktop shortcut will be created; use this to run the Launcher in the future.
- You can delete the `EXE` file you downloaded.
=== "macOS"
- [Download for macOS](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest-arm64.dmg)
- Open the `DMG` and drag the app into `Applications`.
- Run the Launcher using its entry in `Applications`.
- You can delete the `DMG` file you downloaded.
=== "Linux"
- [Download for Linux](https://github.com/invoke-ai/launcher/releases/latest/download/Invoke.Community.Edition-latest.AppImage)
- You may need to edit the `AppImage` file properties and make it executable.
- Optionally move the file to a location that does not require admin privileges and add a desktop shortcut for it.
- Run the Launcher by double-clicking the `AppImage` or the shortcut you made.
## Step 3: Install Invoke
Run the Launcher you just set up if you haven't already. Click **Install** and follow the instructions to install (or update) Invoke.
If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.
!!! warning "Problem running the launcher on macOS"
!!! tip "Updating"
macOS may not allow you to run the launcher. We are working to resolve this by signing the launcher executable. Until that is done, you can manually flag the launcher as safe:
The Launcher will check for updates for itself _and_ Invoke.
- Open the **Invoke Community Edition.dmg** file.
- Drag the launcher to **Applications**.
- Open a terminal.
- Run `xattr -d 'com.apple.quarantine' /Applications/Invoke\ Community\ Edition.app`.
You should now be able to run the launcher.
- When the Launcher detects an update is available for itself, you'll get a small popup window. Click through this and the Launcher will update itself.
- When the Launcher detects an update for Invoke, you'll see a small green alert in the Launcher. Click that and follow the instructions to update Invoke.
## Step 4: Launch

View File

@@ -0,0 +1,39 @@
from fastapi import Body, HTTPException
from fastapi.routing import APIRouter
from invokeai.app.services.videos_common import AddVideosToBoardResult, RemoveVideosFromBoardResult
board_videos_router = APIRouter(prefix="/v1/board_videos", tags=["boards"])
@board_videos_router.post(
"/batch",
operation_id="add_videos_to_board",
responses={
201: {"description": "Videos were added to board successfully"},
},
status_code=201,
response_model=AddVideosToBoardResult,
)
async def add_videos_to_board(
board_id: str = Body(description="The id of the board to add to"),
video_ids: list[str] = Body(description="The ids of the videos to add", embed=True),
) -> AddVideosToBoardResult:
"""Adds a list of videos to a board"""
raise HTTPException(status_code=501, detail="Not implemented")
@board_videos_router.post(
"/batch/delete",
operation_id="remove_videos_from_board",
responses={
201: {"description": "Videos were removed from board successfully"},
},
status_code=201,
response_model=RemoveVideosFromBoardResult,
)
async def remove_videos_from_board(
video_ids: list[str] = Body(description="The ids of the videos to remove", embed=True),
) -> RemoveVideosFromBoardResult:
"""Removes a list of videos from their board, if they had one"""
raise HTTPException(status_code=501, detail="Not implemented")

View File

@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
Batch,
BatchStatus,
CancelAllExceptCurrentResult,
@@ -18,6 +17,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
DeleteByDestinationResult,
EnqueueBatchResult,
FieldIdentifier,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -25,7 +25,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
SessionQueueItemNotFoundError,
SessionQueueStatus,
)
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"])
@@ -68,36 +68,6 @@ async def enqueue_batch(
raise HTTPException(status_code=500, detail=f"Unexpected error while enqueuing batch: {e}")
@session_queue_router.get(
"/{queue_id}/list",
operation_id="list_queue_items",
responses={
200: {"model": CursorPaginatedResults[SessionQueueItem]},
},
)
async def list_queue_items(
queue_id: str = Path(description="The queue id to perform this operation on"),
limit: int = Query(default=50, description="The number of items to fetch"),
status: Optional[QUEUE_ITEM_STATUS] = Query(default=None, description="The status of items to fetch"),
cursor: Optional[int] = Query(default=None, description="The pagination cursor"),
priority: int = Query(default=0, description="The pagination cursor priority"),
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets cursor-paginated queue items"""
try:
return ApiDependencies.invoker.services.session_queue.list_queue_items(
queue_id=queue_id,
limit=limit,
status=status,
cursor=cursor,
priority=priority,
destination=destination,
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all items: {e}")
@session_queue_router.get(
"/{queue_id}/list_all",
operation_id="list_all_queue_items",
@@ -119,6 +89,56 @@ async def list_all_queue_items(
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue items: {e}")
@session_queue_router.get(
"/{queue_id}/item_ids",
operation_id="get_queue_item_ids",
responses={
200: {"model": ItemIdsResult},
},
)
async def get_queue_item_ids(
queue_id: str = Path(description="The queue id to perform this operation on"),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
) -> ItemIdsResult:
"""Gets all queue item ids that match the given parameters"""
try:
return ApiDependencies.invoker.services.session_queue.get_queue_item_ids(queue_id=queue_id, order_dir=order_dir)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue item ids: {e}")
@session_queue_router.post(
"/{queue_id}/items_by_ids",
operation_id="get_queue_items_by_item_ids",
responses={200: {"model": list[SessionQueueItem]}},
)
async def get_queue_items_by_item_ids(
queue_id: str = Path(description="The queue id to perform this operation on"),
item_ids: list[int] = Body(
embed=True, description="Object containing list of queue item ids to fetch queue items for"
),
) -> list[SessionQueueItem]:
"""Gets queue items for the specified queue item ids. Maintains order of item ids."""
try:
session_queue_service = ApiDependencies.invoker.services.session_queue
# Fetch queue items preserving the order of requested item ids
queue_items: list[SessionQueueItem] = []
for item_id in item_ids:
try:
queue_item = session_queue_service.get_queue_item(item_id=item_id)
if queue_item.queue_id != queue_id: # Auth protection for items from other queues
continue
queue_items.append(queue_item)
except Exception:
# Skip missing queue items - they may have been deleted between item id fetch and queue item fetch
continue
return queue_items
except Exception:
raise HTTPException(status_code=500, detail="Failed to get queue items")
@session_queue_router.put(
"/{queue_id}/processor/resume",
operation_id="resume",
@@ -354,7 +374,10 @@ async def get_queue_item(
) -> SessionQueueItem:
"""Gets a queue item"""
try:
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
queue_item = ApiDependencies.invoker.services.session_queue.get_queue_item(item_id=item_id)
if queue_item.queue_id != queue_id:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
return queue_item
except SessionQueueItemNotFoundError:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
except Exception as e:

View File

@@ -0,0 +1,119 @@
from typing import Optional
from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.videos_common import (
DeleteVideosResult,
StarredVideosResult,
UnstarredVideosResult,
VideoDTO,
VideoIdsResult,
VideoRecordChanges,
)
videos_router = APIRouter(prefix="/v1/videos", tags=["videos"])
@videos_router.patch(
"/i/{video_id}",
operation_id="update_video",
response_model=VideoDTO,
)
async def update_video(
video_id: str = Path(description="The id of the video to update"),
video_changes: VideoRecordChanges = Body(description="The changes to apply to the video"),
) -> VideoDTO:
"""Updates a video"""
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.get(
"/i/{video_id}",
operation_id="get_video_dto",
response_model=VideoDTO,
)
async def get_video_dto(
video_id: str = Path(description="The id of the video to get"),
) -> VideoDTO:
"""Gets a video's DTO"""
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.post("/delete", operation_id="delete_videos_from_list", response_model=DeleteVideosResult)
async def delete_videos_from_list(
video_ids: list[str] = Body(description="The list of ids of videos to delete", embed=True),
) -> DeleteVideosResult:
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.post("/star", operation_id="star_videos_in_list", response_model=StarredVideosResult)
async def star_videos_in_list(
video_ids: list[str] = Body(description="The list of ids of videos to star", embed=True),
) -> StarredVideosResult:
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.post("/unstar", operation_id="unstar_videos_in_list", response_model=UnstarredVideosResult)
async def unstar_videos_in_list(
video_ids: list[str] = Body(description="The list of ids of videos to unstar", embed=True),
) -> UnstarredVideosResult:
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.delete("/uncategorized", operation_id="delete_uncategorized_videos", response_model=DeleteVideosResult)
async def delete_uncategorized_videos() -> DeleteVideosResult:
"""Deletes all videos that are uncategorized"""
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.get("/", operation_id="list_video_dtos", response_model=OffsetPaginatedResults[VideoDTO])
async def list_video_dtos(
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
board_id: Optional[str] = Query(
default=None,
description="The board id to filter by. Use 'none' to find videos without a board.",
),
offset: int = Query(default=0, description="The page offset"),
limit: int = Query(default=10, description="The number of videos per page"),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
search_term: Optional[str] = Query(default=None, description="The term to search for"),
) -> OffsetPaginatedResults[VideoDTO]:
"""Lists video DTOs"""
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.get("/ids", operation_id="get_video_ids")
async def get_video_ids(
is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate videos."),
board_id: Optional[str] = Query(
default=None,
description="The board id to filter by. Use 'none' to find videos without a board.",
),
order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
starred_first: bool = Query(default=True, description="Whether to sort by starred videos first"),
search_term: Optional[str] = Query(default=None, description="The term to search for"),
) -> VideoIdsResult:
"""Gets ordered list of video ids with metadata for optimistic updates"""
raise HTTPException(status_code=501, detail="Not implemented")
@videos_router.post(
"/videos_by_ids",
operation_id="get_videos_by_ids",
responses={200: {"model": list[VideoDTO]}},
)
async def get_videos_by_ids(
video_ids: list[str] = Body(embed=True, description="Object containing list of video ids to fetch DTOs for"),
) -> list[VideoDTO]:
"""Gets video DTOs for the specified video ids. Maintains order of input ids."""
raise HTTPException(status_code=501, detail="Not implemented")

View File

@@ -18,6 +18,7 @@ from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
from invokeai.app.api.routers import (
app_info,
board_images,
board_videos,
boards,
client_state,
download_queue,
@@ -27,6 +28,7 @@ from invokeai.app.api.routers import (
session_queue,
style_presets,
utilities,
videos,
workflows,
)
from invokeai.app.api.sockets import SocketIO
@@ -125,8 +127,10 @@ app.include_router(utilities.utilities_router, prefix="/api")
app.include_router(model_manager.model_manager_router, prefix="/api")
app.include_router(download_queue.download_queue_router, prefix="/api")
app.include_router(images.images_router, prefix="/api")
app.include_router(videos.videos_router, prefix="/api")
app.include_router(boards.boards_router, prefix="/api")
app.include_router(board_images.board_images_router, prefix="/api")
app.include_router(board_videos.board_videos_router, prefix="/api")
app.include_router(model_relationships.model_relationships_router, prefix="/api")
app.include_router(app_info.app_router, prefix="/api")
app.include_router(session_queue.session_queue_router, prefix="/api")

View File

@@ -36,6 +36,9 @@ from pydantic_core import PydanticUndefined
from invokeai.app.invocations.fields import (
FieldKind,
Input,
InputFieldJSONSchemaExtra,
UIType,
migrate_model_ui_type,
)
from invokeai.app.services.config.config_default import get_config
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -256,7 +259,9 @@ class BaseInvocation(ABC, BaseModel):
is_intermediate: bool = Field(
default=False,
description="Whether or not this is an intermediate invocation.",
json_schema_extra={"ui_type": "IsIntermediate", "field_kind": FieldKind.NodeAttribute},
json_schema_extra=InputFieldJSONSchemaExtra(
input=Input.Direct, field_kind=FieldKind.NodeAttribute, ui_type=UIType._IsIntermediate
).model_dump(exclude_none=True),
)
use_cache: bool = Field(
default=True,
@@ -445,6 +450,15 @@ with warnings.catch_warnings():
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
def is_enum_member(value: Any, enum_class: type[Enum]) -> bool:
"""Checks if a value is a member of an enum class."""
try:
enum_class(value)
return True
except ValueError:
return False
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
    """
    Validates the fields of an invocation or invocation output:
    - Field names must not collide with names reserved by pydantic or by the node/input/output registries.
    - Every field must have a type annotation and a json_schema_extra dict containing a valid field_kind.
    - Deprecated model-specifier ui_type values are migrated to the new ui_model_[base|type|variant|format]
      keys where possible; otherwise the deprecated ui_type is dropped with a warning.

    Args:
        model_fields: The pydantic model fields of the invocation or invocation output class.
        model_type: The name of the class being validated (used in error/warning messages).

    Raises:
        InvalidFieldError: If any field fails validation.
    """
    for name, field in model_fields.items():
        if name in RESERVED_PYDANTIC_FIELD_NAMES:
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved by pydantic)")

        if not field.annotation:
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field type (missing annotation)")

        if not isinstance(field.json_schema_extra, dict):
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field definition (missing json_schema_extra dict)")

        field_kind = field.json_schema_extra.get("field_kind", None)

        # must have a field_kind; json_schema_extra is serialized with enum *values*, so compare against
        # FieldKind values rather than members
        if not is_enum_member(field_kind, FieldKind):
            raise InvalidFieldError(
                f"{model_type}.{name}: Invalid field definition (maybe it's not an InputField or OutputField?)"
            )

        if field_kind == FieldKind.Input.value and (
            name in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES or name in RESERVED_INPUT_FIELD_NAMES
        ):
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved input field name)")

        if field_kind == FieldKind.Output.value and name in RESERVED_OUTPUT_FIELD_NAMES:
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (reserved output field name)")

        if field_kind == FieldKind.Internal.value and name not in RESERVED_INPUT_FIELD_NAMES:
            raise InvalidFieldError(f"{model_type}.{name}: Invalid field name (internal field without reserved name)")

        # node attribute fields *must* be in the reserved list
        if (
            field_kind == FieldKind.NodeAttribute.value
            and name not in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES
            and name not in RESERVED_OUTPUT_FIELD_NAMES
        ):
            raise InvalidFieldError(
                f"{model_type}.{name}: Invalid field name (node attribute field without reserved name)"
            )

        ui_type = field.json_schema_extra.get("ui_type", None)
        ui_model_base = field.json_schema_extra.get("ui_model_base", None)
        ui_model_type = field.json_schema_extra.get("ui_model_type", None)
        ui_model_variant = field.json_schema_extra.get("ui_model_variant", None)
        ui_model_format = field.json_schema_extra.get("ui_model_format", None)

        if ui_type is not None:
            # There are 3 cases where we may need to take action:
            #
            # 1. The ui_type is a migratable, deprecated value. For example, ui_type=UIType.MainModel value is
            #    deprecated and should be migrated to:
            #    - ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]
            #    - ui_model_type=[ModelType.Main]
            #
            # 2. ui_type was set in conjunction with any of the new ui_model_[base|type|variant|format] fields,
            #    which is not allowed (they are mutually exclusive). In this case, we ignore ui_type and log a
            #    warning.
            #
            # 3. ui_type is a deprecated value that is not migratable. For example, ui_type=UIType.Image is
            #    deprecated; Image fields are now automatically detected based on the field's type annotation.
            #    In this case, we ignore ui_type and log a warning.
            #
            # The cases must be checked in this order to ensure proper handling.

            # Easier to work with as an enum
            ui_type = UIType(ui_type)

            # The enum member values are not always the same as their names - we want to log the name so the
            # user can easily review their code and see where the deprecated enum member is used.
            human_readable_name = f"UIType.{ui_type.name}"

            # Case 1: migratable deprecated value
            did_migrate = migrate_model_ui_type(ui_type, field.json_schema_extra)
            if did_migrate:
                logger.warning(
                    f'{model_type}.{name}: Migrated deprecated "ui_type" "{human_readable_name}" to new ui_model_[base|type|variant|format] fields'
                )
                field.json_schema_extra.pop("ui_type")
            # Case 2: mutually exclusive with new fields
            elif (
                ui_model_base is not None
                or ui_model_type is not None
                or ui_model_variant is not None
                or ui_model_format is not None
            ):
                logger.warning(
                    f'{model_type}.{name}: "ui_type" is mutually exclusive with "ui_model_[base|type|format|variant]", ignoring "ui_type"'
                )
                field.json_schema_extra.pop("ui_type")
            # Case 3: deprecated value that is not migratable
            elif ui_type.startswith("DEPRECATED_"):
                logger.warning(f'{model_type}.{name}: Deprecated "ui_type" "{human_readable_name}", ignoring')
                field.json_schema_extra.pop("ui_type")

    return None

View File

@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
# TODO(ryand): This is effectively a copy of SD3ImageToLatentsInvocation and a subset of ImageToLatentsInvocation. We
# should refactor to avoid this duplication.
@@ -36,18 +37,12 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
image: ImageField = InputField(description="The image to encode.")
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# Encode operations use approximately 50% of the memory required for decode operations
h = image_tensor.shape[-2]
w = image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 1100 # 50% of decode scaling constant (2200)
working_memory = h * w * element_size * scaling_constant
return int(working_memory)
@staticmethod
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
assert isinstance(vae_info.model, AutoencoderKL)
estimated_working_memory = estimate_vae_working_memory_cogview4(
operation="encode", image_tensor=image_tensor, vae=vae_info.model
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoencoderKL)
@@ -74,10 +69,7 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, AutoencoderKL)
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
latents = self.vae_encode(
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
)
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")
name = context.tensors.save(tensor=latents)

View File

@@ -6,7 +6,6 @@ from einops import rearrange
from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4
# TODO(ryand): This is effectively a copy of SD3LatentsToImageInvocation and a subset of LatentsToImageInvocation. We
# should refactor to avoid this duplication.
@@ -39,22 +39,15 @@ class CogView4LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
return int(working_memory)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL))
estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
estimated_working_memory = estimate_vae_working_memory_cogview4(
operation="decode", image_tensor=latents, vae=vae_info.model
)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),

View File

@@ -5,7 +5,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import (
GlmEncoderField,
ModelIdentifierField,
@@ -14,6 +14,7 @@ from invokeai.app.invocations.model import (
)
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("cogview4_model_loader_output")
@@ -38,8 +39,9 @@ class CogView4ModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.cogview4_model,
ui_type=UIType.CogView4MainModel,
input=Input.Direct,
ui_model_base=BaseModelType.CogView4,
ui_model_type=ModelType.Main,
)
def invoke(self, context: InvocationContext) -> CogView4ModelLoaderOutput:

View File

@@ -16,7 +16,6 @@ from invokeai.app.invocations.fields import (
ImageField,
InputField,
OutputField,
UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
@@ -28,6 +27,7 @@ from invokeai.app.util.controlnet_utils import (
heuristic_resize_fast,
)
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class ControlField(BaseModel):
@@ -63,13 +63,17 @@ class ControlOutput(BaseInvocationOutput):
control: ControlField = OutputField(description=FieldDescriptions.control)
@invocation("controlnet", title="ControlNet - SD1.5, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3")
@invocation(
"controlnet", title="ControlNet - SD1.5, SD2, SDXL", tags=["controlnet"], category="controlnet", version="1.1.3"
)
class ControlNetInvocation(BaseInvocation):
"""Collects ControlNet info to pass to other nodes"""
image: ImageField = InputField(description="The control image")
control_model: ModelIdentifierField = InputField(
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
description=FieldDescriptions.controlnet_model,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.ControlNet,
)
control_weight: Union[float, List[float]] = InputField(
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"

View File

@@ -1,11 +1,19 @@
from enum import Enum
from typing import Any, Callable, Optional, Tuple
from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, model_validator
from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter
from pydantic.fields import _Unset
from pydantic_core import PydanticUndefined
from invokeai.app.util.metaenum import MetaEnum
from invokeai.backend.image_util.segment_anything.shared import BoundingBox
from invokeai.backend.model_manager.taxonomy import (
BaseModelType,
ClipVariantType,
ModelFormat,
ModelType,
ModelVariantType,
)
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
@@ -38,35 +46,6 @@ class UIType(str, Enum, metaclass=MetaEnum):
used, and the type will be ignored. They are included here for backwards compatibility.
"""
# region Model Field Types
MainModel = "MainModelField"
CogView4MainModel = "CogView4MainModelField"
FluxMainModel = "FluxMainModelField"
SD3MainModel = "SD3MainModelField"
SDXLMainModel = "SDXLMainModelField"
SDXLRefinerModel = "SDXLRefinerModelField"
ONNXModel = "ONNXModelField"
VAEModel = "VAEModelField"
FluxVAEModel = "FluxVAEModelField"
LoRAModel = "LoRAModelField"
ControlNetModel = "ControlNetModelField"
IPAdapterModel = "IPAdapterModelField"
T2IAdapterModel = "T2IAdapterModelField"
T5EncoderModel = "T5EncoderModelField"
CLIPEmbedModel = "CLIPEmbedModelField"
CLIPLEmbedModel = "CLIPLEmbedModelField"
CLIPGEmbedModel = "CLIPGEmbedModelField"
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
ControlLoRAModel = "ControlLoRAModelField"
SigLipModel = "SigLipModelField"
FluxReduxModel = "FluxReduxModelField"
LlavaOnevisionModel = "LLaVAModelField"
Imagen3Model = "Imagen3ModelField"
Imagen4Model = "Imagen4ModelField"
ChatGPT4oModel = "ChatGPT4oModelField"
FluxKontextModel = "FluxKontextModelField"
# endregion
# region Misc Field Types
Scheduler = "SchedulerField"
Any = "AnyField"
@@ -75,6 +54,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
# region Internal Field Types
_Collection = "CollectionField"
_CollectionItem = "CollectionItemField"
_IsIntermediate = "IsIntermediate"
# endregion
# region DEPRECATED
@@ -112,13 +92,44 @@ class UIType(str, Enum, metaclass=MetaEnum):
CollectionItem = "DEPRECATED_CollectionItem"
Enum = "DEPRECATED_Enum"
WorkflowField = "DEPRECATED_WorkflowField"
IsIntermediate = "DEPRECATED_IsIntermediate"
BoardField = "DEPRECATED_BoardField"
MetadataItem = "DEPRECATED_MetadataItem"
MetadataItemCollection = "DEPRECATED_MetadataItemCollection"
MetadataItemPolymorphic = "DEPRECATED_MetadataItemPolymorphic"
MetadataDict = "DEPRECATED_MetadataDict"
# Deprecated Model Field Types - use ui_model_[base|type|variant|format] instead
MainModel = "DEPRECATED_MainModelField"
CogView4MainModel = "DEPRECATED_CogView4MainModelField"
FluxMainModel = "DEPRECATED_FluxMainModelField"
SD3MainModel = "DEPRECATED_SD3MainModelField"
SDXLMainModel = "DEPRECATED_SDXLMainModelField"
SDXLRefinerModel = "DEPRECATED_SDXLRefinerModelField"
ONNXModel = "DEPRECATED_ONNXModelField"
VAEModel = "DEPRECATED_VAEModelField"
FluxVAEModel = "DEPRECATED_FluxVAEModelField"
LoRAModel = "DEPRECATED_LoRAModelField"
ControlNetModel = "DEPRECATED_ControlNetModelField"
IPAdapterModel = "DEPRECATED_IPAdapterModelField"
T2IAdapterModel = "DEPRECATED_T2IAdapterModelField"
T5EncoderModel = "DEPRECATED_T5EncoderModelField"
CLIPEmbedModel = "DEPRECATED_CLIPEmbedModelField"
CLIPLEmbedModel = "DEPRECATED_CLIPLEmbedModelField"
CLIPGEmbedModel = "DEPRECATED_CLIPGEmbedModelField"
SpandrelImageToImageModel = "DEPRECATED_SpandrelImageToImageModelField"
ControlLoRAModel = "DEPRECATED_ControlLoRAModelField"
SigLipModel = "DEPRECATED_SigLipModelField"
FluxReduxModel = "DEPRECATED_FluxReduxModelField"
LlavaOnevisionModel = "DEPRECATED_LLaVAModelField"
Imagen3Model = "DEPRECATED_Imagen3ModelField"
Imagen4Model = "DEPRECATED_Imagen4ModelField"
ChatGPT4oModel = "DEPRECATED_ChatGPT4oModelField"
Gemini2_5Model = "DEPRECATED_Gemini2_5ModelField"
FluxKontextModel = "DEPRECATED_FluxKontextModelField"
Veo3Model = "DEPRECATED_Veo3ModelField"
RunwayModel = "DEPRECATED_RunwayModelField"
# endregion
class UIComponent(str, Enum, metaclass=MetaEnum):
"""
@@ -224,6 +235,12 @@ class ImageField(BaseModel):
image_name: str = Field(description="The name of the image")
class VideoField(BaseModel):
"""A video primitive field"""
video_id: str = Field(description="The id of the video")
class BoardField(BaseModel):
"""A board primitive field"""
@@ -321,14 +338,9 @@ class ConditioningField(BaseModel):
)
class BoundingBoxField(BaseModel):
class BoundingBoxField(BoundingBox):
"""A bounding box primitive value."""
x_min: int = Field(ge=0, description="The minimum x-coordinate of the bounding box (inclusive).")
x_max: int = Field(ge=0, description="The maximum x-coordinate of the bounding box (exclusive).")
y_min: int = Field(ge=0, description="The minimum y-coordinate of the bounding box (inclusive).")
y_max: int = Field(ge=0, description="The maximum y-coordinate of the bounding box (exclusive).")
score: Optional[float] = Field(
default=None,
ge=0.0,
@@ -337,21 +349,6 @@ class BoundingBoxField(BaseModel):
"when the bounding box was produced by a detector and has an associated confidence score.",
)
@model_validator(mode="after")
def check_coords(self):
if self.x_min > self.x_max:
raise ValueError(f"x_min ({self.x_min}) is greater than x_max ({self.x_max}).")
if self.y_min > self.y_max:
raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
return self
def tuple(self) -> Tuple[int, int, int, int]:
"""
Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.
This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
"""
return (self.x_min, self.y_min, self.x_max, self.y_max)
class MetadataField(RootModel[dict[str, Any]]):
"""
@@ -418,10 +415,15 @@ class InputFieldJSONSchemaExtra(BaseModel):
ui_component: Optional[UIComponent] = None
ui_order: Optional[int] = None
ui_choice_labels: Optional[dict[str, str]] = None
ui_model_base: Optional[list[BaseModelType]] = None
ui_model_type: Optional[list[ModelType]] = None
ui_model_variant: Optional[list[ClipVariantType | ModelVariantType]] = None
ui_model_format: Optional[list[ModelFormat]] = None
model_config = ConfigDict(
validate_assignment=True,
json_schema_serialization_defaults_required=True,
use_enum_values=True,
)
@@ -474,16 +476,121 @@ class OutputFieldJSONSchemaExtra(BaseModel):
"""
field_kind: FieldKind
ui_hidden: bool
ui_type: Optional[UIType]
ui_order: Optional[int]
ui_hidden: bool = False
ui_order: Optional[int] = None
ui_type: Optional[UIType] = None
model_config = ConfigDict(
validate_assignment=True,
json_schema_serialization_defaults_required=True,
use_enum_values=True,
)
def migrate_model_ui_type(ui_type: UIType | str, json_schema_extra: dict[str, Any]) -> bool:
    """Migrate deprecated model-specifier ui_type values to new-style ui_model_[base|type|variant|format] in json_schema_extra.

    Args:
        ui_type: The (possibly deprecated) UI type. A raw string is coerced to a `UIType` member first.
        json_schema_extra: The field's json_schema_extra dict; migrated keys are written into it in place.

    Returns:
        True if any ui_model_* key was written to json_schema_extra (i.e. a migration occurred), else False.
        Unrecognized ui_type values fall through the match and return False without modifying the dict.
    """
    if not isinstance(ui_type, UIType):
        ui_type = UIType(ui_type)

    # Accumulators for the new-style specifiers implied by the deprecated ui_type; None means "not set".
    ui_model_type: list[ModelType] | None = None
    ui_model_base: list[BaseModelType] | None = None
    ui_model_format: list[ModelFormat] | None = None
    ui_model_variant: list[ClipVariantType | ModelVariantType] | None = None

    match ui_type:
        case UIType.MainModel:
            # Legacy "MainModel" meant SD1/SD2 main models specifically.
            ui_model_base = [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]
            ui_model_type = [ModelType.Main]
        case UIType.CogView4MainModel:
            ui_model_base = [BaseModelType.CogView4]
            ui_model_type = [ModelType.Main]
        case UIType.FluxMainModel:
            ui_model_base = [BaseModelType.Flux]
            ui_model_type = [ModelType.Main]
        case UIType.SD3MainModel:
            ui_model_base = [BaseModelType.StableDiffusion3]
            ui_model_type = [ModelType.Main]
        case UIType.SDXLMainModel:
            ui_model_base = [BaseModelType.StableDiffusionXL]
            ui_model_type = [ModelType.Main]
        case UIType.SDXLRefinerModel:
            ui_model_base = [BaseModelType.StableDiffusionXLRefiner]
            ui_model_type = [ModelType.Main]
        case UIType.VAEModel:
            # No base constraint - any VAE.
            ui_model_type = [ModelType.VAE]
        case UIType.FluxVAEModel:
            ui_model_base = [BaseModelType.Flux]
            ui_model_type = [ModelType.VAE]
        case UIType.LoRAModel:
            ui_model_type = [ModelType.LoRA]
        case UIType.ControlNetModel:
            ui_model_type = [ModelType.ControlNet]
        case UIType.IPAdapterModel:
            ui_model_type = [ModelType.IPAdapter]
        case UIType.T2IAdapterModel:
            ui_model_type = [ModelType.T2IAdapter]
        case UIType.T5EncoderModel:
            ui_model_type = [ModelType.T5Encoder]
        case UIType.CLIPEmbedModel:
            ui_model_type = [ModelType.CLIPEmbed]
        case UIType.CLIPLEmbedModel:
            # CLIP-L / CLIP-G are distinguished from generic CLIPEmbed via the variant specifier.
            ui_model_type = [ModelType.CLIPEmbed]
            ui_model_variant = [ClipVariantType.L]
        case UIType.CLIPGEmbedModel:
            ui_model_type = [ModelType.CLIPEmbed]
            ui_model_variant = [ClipVariantType.G]
        case UIType.SpandrelImageToImageModel:
            ui_model_type = [ModelType.SpandrelImageToImage]
        case UIType.ControlLoRAModel:
            ui_model_type = [ModelType.ControlLoRa]
        case UIType.SigLipModel:
            ui_model_type = [ModelType.SigLIP]
        case UIType.FluxReduxModel:
            ui_model_type = [ModelType.FluxRedux]
        case UIType.LlavaOnevisionModel:
            ui_model_type = [ModelType.LlavaOnevision]
        case UIType.Imagen3Model:
            ui_model_base = [BaseModelType.Imagen3]
            ui_model_type = [ModelType.Main]
        case UIType.Imagen4Model:
            ui_model_base = [BaseModelType.Imagen4]
            ui_model_type = [ModelType.Main]
        case UIType.ChatGPT4oModel:
            ui_model_base = [BaseModelType.ChatGPT4o]
            ui_model_type = [ModelType.Main]
        case UIType.Gemini2_5Model:
            ui_model_base = [BaseModelType.Gemini2_5]
            ui_model_type = [ModelType.Main]
        case UIType.FluxKontextModel:
            ui_model_base = [BaseModelType.FluxKontext]
            ui_model_type = [ModelType.Main]
        case UIType.Veo3Model:
            ui_model_base = [BaseModelType.Veo3]
            ui_model_type = [ModelType.Video]
        case UIType.RunwayModel:
            ui_model_base = [BaseModelType.Runway]
            ui_model_type = [ModelType.Video]
        case _:
            # Not a migratable model-specifier ui_type - leave json_schema_extra untouched.
            pass

    did_migrate = False

    # Serialize enum members to their raw values so json_schema_extra stays JSON-friendly.
    if ui_model_type is not None:
        json_schema_extra["ui_model_type"] = [m.value for m in ui_model_type]
        did_migrate = True
    if ui_model_base is not None:
        json_schema_extra["ui_model_base"] = [m.value for m in ui_model_base]
        did_migrate = True
    if ui_model_format is not None:
        # NOTE(review): no deprecated ui_type currently maps to a format constraint, so this branch
        # is presently unreachable - kept for symmetry with the other specifiers.
        json_schema_extra["ui_model_format"] = [m.value for m in ui_model_format]
        did_migrate = True
    if ui_model_variant is not None:
        json_schema_extra["ui_model_variant"] = [m.value for m in ui_model_variant]
        did_migrate = True

    return did_migrate
def InputField(
# copied from pydantic's Field
# TODO: Can we support default_factory?
@@ -510,35 +617,63 @@ def InputField(
ui_hidden: Optional[bool] = None,
ui_order: Optional[int] = None,
ui_choice_labels: Optional[dict[str, str]] = None,
ui_model_base: Optional[BaseModelType | list[BaseModelType]] = None,
ui_model_type: Optional[ModelType | list[ModelType]] = None,
ui_model_variant: Optional[ClipVariantType | ModelVariantType | list[ClipVariantType | ModelVariantType]] = None,
ui_model_format: Optional[ModelFormat | list[ModelFormat]] = None,
) -> Any:
"""
Creates an input field for an invocation.
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field) \
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field)
that adds a few extra parameters to support graph execution and the node editor UI.
:param Input input: [Input.Any] The kind of input this field requires. \
`Input.Direct` means a value must be provided on instantiation. \
`Input.Connection` means the value must be provided by a connection. \
`Input.Any` means either will do.
If the field is a `ModelIdentifierField`, use the `ui_model_[base|type|variant|format]` args to filter the model list
in the Workflow Editor. Otherwise, use `ui_type` to provide extra type hints for the UI.
:param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
In some situations, the field's type is not enough to infer the correct UI type. \
For example, model selection fields should render a dropdown UI component to select a model. \
Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
`MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
`UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.
Don't use both `ui_type` and `ui_model_[base|type|variant|format]` - if both are provided, a warning will be
logged and `ui_type` will be ignored.
:param UIComponent ui_component: [None] Optionally specifies a specific component to use in the UI. \
The UI will always render a suitable component, but sometimes you want something different than the default. \
For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \
For this case, you could provide `UIComponent.Textarea`.
Args:
input: The kind of input this field requires.
- `Input.Direct` means a value must be provided on instantiation.
- `Input.Connection` means the value must be provided by a connection.
- `Input.Any` means either will do.
:param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.
ui_type: Optionally provides an extra type hint for the UI. In some situations, the field's type is not enough
to infer the correct UI type. For example, Scheduler fields are enums, but we want to render a special scheduler
dropdown in the UI. Use `UIType.Scheduler` to indicate this.
:param int ui_order: [None] Specifies the order in which this field should be rendered in the UI.
ui_component: Optionally specifies a specific component to use in the UI. The UI will always render a suitable
component, but sometimes you want something different than the default. For example, a `string` field will
default to a single-line input, but you may want a multi-line textarea instead. In this case, you could use
`UIComponent.Textarea`.
:param dict[str, str] ui_choice_labels: [None] Specifies the labels to use for the choices in an enum field.
ui_hidden: Specifies whether or not this field should be hidden in the UI.
ui_order: Specifies the order in which this field should be rendered in the UI. If omitted, the field will be
rendered after all fields with an explicit order, in the order they are defined in the Invocation class.
ui_model_base: Specifies the base model architectures to filter the model list by in the Workflow Editor. For
example, `ui_model_base=BaseModelType.StableDiffusionXL` will show only SDXL architecture models. This arg is
only valid if this Input field is annotated as a `ModelIdentifierField`.
ui_model_type: Specifies the model type(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_type=ModelType.VAE` will show only VAE models. This arg is only valid if this Input field is
annotated as a `ModelIdentifierField`.
ui_model_variant: Specifies the model variant(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_variant=ModelVariantType.Inpainting` will show only inpainting models. This arg is only valid if this
Input field is annotated as a `ModelIdentifierField`.
ui_model_format: Specifies the model format(s) to filter the model list by in the Workflow Editor. For example,
`ui_model_format=ModelFormat.Diffusers` will show only models in the diffusers format. This arg is only valid
if this Input field is annotated as a `ModelIdentifierField`.
ui_choice_labels: Specifies the labels to use for the choices in an enum field. If omitted, the enum values
will be used. This arg is only valid if the field is annotated with as a `Literal`. For example,
`Literal["choice1", "choice2", "choice3"]` with `ui_choice_labels={"choice1": "Choice 1", "choice2": "Choice 2",
"choice3": "Choice 3"}` will render a dropdown with the labels "Choice 1", "Choice 2" and "Choice 3".
"""
json_schema_extra_ = InputFieldJSONSchemaExtra(
@@ -546,8 +681,6 @@ def InputField(
field_kind=FieldKind.Input,
)
if ui_type is not None:
json_schema_extra_.ui_type = ui_type
if ui_component is not None:
json_schema_extra_.ui_component = ui_component
if ui_hidden is not None:
@@ -556,6 +689,28 @@ def InputField(
json_schema_extra_.ui_order = ui_order
if ui_choice_labels is not None:
json_schema_extra_.ui_choice_labels = ui_choice_labels
if ui_model_base is not None:
if isinstance(ui_model_base, list):
json_schema_extra_.ui_model_base = ui_model_base
else:
json_schema_extra_.ui_model_base = [ui_model_base]
if ui_model_type is not None:
if isinstance(ui_model_type, list):
json_schema_extra_.ui_model_type = ui_model_type
else:
json_schema_extra_.ui_model_type = [ui_model_type]
if ui_model_variant is not None:
if isinstance(ui_model_variant, list):
json_schema_extra_.ui_model_variant = ui_model_variant
else:
json_schema_extra_.ui_model_variant = [ui_model_variant]
if ui_model_format is not None:
if isinstance(ui_model_format, list):
json_schema_extra_.ui_model_format = ui_model_format
else:
json_schema_extra_.ui_model_format = [ui_model_format]
if ui_type is not None:
json_schema_extra_.ui_type = ui_type
"""
There is a conflict between the typing of invocation definitions and the typing of an invocation's
@@ -657,20 +812,20 @@ def OutputField(
"""
Creates an output field for an invocation output.
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \
This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization)
that adds a few extra parameters to support graph execution and the node editor UI.
:param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
In some situations, the field's type is not enough to infer the correct UI type. \
For example, model selection fields should render a dropdown UI component to select a model. \
Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
`MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
`UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.
Args:
ui_type: Optionally provides an extra type hint for the UI. In some situations, the field's type is not enough
to infer the correct UI type. For example, Scheduler fields are enums, but we want to render a special scheduler
dropdown in the UI. Use `UIType.Scheduler` to indicate this.
:param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \
ui_hidden: Specifies whether or not this field should be hidden in the UI.
:param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \
ui_order: Specifies the order in which this field should be rendered in the UI. If omitted, the field will be
rendered after all fields with an explicit order, in the order they are defined in the Invocation class.
"""
return Field(
default=default,
title=title,
@@ -688,9 +843,9 @@ def OutputField(
min_length=min_length,
max_length=max_length,
json_schema_extra=OutputFieldJSONSchemaExtra(
ui_type=ui_type,
ui_hidden=ui_hidden,
ui_order=ui_order,
ui_type=ui_type,
field_kind=FieldKind.Output,
).model_dump(exclude_none=True),
)

View File

@@ -4,9 +4,10 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ControlLoRAField, ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("flux_control_lora_loader_output")
@@ -29,7 +30,10 @@ class FluxControlLoRALoaderInvocation(BaseInvocation):
"""LoRA model and Image to use with FLUX transformer generation."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
description=FieldDescriptions.control_lora_model,
title="Control LoRA",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.ControlLoRa,
)
image: ImageField = InputField(description="The image to encode.")
weight: float = InputField(description="The weight of the LoRA.", default=1.0)

View File

@@ -6,11 +6,12 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class FluxControlNetField(BaseModel):
@@ -57,7 +58,9 @@ class FluxControlNetInvocation(BaseInvocation):
image: ImageField = InputField(description="The control image")
control_model: ModelIdentifierField = InputField(
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
description=FieldDescriptions.controlnet_model,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.ControlNet,
)
control_weight: float | list[float] = InputField(
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"

View File

@@ -5,7 +5,7 @@ from pydantic import field_validator, model_validator
from typing_extensions import Self
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import InputField, UIType
from invokeai.app.invocations.fields import InputField
from invokeai.app.invocations.ip_adapter import (
CLIP_VISION_MODEL_MAP,
IPAdapterField,
@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.config import (
IPAdapterCheckpointConfig,
IPAdapterInvokeAIConfig,
)
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation(
@@ -36,7 +37,10 @@ class FluxIPAdapterInvocation(BaseInvocation):
image: ImageField = InputField(description="The IP-Adapter image prompt(s).")
ip_adapter_model: ModelIdentifierField = InputField(
description="The IP-Adapter model.", title="IP-Adapter Model", ui_type=UIType.IPAdapterModel
description="The IP-Adapter model.",
title="IP-Adapter Model",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.IPAdapter,
)
# Currently, the only known ViT model used by FLUX IP-Adapters is ViT-L.
clip_vision_model: Literal["ViT-L"] = InputField(description="CLIP Vision model to use.", default="ViT-L")

View File

@@ -6,10 +6,10 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import BaseModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
@invocation_output("flux_lora_loader_output")
@@ -36,7 +36,10 @@ class FluxLoRALoaderInvocation(BaseInvocation):
"""Apply a LoRA model to a FLUX transformer and/or text encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
transformer: TransformerField | None = InputField(

View File

@@ -6,7 +6,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
@@ -17,7 +17,7 @@ from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
CheckpointConfigBase,
)
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@invocation_output("flux_model_loader_output")
@@ -46,23 +46,30 @@ class FluxModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.flux_model,
ui_type=UIType.FluxMainModel,
input=Input.Direct,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.Main,
)
t5_encoder_model: ModelIdentifierField = InputField(
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
description=FieldDescriptions.t5_encoder,
input=Input.Direct,
title="T5 Encoder",
ui_model_type=ModelType.T5Encoder,
)
clip_embed_model: ModelIdentifierField = InputField(
description=FieldDescriptions.clip_embed_model,
ui_type=UIType.CLIPEmbedModel,
input=Input.Direct,
title="CLIP Embed",
ui_model_type=ModelType.CLIPEmbed,
)
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
description=FieldDescriptions.vae_model,
title="VAE",
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:

View File

@@ -18,7 +18,6 @@ from invokeai.app.invocations.fields import (
InputField,
OutputField,
TensorField,
UIType,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
@@ -64,7 +63,8 @@ class FluxReduxInvocation(BaseInvocation):
redux_model: ModelIdentifierField = InputField(
description="The FLUX Redux model to use.",
title="FLUX Redux Model",
ui_type=UIType.FluxReduxModel,
ui_model_base=BaseModelType.Flux,
ui_model_type=ModelType.FluxRedux,
)
downsampling_factor: int = InputField(
ge=1,

View File

@@ -3,7 +3,6 @@ from einops import rearrange
from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -18,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux
@invocation(
@@ -39,17 +39,11 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
input=Input.Connection,
)
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
"""Estimate the working memory required by the invocation in bytes."""
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
return int(working_memory)
def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
assert isinstance(vae_info.model, AutoEncoder)
estimated_working_memory = estimate_vae_working_memory_flux(
operation="decode", image_tensor=latents, vae=vae_info.model
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoEncoder)
vae_dtype = next(iter(vae.parameters())).dtype

View File

@@ -15,6 +15,7 @@ from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux
@invocation(
@@ -35,22 +36,16 @@ class FluxVaeEncodeInvocation(BaseInvocation):
input=Input.Connection,
)
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoEncoder) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# Encode operations use approximately 50% of the memory required for decode operations
h = image_tensor.shape[-2]
w = image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 1100 # 50% of decode scaling constant (2200)
working_memory = h * w * element_size * scaling_constant
return int(working_memory)
@staticmethod
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
# TODO(ryand): Expose seed parameter at the invocation level.
# TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
# There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
# should be used for VAE encode sampling.
assert isinstance(vae_info.model, AutoEncoder)
estimated_working_memory = estimate_vae_working_memory_flux(
operation="encode", image_tensor=image_tensor, vae=vae_info.model
)
generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoEncoder)
@@ -70,10 +65,7 @@ class FluxVaeEncodeInvocation(BaseInvocation):
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
context.util.signal_progress("Running VAE")
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
latents = self.vae_encode(
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
)
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")
name = context.tensors.save(tensor=latents)

View File

@@ -27,6 +27,7 @@ from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl
@invocation(
@@ -52,47 +53,23 @@ class ImageToLatentsInvocation(BaseInvocation):
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
def _estimate_working_memory(
self, image_tensor: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# Encode operations use approximately 50% of the memory required for decode operations
element_size = 4 if self.fp32 else 2
scaling_constant = 1100 # 50% of decode scaling constant (2200)
if use_tiling:
tile_size = self.tile_size
if tile_size == 0:
tile_size = vae.tile_sample_min_size
assert isinstance(tile_size, int)
h = tile_size
w = tile_size
working_memory = h * w * element_size * scaling_constant
# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
# and number of tiles. We could make this more precise in the future, but this should be good enough for
# most use cases.
working_memory = working_memory * 1.25
else:
h = image_tensor.shape[-2]
w = image_tensor.shape[-1]
working_memory = h * w * element_size * scaling_constant
if self.fp32:
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20
return int(working_memory)
@staticmethod
@classmethod
def vae_encode(
cls,
vae_info: LoadedModel,
upcast: bool,
tiled: bool,
image_tensor: torch.Tensor,
tile_size: int = 0,
estimated_working_memory: int = 0,
) -> torch.Tensor:
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
operation="encode",
image_tensor=image_tensor,
vae=vae_info.model,
tile_size=tile_size if tiled else None,
fp32=upcast,
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
orig_dtype = vae.dtype
@@ -156,17 +133,13 @@ class ImageToLatentsInvocation(BaseInvocation):
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
use_tiling = self.tiled or context.config.get().force_tiled_decode
estimated_working_memory = self._estimate_working_memory(image_tensor, use_tiling, vae_info.model)
context.util.signal_progress("Running VAE encoder")
latents = self.vae_encode(
vae_info=vae_info,
upcast=self.fp32,
tiled=self.tiled,
tiled=self.tiled or context.config.get().force_tiled_decode,
image_tensor=image_tensor,
tile_size=self.tile_size,
estimated_working_memory=estimated_working_memory,
)
latents = latents.to("cpu")

View File

@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field, field_validator, model_validator
from typing_extensions import Self
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
@@ -85,7 +85,8 @@ class IPAdapterInvocation(BaseInvocation):
description="The IP-Adapter model.",
title="IP-Adapter Model",
ui_order=-1,
ui_type=UIType.IPAdapterModel,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.IPAdapter,
)
clip_vision_model: Literal["ViT-H", "ViT-G", "ViT-L"] = InputField(
description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",

View File

@@ -27,6 +27,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl
@invocation(
@@ -53,39 +54,6 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
def _estimate_working_memory(
self, latents: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision). This estimate is accurate for both SD1 and SDXL.
element_size = 4 if self.fp32 else 2
scaling_constant = 2200 # Determined experimentally.
if use_tiling:
tile_size = self.tile_size
if tile_size == 0:
tile_size = vae.tile_sample_min_size
assert isinstance(tile_size, int)
out_h = tile_size
out_w = tile_size
working_memory = out_h * out_w * element_size * scaling_constant
# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
# and number of tiles. We could make this more precise in the future, but this should be good enough for
# most use cases.
working_memory = working_memory * 1.25
else:
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
working_memory = out_h * out_w * element_size * scaling_constant
if self.fp32:
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20
return int(working_memory)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
@@ -94,8 +62,13 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
estimated_working_memory = self._estimate_working_memory(latents, use_tiling, vae_info.model)
estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
operation="decode",
image_tensor=latents,
vae=vae_info.model,
tile_size=self.tile_size if use_tiling else None,
fp32=self.fp32,
)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),

View File

@@ -6,11 +6,12 @@ from pydantic import field_validator
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import StringOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.llava_onevision_pipeline import LlavaOnevisionPipeline
from invokeai.backend.model_manager.taxonomy import ModelType
from invokeai.backend.util.devices import TorchDevice
@@ -34,7 +35,7 @@ class LlavaOnevisionVllmInvocation(BaseInvocation):
vllm_model: ModelIdentifierField = InputField(
title="LLaVA Model Type",
description=FieldDescriptions.vllm_model,
ui_type=UIType.LlavaOnevisionModel,
ui_model_type=ModelType.LlavaOnevision,
)
@field_validator("images", mode="before")

View File

@@ -53,7 +53,7 @@ from invokeai.app.invocations.primitives import (
from invokeai.app.invocations.scheduler import SchedulerOutput
from invokeai.app.invocations.t2i_adapter import T2IAdapterField, T2IAdapterInvocation
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import ModelType, SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
from invokeai.version import __version__
@@ -473,7 +473,6 @@ class MetadataToModelOutput(BaseInvocationOutput):
model: ModelIdentifierField = OutputField(
description=FieldDescriptions.main_model,
title="Model",
ui_type=UIType.MainModel,
)
name: str = OutputField(description="Model Name", title="Name")
unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -488,7 +487,6 @@ class MetadataToSDXLModelOutput(BaseInvocationOutput):
model: ModelIdentifierField = OutputField(
description=FieldDescriptions.main_model,
title="Model",
ui_type=UIType.SDXLMainModel,
)
name: str = OutputField(description="Model Name", title="Name")
unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
@@ -519,8 +517,7 @@ class MetadataToModelInvocation(BaseInvocation, WithMetadata):
input=Input.Direct,
)
default_value: ModelIdentifierField = InputField(
description="The default model to use if not found in the metadata",
ui_type=UIType.MainModel,
description="The default model to use if not found in the metadata", ui_model_type=ModelType.Main
)
_validate_custom_label = model_validator(mode="after")(validate_custom_label)
@@ -575,7 +572,8 @@ class MetadataToSDXLModelInvocation(BaseInvocation, WithMetadata):
)
default_value: ModelIdentifierField = InputField(
description="The default SDXL Model to use if not found in the metadata",
ui_type=UIType.SDXLMainModel,
ui_model_type=ModelType.Main,
ui_model_base=BaseModelType.StableDiffusionXL,
)
_validate_custom_label = model_validator(mode="after")(validate_custom_label)

View File

@@ -9,7 +9,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager.config import (
@@ -145,7 +145,7 @@ class ModelIdentifierInvocation(BaseInvocation):
@invocation(
"main_model_loader",
title="Main Model - SD1.5",
title="Main Model - SD1.5, SD2",
tags=["model"],
category="model",
version="1.0.4",
@@ -153,7 +153,11 @@ class ModelIdentifierInvocation(BaseInvocation):
class MainModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""
model: ModelIdentifierField = InputField(description=FieldDescriptions.main_model, ui_type=UIType.MainModel)
model: ModelIdentifierField = InputField(
description=FieldDescriptions.main_model,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2],
ui_model_type=ModelType.Main,
)
# TODO: precision?
def invoke(self, context: InvocationContext) -> ModelLoaderOutput:
@@ -187,7 +191,10 @@ class LoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.StableDiffusion1,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
@@ -250,7 +257,9 @@ class LoRASelectorInvocation(BaseInvocation):
"""Selects a LoRA model and weight."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
@@ -332,7 +341,10 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model,
title="LoRA",
ui_model_base=BaseModelType.StableDiffusionXL,
ui_model_type=ModelType.LoRA,
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
@@ -473,13 +485,26 @@ class SDXLLoRACollectionLoader(BaseInvocation):
@invocation(
"vae_loader", title="VAE Model - SD1.5, SDXL, SD3, FLUX", tags=["vae", "model"], category="model", version="1.0.4"
"vae_loader",
title="VAE Model - SD1.5, SD2, SDXL, SD3, FLUX",
tags=["vae", "model"],
category="model",
version="1.0.4",
)
class VAELoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput"""
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, title="VAE", ui_type=UIType.VAEModel
description=FieldDescriptions.vae_model,
title="VAE",
ui_model_base=[
BaseModelType.StableDiffusion1,
BaseModelType.StableDiffusion2,
BaseModelType.StableDiffusionXL,
BaseModelType.StableDiffusion3,
BaseModelType.Flux,
],
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> VAEOutput:

View File

@@ -27,6 +27,7 @@ from invokeai.app.invocations.fields import (
SD3ConditioningField,
TensorField,
UIComponent,
VideoField,
)
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -287,6 +288,30 @@ class ImageCollectionInvocation(BaseInvocation):
return ImageCollectionOutput(collection=self.collection)
# endregion
# region Video
@invocation_output("video_output")
class VideoOutput(BaseInvocationOutput):
"""Base class for nodes that output a video"""
video: VideoField = OutputField(description="The output video")
width: int = OutputField(description="The width of the video in pixels")
height: int = OutputField(description="The height of the video in pixels")
duration_seconds: float = OutputField(description="The duration of the video in seconds")
@classmethod
def build(cls, video_id: str, width: int, height: int, duration_seconds: float) -> "VideoOutput":
return cls(
video=VideoField(video_id=video_id),
width=width,
height=height,
duration_seconds=duration_seconds,
)
# endregion
# region DenoiseMask

View File

@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3
@invocation(
@@ -32,18 +33,12 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
image: ImageField = InputField(description="The image to encode")
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
def _estimate_working_memory(self, image_tensor: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# Encode operations use approximately 50% of the memory required for decode operations
h = image_tensor.shape[-2]
w = image_tensor.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 1100 # 50% of decode scaling constant (2200)
working_memory = h * w * element_size * scaling_constant
return int(working_memory)
@staticmethod
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor, estimated_working_memory: int) -> torch.Tensor:
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
assert isinstance(vae_info.model, AutoencoderKL)
estimated_working_memory = estimate_vae_working_memory_sd3(
operation="encode", image_tensor=image_tensor, vae=vae_info.model
)
with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
assert isinstance(vae, AutoencoderKL)
@@ -70,10 +65,7 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, AutoencoderKL)
estimated_working_memory = self._estimate_working_memory(image_tensor, vae_info.model)
latents = self.vae_encode(
vae_info=vae_info, image_tensor=image_tensor, estimated_working_memory=estimated_working_memory
)
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
latents = latents.to("cpu")
name = context.tensors.save(tensor=latents)

View File

@@ -6,7 +6,6 @@ from einops import rearrange
from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3
@invocation(
@@ -41,22 +41,15 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
input=Input.Connection,
)
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 2200 # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant
return int(working_memory)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL))
estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
estimated_working_memory = estimate_vae_working_memory_sd3(
operation="decode", image_tensor=latents, vae=vae_info.model
)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),

View File

@@ -6,14 +6,14 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
preprocess_t5_encoder_model_identifier,
preprocess_t5_tokenizer_model_identifier,
)
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ClipVariantType, ModelType, SubModelType
@invocation_output("sd3_model_loader_output")
@@ -39,36 +39,43 @@ class Sd3ModelLoaderInvocation(BaseInvocation):
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sd3_model,
ui_type=UIType.SD3MainModel,
input=Input.Direct,
ui_model_base=BaseModelType.StableDiffusion3,
ui_model_type=ModelType.Main,
)
t5_encoder_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.t5_encoder,
ui_type=UIType.T5EncoderModel,
input=Input.Direct,
title="T5 Encoder",
default=None,
ui_model_type=ModelType.T5Encoder,
)
clip_l_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.clip_embed_model,
ui_type=UIType.CLIPLEmbedModel,
input=Input.Direct,
title="CLIP L Encoder",
default=None,
ui_model_type=ModelType.CLIPEmbed,
ui_model_variant=ClipVariantType.L,
)
clip_g_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.clip_g_model,
ui_type=UIType.CLIPGEmbedModel,
input=Input.Direct,
title="CLIP G Encoder",
default=None,
ui_model_type=ModelType.CLIPEmbed,
ui_model_variant=ClipVariantType.G,
)
vae_model: Optional[ModelIdentifierField] = InputField(
description=FieldDescriptions.vae_model, ui_type=UIType.VAEModel, title="VAE", default=None
description=FieldDescriptions.vae_model,
title="VAE",
default=None,
ui_model_base=BaseModelType.StableDiffusion3,
ui_model_type=ModelType.VAE,
)
def invoke(self, context: InvocationContext) -> Sd3ModelLoaderOutput:

View File

@@ -1,8 +1,8 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import SubModelType
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType, SubModelType
@invocation_output("sdxl_model_loader_output")
@@ -29,7 +29,9 @@ class SDXLModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl base model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_main_model, ui_type=UIType.SDXLMainModel
description=FieldDescriptions.sdxl_main_model,
ui_model_base=BaseModelType.StableDiffusionXL,
ui_model_type=ModelType.Main,
)
# TODO: precision?
@@ -67,7 +69,9 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl refiner model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_refiner_model, ui_type=UIType.SDXLRefinerModel
description=FieldDescriptions.sdxl_refiner_model,
ui_model_base=BaseModelType.StableDiffusionXLRefiner,
ui_model_type=ModelType.Main,
)
# TODO: precision?

View File

@@ -1,72 +1,75 @@
from enum import Enum
from itertools import zip_longest
from pathlib import Path
from typing import Literal
import numpy as np
import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import AutoProcessor
from pydantic import BaseModel, Field, model_validator
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor
from transformers.models.sam2 import Sam2Model
from transformers.models.sam2.processing_sam2 import Sam2Processor
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import BoundingBoxField, ImageField, InputField, TensorField
from invokeai.app.invocations.primitives import MaskOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.segment_anything.mask_refinement import mask_to_polygon, polygon_to_mask
from invokeai.backend.image_util.segment_anything.segment_anything_2_pipeline import SegmentAnything2Pipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.image_util.segment_anything.shared import SAMInput, SAMPoint
# The set of supported Segment Anything checkpoints. SAM2 keys are prefixed
# "segment-anything-2-" and are routed to a different pipeline at load time.
SegmentAnythingModelKey = Literal[
    "segment-anything-base",
    "segment-anything-large",
    "segment-anything-huge",
    "segment-anything-2-tiny",
    "segment-anything-2-small",
    "segment-anything-2-base",
    "segment-anything-2-large",
]

# Maps each model key to the Hugging Face repo id it is downloaded from.
SEGMENT_ANYTHING_MODEL_IDS: dict[SegmentAnythingModelKey, str] = {
    "segment-anything-base": "facebook/sam-vit-base",
    "segment-anything-large": "facebook/sam-vit-large",
    "segment-anything-huge": "facebook/sam-vit-huge",
    "segment-anything-2-tiny": "facebook/sam2.1-hiera-tiny",
    "segment-anything-2-small": "facebook/sam2.1-hiera-small",
    "segment-anything-2-base": "facebook/sam2.1-hiera-base-plus",
    "segment-anything-2-large": "facebook/sam2.1-hiera-large",
}
class SAMPointLabel(Enum):
    """Label attached to a SAM prompt point.

    Positive points mark the object to segment, negative points mark
    background, and neutral points carry no preference.
    """

    negative = -1
    neutral = 0
    positive = 1
class SAMPoint(BaseModel):
    """A single point prompt for SAM: pixel coordinates plus a label."""

    x: int = Field(..., description="The x-coordinate of the point")
    y: int = Field(..., description="The y-coordinate of the point")
    label: SAMPointLabel = Field(..., description="The label of the point")
class SAMPointsField(BaseModel):
    """A non-empty collection of prompt points describing a single object."""

    # min_length=1 rejects empty point lists at validation time.
    points: list[SAMPoint] = Field(..., description="The points of the object", min_length=1)

    def to_list(self) -> list[list[float]]:
        """Flatten each point to ``[x, y, label]`` for the SAM processor."""
        return [[point.x, point.y, point.label.value] for point in self.points]
@invocation(
"segment_anything",
title="Segment Anything",
tags=["prompt", "segmentation"],
tags=["prompt", "segmentation", "sam", "sam2"],
category="segmentation",
version="1.2.0",
version="1.3.0",
)
class SegmentAnythingInvocation(BaseInvocation):
"""Runs a Segment Anything Model."""
"""Runs a Segment Anything Model (SAM or SAM2)."""
# Reference:
# - https://arxiv.org/pdf/2304.02643
# - https://huggingface.co/docs/transformers/v4.43.3/en/model_doc/grounding-dino#grounded-sam
# - https://github.com/NielsRogge/Transformers-Tutorials/blob/a39f33ac1557b02ebfb191ea7753e332b5ca933f/Grounding%20DINO/GroundingDINO_with_Segment_Anything.ipynb
model: SegmentAnythingModelKey = InputField(description="The Segment Anything model to use.")
model: SegmentAnythingModelKey = InputField(description="The Segment Anything model to use (SAM or SAM2).")
image: ImageField = InputField(description="The image to segment.")
bounding_boxes: list[BoundingBoxField] | None = InputField(
default=None, description="The bounding boxes to prompt the SAM model with."
default=None, description="The bounding boxes to prompt the model with."
)
point_lists: list[SAMPointsField] | None = InputField(
default=None,
description="The list of point lists to prompt the SAM model with. Each list of points represents a single object.",
description="The list of point lists to prompt the model with. Each list of points represents a single object.",
)
apply_polygon_refinement: bool = InputField(
description="Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging).",
@@ -77,14 +80,18 @@ class SegmentAnythingInvocation(BaseInvocation):
default="all",
)
@model_validator(mode="after")
def validate_points_and_boxes_len(self):
    """Ensure point_lists and bounding_boxes pair up one-to-one when both are set."""
    points = self.point_lists
    boxes = self.bounding_boxes
    # Either prompt type may be omitted on its own; only a mismatch in length
    # between the two (when both are present) is an error.
    if points is None or boxes is None:
        return self
    if len(points) != len(boxes):
        raise ValueError("If both point_lists and bounding_boxes are provided, they must have the same length.")
    return self
@torch.no_grad()
def invoke(self, context: InvocationContext) -> MaskOutput:
# The models expect a 3-channel RGB image.
image_pil = context.images.get_pil(self.image.image_name, mode="RGB")
if self.point_lists is not None and self.bounding_boxes is not None:
raise ValueError("Only one of point_lists or bounding_box can be provided.")
if (not self.bounding_boxes or len(self.bounding_boxes) == 0) and (
not self.point_lists or len(self.point_lists) == 0
):
@@ -111,26 +118,38 @@ class SegmentAnythingInvocation(BaseInvocation):
# model, and figure out how to make it work in the pipeline.
# torch_dtype=TorchDevice.choose_torch_dtype(),
)
sam_processor = AutoProcessor.from_pretrained(model_path, local_files_only=True)
assert isinstance(sam_processor, SamProcessor)
sam_processor = SamProcessor.from_pretrained(model_path, local_files_only=True)
return SegmentAnythingPipeline(sam_model=sam_model, sam_processor=sam_processor)
def _segment(self, context: InvocationContext, image: Image.Image) -> list[torch.Tensor]:
"""Use Segment Anything (SAM) to generate masks given an image + a set of bounding boxes."""
# Convert the bounding boxes to the SAM input format.
sam_bounding_boxes = (
[[bb.x_min, bb.y_min, bb.x_max, bb.y_max] for bb in self.bounding_boxes] if self.bounding_boxes else None
)
sam_points = [p.to_list() for p in self.point_lists] if self.point_lists else None
@staticmethod
def _load_sam_2_model(model_path: Path):
    """Load a SAM2 model and its processor from a local path, wrapped in a pipeline.

    local_files_only=True: the weights are expected to already be on disk
    (downloaded by the remote-model loader), so no network access happens here.
    """
    sam2_model = Sam2Model.from_pretrained(model_path, local_files_only=True)
    sam2_processor = Sam2Processor.from_pretrained(model_path, local_files_only=True)
    return SegmentAnything2Pipeline(sam2_model=sam2_model, sam2_processor=sam2_processor)
with (
context.models.load_remote_model(
source=SEGMENT_ANYTHING_MODEL_IDS[self.model], loader=SegmentAnythingInvocation._load_sam_model
) as sam_pipeline,
):
assert isinstance(sam_pipeline, SegmentAnythingPipeline)
masks = sam_pipeline.segment(image=image, bounding_boxes=sam_bounding_boxes, point_lists=sam_points)
def _segment(self, context: InvocationContext, image: Image.Image) -> list[torch.Tensor]:
"""Use Segment Anything (SAM or SAM2) to generate masks given an image + a set of bounding boxes."""
source = SEGMENT_ANYTHING_MODEL_IDS[self.model]
inputs: list[SAMInput] = []
for bbox_field, point_field in zip_longest(self.bounding_boxes or [], self.point_lists or [], fillvalue=None):
inputs.append(
SAMInput(
bounding_box=bbox_field,
points=point_field.points if point_field else None,
)
)
if "sam2" in source:
loader = SegmentAnythingInvocation._load_sam_2_model
with context.models.load_remote_model(source=source, loader=loader) as pipeline:
assert isinstance(pipeline, SegmentAnything2Pipeline)
masks = pipeline.segment(image=image, inputs=inputs)
else:
loader = SegmentAnythingInvocation._load_sam_model
with context.models.load_remote_model(source=source, loader=loader) as pipeline:
assert isinstance(pipeline, SegmentAnythingPipeline)
masks = pipeline.segment(image=image, inputs=inputs)
masks = self._process_masks(masks)
if self.apply_polygon_refinement:

View File

@@ -11,7 +11,6 @@ from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
InputField,
UIType,
WithBoard,
WithMetadata,
)
@@ -19,6 +18,7 @@ from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.taxonomy import ModelType
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.tiles.tiles import calc_tiles_min_overlap
from invokeai.backend.tiles.utils import TBLR, Tile
@@ -33,7 +33,7 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
image_to_image_model: ModelIdentifierField = InputField(
title="Image-to-Image Model",
description=FieldDescriptions.spandrel_image_to_image_model,
ui_type=UIType.SpandrelImageToImageModel,
ui_model_type=ModelType.SpandrelImageToImage,
)
tile_size: int = InputField(
default=512, description="The tile size for tiled image-to-image. Set to 0 to disable tiling."

View File

@@ -8,11 +8,12 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
from invokeai.backend.model_manager.taxonomy import BaseModelType, ModelType
class T2IAdapterField(BaseModel):
@@ -60,7 +61,8 @@ class T2IAdapterInvocation(BaseInvocation):
description="The T2I-Adapter model.",
title="T2I-Adapter Model",
ui_order=-1,
ui_type=UIType.T2IAdapterModel,
ui_model_base=[BaseModelType.StableDiffusion1, BaseModelType.StableDiffusionXL],
ui_model_type=ModelType.T2IAdapter,
)
weight: Union[float, list[float]] = InputField(
default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"

View File

@@ -49,3 +49,11 @@ class BoardImageRecordStorageBase(ABC):
) -> int:
"""Gets the number of images for a board."""
pass
@abstractmethod
def get_asset_count_for_board(
self,
board_id: str,
) -> int:
"""Gets the number of assets for a board."""
pass

View File

@@ -3,6 +3,8 @@ from typing import Optional, cast
from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase
from invokeai.app.services.image_records.image_records_common import (
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
ImageCategory,
ImageRecord,
deserialize_image_record,
@@ -151,15 +153,38 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
def get_image_count_for_board(self, board_id: str) -> int:
with self._db.transaction() as cursor:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(IMAGE_CATEGORIES)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
cursor.execute(
"""--sql
f"""--sql
SELECT COUNT(*)
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE images.is_intermediate = FALSE
WHERE images.is_intermediate = FALSE AND images.image_category IN ( {placeholders} )
AND board_images.board_id = ?;
""",
(board_id,),
(*category_strings, board_id),
)
count = cast(int, cursor.fetchone()[0])
return count
def get_asset_count_for_board(self, board_id: str) -> int:
    """Count the non-intermediate asset images (control/mask/user/other) on a board."""
    # Deduplicate the enum values and build a matching "?,?,..." placeholder
    # list so every category is bound as a query parameter.
    categories = [c.value for c in set(ASSETS_CATEGORIES)]
    placeholders = ",".join("?" * len(categories))
    with self._db.transaction() as cursor:
        cursor.execute(
            f"""--sql
            SELECT COUNT(*)
            FROM board_images
            INNER JOIN images ON board_images.image_name = images.image_name
            WHERE images.is_intermediate = FALSE AND images.image_category IN ( {placeholders} )
            AND board_images.board_id = ?;
            """,
            (*categories, board_id),
        )
        return cast(int, cursor.fetchone()[0])

View File

@@ -12,12 +12,20 @@ class BoardDTO(BoardRecord):
"""The URL of the thumbnail of the most recent image in the board."""
image_count: int = Field(description="The number of images in the board.")
"""The number of images in the board."""
asset_count: int = Field(description="The number of assets in the board.")
"""The number of assets in the board."""
video_count: int = Field(description="The number of videos in the board.")
"""The number of videos in the board."""
def board_record_to_dto(board_record: BoardRecord, cover_image_name: Optional[str], image_count: int) -> BoardDTO:
def board_record_to_dto(
board_record: BoardRecord, cover_image_name: Optional[str], image_count: int, asset_count: int, video_count: int
) -> BoardDTO:
"""Converts a board record to a board DTO."""
return BoardDTO(
**board_record.model_dump(exclude={"cover_image_name"}),
cover_image_name=cover_image_name,
image_count=image_count,
asset_count=asset_count,
video_count=video_count,
)

View File

@@ -17,7 +17,7 @@ class BoardService(BoardServiceABC):
board_name: str,
) -> BoardDTO:
board_record = self.__invoker.services.board_records.save(board_name)
return board_record_to_dto(board_record, None, 0)
return board_record_to_dto(board_record, None, 0, 0, 0)
def get_dto(self, board_id: str) -> BoardDTO:
board_record = self.__invoker.services.board_records.get(board_id)
@@ -27,7 +27,9 @@ class BoardService(BoardServiceABC):
else:
cover_image_name = None
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
return board_record_to_dto(board_record, cover_image_name, image_count)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
video_count = 0 # noop for OSS
return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)
def update(
self,
@@ -42,7 +44,9 @@ class BoardService(BoardServiceABC):
cover_image_name = None
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(board_id)
return board_record_to_dto(board_record, cover_image_name, image_count)
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(board_id)
video_count = 0 # noop for OSS
return board_record_to_dto(board_record, cover_image_name, image_count, asset_count, video_count)
def delete(self, board_id: str) -> None:
self.__invoker.services.board_records.delete(board_id)
@@ -67,7 +71,9 @@ class BoardService(BoardServiceABC):
cover_image_name = None
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
board_dtos.append(board_record_to_dto(r, cover_image_name, image_count))
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
video_count = 0 # noop for OSS
board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))
return OffsetPaginatedResults[BoardDTO](items=board_dtos, offset=offset, limit=limit, total=len(board_dtos))
@@ -84,6 +90,8 @@ class BoardService(BoardServiceABC):
cover_image_name = None
image_count = self.__invoker.services.board_image_records.get_image_count_for_board(r.board_id)
board_dtos.append(board_record_to_dto(r, cover_image_name, image_count))
asset_count = self.__invoker.services.board_image_records.get_asset_count_for_board(r.board_id)
video_count = 0 # noop for OSS
board_dtos.append(board_record_to_dto(r, cover_image_name, image_count, asset_count, video_count))
return board_dtos

View File

@@ -150,4 +150,15 @@ class BulkDownloadService(BulkDownloadBase):
def _is_valid_path(self, path: Union[str, Path]) -> bool:
"""Validates the path given for a bulk download."""
path = path if isinstance(path, Path) else Path(path)
return path.exists()
# Resolve the path to handle any path traversal attempts (e.g., ../)
resolved_path = path.resolve()
# The path may not traverse out of the bulk downloads folder or its subfolders
does_not_traverse = resolved_path.parent == self._bulk_downloads_folder.resolve()
# The path must exist and be a .zip file
does_exist = resolved_path.exists()
is_zip_file = resolved_path.suffix == ".zip"
return does_exist and is_zip_file and does_not_traverse

View File

@@ -234,8 +234,8 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
error_type: Optional[str] = Field(default=None, description="The error type, if any")
error_message: Optional[str] = Field(default=None, description="The error message, if any")
error_traceback: Optional[str] = Field(default=None, description="The error traceback, if any")
created_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was created")
updated_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was last updated")
created_at: str = Field(description="The timestamp when the queue item was created")
updated_at: str = Field(description="The timestamp when the queue item was last updated")
started_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was started")
completed_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was completed")
batch_status: BatchStatus = Field(description="The status of the batch")
@@ -258,8 +258,8 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
error_type=queue_item.error_type,
error_message=queue_item.error_message,
error_traceback=queue_item.error_traceback,
created_at=str(queue_item.created_at) if queue_item.created_at else None,
updated_at=str(queue_item.updated_at) if queue_item.updated_at else None,
created_at=str(queue_item.created_at),
updated_at=str(queue_item.updated_at),
started_at=str(queue_item.started_at) if queue_item.started_at else None,
completed_at=str(queue_item.completed_at) if queue_item.completed_at else None,
batch_status=batch_status,

View File

@@ -58,6 +58,15 @@ class ImageCategory(str, Enum, metaclass=MetaEnum):
"""OTHER: The image is some other type of image with a specialized purpose. To be used by external nodes."""
# Only generated ("general") images count as gallery images.
IMAGE_CATEGORIES: list[ImageCategory] = [ImageCategory.GENERAL]
# Every other category is treated as an asset: control images, masks, user
# uploads, and "other" images produced by external nodes.
ASSETS_CATEGORIES: list[ImageCategory] = [
    ImageCategory.CONTROL,
    ImageCategory.MASK,
    ImageCategory.USER,
    ImageCategory.OTHER,
]
class InvalidImageCategoryException(ValueError):
"""Raised when a provided value is not a valid ImageCategory.

View File

@@ -15,6 +15,7 @@ from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
from invokeai.backend.model_manager.config import (
AnyModelConfig,
ControlAdapterDefaultSettings,
LoraModelDefaultSettings,
MainModelDefaultSettings,
)
from invokeai.backend.model_manager.taxonomy import (
@@ -83,8 +84,8 @@ class ModelRecordChanges(BaseModelExcludeNull):
file_size: Optional[int] = Field(description="Size of model file", default=None)
format: Optional[str] = Field(description="format of model file", default=None)
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
description="Default settings for this model", default=None
default_settings: Optional[MainModelDefaultSettings | LoraModelDefaultSettings | ControlAdapterDefaultSettings] = (
Field(description="Default settings for this model", default=None)
)
# Checkpoint-specific changes

View File

@@ -15,6 +15,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -23,6 +24,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
)
from invokeai.app.services.shared.graph import GraphExecutionState
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
class SessionQueueBase(ABC):
@@ -145,7 +147,7 @@ class SessionQueueBase(ABC):
status: Optional[QUEUE_ITEM_STATUS] = None,
destination: Optional[str] = None,
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets a page of session queue items"""
"""Gets a page of session queue items. Do not remove."""
pass
@abstractmethod
@@ -157,9 +159,18 @@ class SessionQueueBase(ABC):
"""Gets all queue items that match the given parameters"""
pass
@abstractmethod
def get_queue_item_ids(
self,
queue_id: str,
order_dir: SQLiteDirection = SQLiteDirection.Descending,
) -> ItemIdsResult:
"""Gets all queue item ids that match the given parameters"""
pass
@abstractmethod
def get_queue_item(self, item_id: int) -> SessionQueueItem:
"""Gets a session queue item by ID"""
"""Gets a session queue item by ID for a given queue"""
pass
@abstractmethod

View File

@@ -176,6 +176,14 @@ DEFAULT_QUEUE_ID = "default"
QUEUE_ITEM_STATUS = Literal["pending", "in_progress", "completed", "failed", "canceled"]
class ItemIdsResult(BaseModel):
    """Response containing ordered item ids with metadata for optimistic updates."""

    # Ids only (no full queue items) keeps the payload small; ordering matches
    # the query that produced them so clients can reconcile local state.
    item_ids: list[int] = Field(description="Ordered list of item ids")
    total_count: int = Field(description="Total number of queue items matching the query")
NodeFieldValueValidator = TypeAdapter(list[NodeFieldValue])

View File

@@ -22,6 +22,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
EnqueueBatchResult,
IsEmptyResult,
IsFullResult,
ItemIdsResult,
PruneResult,
RetryItemsResult,
SessionQueueCountsByDestination,
@@ -34,6 +35,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
)
from invokeai.app.services.shared.graph import GraphExecutionState
from invokeai.app.services.shared.pagination import CursorPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
@@ -671,6 +673,26 @@ class SqliteSessionQueue(SessionQueueBase):
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
return items
def get_queue_item_ids(
    self,
    queue_id: str,
    order_dir: SQLiteDirection = SQLiteDirection.Descending,
) -> ItemIdsResult:
    """Return every item id in the given queue, ordered by creation time.

    Only ids are fetched, not full queue items — intended for clients doing
    optimistic updates against a cached item list.
    """
    with self._db.transaction() as cursor_:
        # order_dir is an enum, so interpolating its .value into the SQL is
        # safe (no user-controlled text); queue_id is bound as a parameter.
        query = f"""--sql
            SELECT item_id
            FROM session_queue
            WHERE queue_id = ?
            ORDER BY created_at {order_dir.value}
            """
        query_params = [queue_id]
        cursor_.execute(query, query_params)
        result = cast(list[sqlite3.Row], cursor_.fetchall())
        item_ids = [row[0] for row in result]
        # total_count equals len(item_ids) because the query is unpaginated.
        return ItemIdsResult(item_ids=item_ids, total_count=len(item_ids))
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
with self._db.transaction() as cursor:
cursor.execute(

View File

@@ -0,0 +1,179 @@
import datetime
from typing import Optional, Union
from pydantic import BaseModel, Field, StrictBool, StrictStr
from invokeai.app.util.misc import get_iso_timestamp
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
# Comma-separated, table-qualified column list for selecting video DTO rows,
# e.g. "videos.video_id, videos.width, ...".
_VIDEO_COLUMNS = (
    "video_id",
    "width",
    "height",
    "session_id",
    "node_id",
    "is_intermediate",
    "created_at",
    "updated_at",
    "deleted_at",
    "starred",
)
VIDEO_DTO_COLS = ", ".join(f"videos.{column}" for column in _VIDEO_COLUMNS)
class VideoRecord(BaseModelExcludeNull):
    """Deserialized video record without metadata.

    Timestamps are typed as datetime-or-string because they may come straight
    from the database as ISO strings or be constructed in Python.
    """

    video_id: str = Field(description="The unique id of the video.")
    """The unique id of the video."""

    width: int = Field(description="The width of the video in px.")
    """The actual width of the video in px. This may be different from the width in metadata."""

    height: int = Field(description="The height of the video in px.")
    """The actual height of the video in px. This may be different from the height in metadata."""

    created_at: Union[datetime.datetime, str] = Field(description="The created timestamp of the video.")
    """The created timestamp of the video."""

    updated_at: Union[datetime.datetime, str] = Field(description="The updated timestamp of the video.")
    """The updated timestamp of the video."""

    deleted_at: Optional[Union[datetime.datetime, str]] = Field(
        default=None, description="The deleted timestamp of the video."
    )
    """The deleted timestamp of the video."""

    is_intermediate: bool = Field(description="Whether this is an intermediate video.")
    """Whether this is an intermediate video."""

    session_id: Optional[str] = Field(
        default=None,
        description="The session ID that generated this video, if it is a generated video.",
    )
    """The session ID that generated this video, if it is a generated video."""

    node_id: Optional[str] = Field(
        default=None,
        description="The node ID that generated this video, if it is a generated video.",
    )
    """The node ID that generated this video, if it is a generated video."""

    starred: bool = Field(description="Whether this video is starred.")
    """Whether this video is starred."""
class VideoRecordChanges(BaseModelExcludeNull):
    """A set of changes to apply to a video record.

    Only limited changes are valid:
    - `session_id`: change the session associated with a video
    - `is_intermediate`: change the video's `is_intermediate` flag
    - `starred`: change whether the video is starred

    Strict types reject coercion so a patch cannot silently change an
    unintended field (e.g. an int being coerced to a bool).
    """

    session_id: Optional[StrictStr] = Field(
        default=None,
        description="The video's new session ID.",
    )
    """The video's new session ID."""

    is_intermediate: Optional[StrictBool] = Field(default=None, description="The video's new `is_intermediate` flag.")
    """The video's new `is_intermediate` flag."""

    starred: Optional[StrictBool] = Field(default=None, description="The video's new `starred` state")
    """The video's new `starred` state."""
def deserialize_video_record(video_dict: dict) -> VideoRecord:
    """Deserializes a video record from a row dict.

    Missing keys fall back to reasonable defaults. Note that ``deleted_at``
    defaults to ``None`` — defaulting it to the current timestamp (as the
    created/updated fields do) would incorrectly mark every record that lacks
    the key as having been deleted "now".
    """
    # Retrieve all the values, setting "reasonable" defaults if they are not present.
    video_id = video_dict.get("video_id", "unknown")
    width = video_dict.get("width", 0)
    height = video_dict.get("height", 0)
    session_id = video_dict.get("session_id", None)
    node_id = video_dict.get("node_id", None)
    created_at = video_dict.get("created_at", get_iso_timestamp())
    updated_at = video_dict.get("updated_at", get_iso_timestamp())
    # Bug fix: do not default deleted_at to "now" — absence means "not deleted".
    deleted_at = video_dict.get("deleted_at", None)
    is_intermediate = video_dict.get("is_intermediate", False)
    starred = video_dict.get("starred", False)

    return VideoRecord(
        video_id=video_id,
        width=width,
        height=height,
        session_id=session_id,
        node_id=node_id,
        created_at=created_at,
        updated_at=updated_at,
        deleted_at=deleted_at,
        is_intermediate=is_intermediate,
        starred=starred,
    )
class VideoCollectionCounts(BaseModel):
    """Starred/unstarred tallies for a collection of videos."""

    starred_count: int = Field(description="The number of starred videos in the collection.")
    unstarred_count: int = Field(description="The number of unstarred videos in the collection.")


class VideoIdsResult(BaseModel):
    """Response containing ordered video ids with metadata for optimistic updates."""

    video_ids: list[str] = Field(description="Ordered list of video ids")
    # Per the description, only meaningful when the query sorts starred first.
    starred_count: int = Field(description="Number of starred videos (when starred_first=True)")
    total_count: int = Field(description="Total number of videos matching the query")
class VideoUrlsDTO(BaseModelExcludeNull):
    """The URLs for a video and its thumbnail."""

    video_id: str = Field(description="The unique id of the video.")
    """The unique id of the video."""

    video_url: str = Field(description="The URL of the video.")
    """The URL of the video."""

    thumbnail_url: str = Field(description="The URL of the video's thumbnail.")
    """The URL of the video's thumbnail."""


class VideoDTO(VideoRecord, VideoUrlsDTO):
    """Deserialized video record, enriched for the frontend."""

    # NOTE(review): the description text says "image" but this is a video's
    # board id — the copy likely needs fixing (left as-is: it is runtime/schema text).
    board_id: Optional[str] = Field(
        default=None, description="The id of the board the image belongs to, if one exists."
    )
    """The id of the board the video belongs to, if one exists."""
def video_record_to_dto(
    video_record: VideoRecord,
    video_url: str,
    thumbnail_url: str,
    board_id: Optional[str],
) -> VideoDTO:
    """Enrich a VideoRecord with its URLs and board membership, producing a VideoDTO."""
    record_fields = video_record.model_dump()
    return VideoDTO(
        video_url=video_url,
        thumbnail_url=thumbnail_url,
        board_id=board_id,
        **record_fields,
    )
class ResultWithAffectedBoards(BaseModel):
    """Base result for bulk video operations; lists boards whose contents changed."""

    affected_boards: list[str] = Field(description="The ids of boards affected by the delete operation")


class DeleteVideosResult(ResultWithAffectedBoards):
    """Result of a bulk video delete."""

    deleted_videos: list[str] = Field(description="The ids of the videos that were deleted")


class StarredVideosResult(ResultWithAffectedBoards):
    """Result of a bulk star operation."""

    starred_videos: list[str] = Field(description="The ids of the videos that were starred")


class UnstarredVideosResult(ResultWithAffectedBoards):
    """Result of a bulk unstar operation."""

    unstarred_videos: list[str] = Field(description="The ids of the videos that were unstarred")


class AddVideosToBoardResult(ResultWithAffectedBoards):
    """Result of adding videos to a board."""

    added_videos: list[str] = Field(description="The video ids that were added to the board")


class RemoveVideosFromBoardResult(ResultWithAffectedBoards):
    """Result of removing videos from their board."""

    removed_videos: list[str] = Field(description="The video ids that were removed from their board")

View File

@@ -0,0 +1,304 @@
# This file is vendored from https://github.com/ShieldMnt/invisible-watermark
#
# `invisible-watermark` is MIT licensed as of August 23, 2025, when the code was copied into this repo.
#
# Why we vendored it in:
# `invisible-watermark` has a dependency on `opencv-python`, which conflicts with Invoke's dependency on
# `opencv-contrib-python`. It's easier to copy the code over than complicate the installation process by
# requiring an extra post-install step of removing `opencv-python` and installing `opencv-contrib-python`.
import struct
import uuid
import base64
import cv2
import numpy as np
import pywt
class WatermarkEncoder(object):
    """Embeds an invisible watermark into an image (vendored from invisible-watermark).

    The watermark payload is stored as a flat list of bits (self._watermarks);
    the various set_by_* methods populate it from different source formats.
    Vendored code: kept byte-compatible with upstream; only comments added.
    """

    def __init__(self, content=b""):
        # Unpack the payload bytes into individual bits (MSB first, via numpy).
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)
        self._wmType = "bytes"

    def set_by_ipv4(self, addr):
        """Set the watermark from a dotted-quad IPv4 address (32 bits)."""
        bits = []
        ips = addr.split(".")
        for ip in ips:
            # NOTE(review): `ip` is a string here, so `ip % 255` is Python
            # string formatting and appears to raise TypeError for normal
            # addresses — presumably `int(ip)` was intended. This matches the
            # vendored upstream source; confirm before changing.
            bits += list(np.unpackbits(np.array([ip % 255], dtype=np.uint8)))
        self._watermarks = bits
        self._wmLen = len(self._watermarks)
        self._wmType = "ipv4"
        assert self._wmLen == 32

    def set_by_uuid(self, uid):
        """Set the watermark from a UUID string (128 bits)."""
        u = uuid.UUID(uid)
        self._wmType = "uuid"
        seq = np.array([n for n in u.bytes], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_bytes(self, content):
        """Set the watermark from raw bytes."""
        self._wmType = "bytes"
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_b16(self, b16):
        """Set the watermark from a base16 (hex) encoded payload."""
        content = base64.b16decode(b16)
        self.set_by_bytes(content)
        self._wmType = "b16"

    def set_by_bits(self, bits=[]):
        # Mutable default is safe here: `bits` is only read, never mutated.
        # Each bit is normalized to 0/1 via `% 2`.
        self._watermarks = [int(bit) % 2 for bit in bits]
        self._wmLen = len(self._watermarks)
        self._wmType = "bits"

    def set_watermark(self, wmType="bytes", content=""):
        """Dispatch to the matching set_by_* method based on wmType."""
        if wmType == "ipv4":
            self.set_by_ipv4(content)
        elif wmType == "uuid":
            self.set_by_uuid(content)
        elif wmType == "bits":
            self.set_by_bits(content)
        elif wmType == "bytes":
            self.set_by_bytes(content)
        elif wmType == "b16":
            self.set_by_b16(content)
        else:
            raise NameError("%s is not supported" % wmType)

    def get_length(self):
        """Return the watermark length in bits."""
        return self._wmLen

    # @classmethod
    # def loadModel(cls):
    #     RivaWatermark.loadModel()

    def encode(self, cv2Image, method="dwtDct", **configs):
        """Embed the watermark into a BGR cv2 image using the given method.

        Only the 'dwtDct' method is supported in this vendored copy; the
        image must be at least 256x256 pixels in area.
        """
        (r, c, channels) = cv2Image.shape
        if r * c < 256 * 256:
            raise RuntimeError("image too small, should be larger than 256x256")

        if method == "dwtDct":
            embed = EmbedMaxDct(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        # elif method == 'dwtDctSvd':
        #     embed = EmbedDwtDctSvd(self._watermarks, wmLen=self._wmLen, **configs)
        #     return embed.encode(cv2Image)
        # elif method == 'rivaGan':
        #     embed = RivaWatermark(self._watermarks, self._wmLen)
        #     return embed.encode(cv2Image)
        else:
            raise NameError("%s is not supported" % method)
class WatermarkDecoder(object):
    """Recovers a watermark payload of a known type and length from an image."""

    def __init__(self, wm_type="bytes", length=0):
        self._wmType = wm_type
        # ipv4 and uuid payloads have fixed bit sizes; the others take `length` bits.
        known_lengths = {"ipv4": 32, "uuid": 128, "bytes": length, "bits": length, "b16": length}
        if wm_type not in known_lengths:
            raise NameError("%s is unsupported" % wm_type)
        self._wmLen = known_lengths[wm_type]

    def reconstruct_ipv4(self, bits):
        """Pack 32 bits back into a dotted-quad IPv4 string."""
        octets = np.packbits(bits)
        return ".".join(str(octet) for octet in octets)

    def reconstruct_uuid(self, bits):
        """Pack 128 bits back into a canonical UUID string."""
        nums = np.packbits(bits)
        raw = b""
        for idx in range(16):
            raw += struct.pack(">B", nums[idx])
        return str(uuid.UUID(bytes=raw))

    def reconstruct_bits(self, bits):
        """Bit payloads are returned unchanged."""
        return bits

    def reconstruct_b16(self, bits):
        """Pack the bits into bytes, then base16-encode them."""
        return base64.b16encode(self.reconstruct_bytes(bits))

    def reconstruct_bytes(self, bits):
        """Pack the first `wmLen` bits into a byte string."""
        nums = np.packbits(bits)
        raw = b""
        for idx in range(self._wmLen // 8):
            raw += struct.pack(">B", nums[idx])
        return raw

    def reconstruct(self, bits):
        """Convert decoded bits into the payload type chosen at construction.

        Raises:
            RuntimeError: if the number of bits does not match the configured length.
        """
        if len(bits) != self._wmLen:
            raise RuntimeError("bits are not matched with watermark length")

        handlers = {
            "ipv4": self.reconstruct_ipv4,
            "uuid": self.reconstruct_uuid,
            "bits": self.reconstruct_bits,
            "b16": self.reconstruct_b16,
        }
        # Anything else (i.e. "bytes") falls through to the byte reconstruction.
        return handlers.get(self._wmType, self.reconstruct_bytes)(bits)

    def decode(self, cv2Image, method="dwtDct", **configs):
        """Extract and reconstruct the watermark from a BGR image (H, W, C ndarray).

        Raises:
            RuntimeError: if the image has fewer pixels than 256x256.
            NameError: if `method` is not a supported extraction method.
        """
        (rows, cols, _channels) = cv2Image.shape
        if rows * cols < 256 * 256:
            raise RuntimeError("image too small, should be larger than 256x256")

        if method == "dwtDct":
            extractor = EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
            bits = extractor.decode(cv2Image)
        else:
            raise NameError("%s is not supported" % method)
        return self.reconstruct(bits)
class EmbedMaxDct(object):
    """DWT/DCT-domain watermark embedder/extractor.

    Each `block x block` tile of a channel's DWT approximation coefficients
    carries one watermark bit, encoded by quantizing the tile's
    largest-magnitude (non-first) coefficient against `scale`.
    """

    def __init__(self, watermarks=[], wmLen=8, scales=[0, 36, 36], block=4):
        # NOTE: the mutable default arguments are never mutated here, so they
        # are safe; kept as-is to preserve the vendored interface exactly.
        self._watermarks = watermarks
        self._wmLen = wmLen
        # Per-channel embedding strength; channels with scale <= 0 are skipped.
        # Only channels 0 and 1 (Y and U of YUV) are ever visited below.
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        """Embed the watermark bits into a BGR image; returns the encoded BGR image."""
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            # Operate on the approximation band of a single-level Haar DWT,
            # cropped to a multiple of 4 so the transform round-trips cleanly.
            ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")
            self.encode_frame(ca1, self._scales[channel])

            # NOTE(review): the detail bands are passed back as (v1, h1, d1),
            # swapping H and V relative to dwt2's (h1, v1, d1) ordering. This is
            # preserved from the upstream invisible-watermark library; confirm
            # it is intentional before "fixing" it, as decode compatibility with
            # existing watermarked images depends on the exact behavior.
            yuv[: row // 4 * 4, : col // 4 * 4, channel] = pywt.idwt2((ca1, (v1, h1, d1)), "haar")

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        """Extract the watermark bits from a BGR image; returns a bool array of length wmLen."""
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1, (h1, v1, d1) = pywt.dwt2(yuv[: row // 4 * 4, : col // 4 * 4, channel], "haar")
            scores = self.decode_frame(ca1, self._scales[channel], scores)

        # Majority vote across all tiles that carried each bit.
        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = np.array(avgScores) * 255 > 127
        return bits

    def decode_frame(self, frame, scale, scores):
        """Accumulate per-bit scores from every tile of one coefficient frame."""
        (row, col) = frame.shape
        num = 0

        for i in range(row // self._block):
            for j in range(col // self._block):
                block = frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ]

                score = self.infer_dct_matrix(block, scale)

                # Tiles are assigned to watermark bits round-robin.
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        """Alternative embedding: quantize the largest singular value of the DCT block."""
        u, s, v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale

        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        """Alternative extraction counterpart to `diffuse_dct_svd`."""
        u, s, v = np.linalg.svd(cv2.dct(block))

        # BUGFIX(cleanup): the vendored code had unreachable statements after
        # the return here; they have been removed. Behavior is unchanged.
        return int((s[0] % scale) > scale * 0.5)

    def diffuse_dct_matrix(self, block, wmBit, scale):
        """Embed one bit by quantizing the block's largest-magnitude non-first coefficient."""
        # Skip index 0 so the first (DC-like) coefficient is never modified.
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block

        val = block[i][j]
        if val >= 0.0:
            block[i][j] = (val // scale + 0.25 + 0.5 * wmBit) * scale
        else:
            val = abs(val)
            block[i][j] = -1.0 * (val // scale + 0.25 + 0.5 * wmBit) * scale
        return block

    def infer_dct_matrix(self, block, scale):
        """Read one bit from the block's largest-magnitude non-first coefficient."""
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block

        val = block[i][j]
        if val < 0:
            val = abs(val)

        # Bit is 1 when the residue sits in the upper half of the quantization step.
        if (val % scale) > 0.5 * scale:
            return 1
        else:
            return 0

    def encode_frame(self, frame, scale):
        """
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        """
        (row, col) = frame.shape
        num = 0
        for i in range(row // self._block):
            for j in range(col // self._block):
                block = frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_matrix(block, wmBit, scale)

                frame[
                    i * self._block : i * self._block + self._block, j * self._block : j * self._block + self._block
                ] = diffusedBlock

                num = num + 1

View File

@@ -6,13 +6,10 @@ configuration variable, that allows the watermarking to be supressed.
import cv2
import numpy as np
from imwatermark import WatermarkEncoder
from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
config = get_config()
from invokeai.backend.image_util.imwatermark.vendor import WatermarkEncoder
class InvisibleWatermark:

View File

@@ -0,0 +1,109 @@
from typing import Optional
import torch
from PIL import Image
# Import SAM2 components - these should be available in transformers 4.56.0+
from transformers.models.sam2 import Sam2Model
from transformers.models.sam2.processing_sam2 import Sam2Processor
from invokeai.backend.image_util.segment_anything.shared import SAMInput
from invokeai.backend.raw_model import RawModel
class SegmentAnything2Pipeline(RawModel):
    """A wrapper class for the transformers SAM2 model and processor that makes it compatible with the model manager."""

    def __init__(self, sam2_model: Sam2Model, sam2_processor: Sam2Processor):
        """Initialize the SAM2 pipeline.

        Args:
            sam2_model: The SAM2 model
            sam2_processor: The SAM2 processor (can be Sam2Processor or Sam2VideoProcessor)
        """
        self._sam2_model = sam2_model
        self._sam2_processor = sam2_processor

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
        # HACK: The SAM2 pipeline may not work on MPS devices. We only allow it to be moved to CPU or CUDA.
        # A device of None leaves the model on its current device.
        if device is not None and device.type not in {"cpu", "cuda"}:
            device = None
        self._sam2_model.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        """Return the in-memory size of the wrapped model, for the model cache."""
        # HACK: Fix the circular import issue.
        from invokeai.backend.model_manager.load.model_util import calc_module_size

        return calc_module_size(self._sam2_model)

    def segment(
        self,
        image: Image.Image,
        inputs: list[SAMInput],
    ) -> torch.Tensor:
        """Segment the image using the provided inputs.

        Args:
            image: The image to segment.
            inputs: A list of SAMInput objects containing bounding boxes and/or point lists.

        Returns:
            torch.Tensor: The segmentation masks. dtype: torch.bool. shape: [num_masks, channels, height, width].
        """
        # Flatten the SAMInput objects into the parallel prompt lists the processor expects.
        input_boxes: list[list[float]] = []
        input_points: list[list[list[float]]] = []
        input_labels: list[list[int]] = []
        for i in inputs:
            box: list[float] | None = None
            points: list[list[float]] | None = None
            labels: list[int] | None = None

            if i.bounding_box is not None:
                box: list[float] | None = [
                    i.bounding_box.x_min,
                    i.bounding_box.y_min,
                    i.bounding_box.x_max,
                    i.bounding_box.y_max,
                ]

            if i.points is not None:
                points = []
                labels = []
                for point in i.points:
                    points.append([point.x, point.y])
                    labels.append(point.label.value)

            # NOTE(review): an input that supplies only points contributes nothing to
            # input_boxes (and vice versa), so when `inputs` mixes box-only and
            # point-only entries the per-index alignment across the three lists is
            # lost — confirm this matches the processor's pairing expectations.
            if box is not None:
                input_boxes.append(box)
            if points is not None:
                input_points.append(points)
            if labels is not None:
                input_labels.append(labels)

        # Wrap each list in a single-image batch dimension; empty lists become None
        # so the processor treats the prompt kind as absent.
        # NOTE(review): points/labels are wrapped one level here ([input_points]),
        # presumably matching Sam2Processor's expected nesting — verify against the
        # transformers SAM2 documentation.
        batched_input_boxes = [input_boxes] if input_boxes else None
        batched_input_points = [input_points] if input_points else None
        batched_input_labels = [input_labels] if input_labels else None

        processed_inputs = self._sam2_processor(
            images=image,
            input_boxes=batched_input_boxes,
            input_points=batched_input_points,
            input_labels=batched_input_labels,
            return_tensors="pt",
        ).to(self._sam2_model.device)

        # Generate masks using the SAM2 model
        outputs = self._sam2_model(**processed_inputs)

        # Post-process the masks to get the final segmentation
        masks = self._sam2_processor.post_process_masks(
            masks=outputs.pred_masks,
            original_sizes=processed_inputs.original_sizes,
            reshaped_input_sizes=processed_inputs.reshaped_input_sizes,
        )

        # There should be only one batch.
        assert len(masks) == 1
        return masks[0]

View File

@@ -1,20 +1,13 @@
from typing import Optional, TypeAlias
from typing import Optional
import torch
from PIL import Image
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor
from invokeai.backend.image_util.segment_anything.shared import SAMInput
from invokeai.backend.raw_model import RawModel
# Type aliases for the inputs to the SAM model.
ListOfBoundingBoxes: TypeAlias = list[list[int]]
"""A list of bounding boxes. Each bounding box is in the format [xmin, ymin, xmax, ymax]."""
ListOfPoints: TypeAlias = list[list[int]]
"""A list of points. Each point is in the format [x, y]."""
ListOfPointLabels: TypeAlias = list[int]
"""A list of SAM point labels. Each label is an integer where -1 is background, 0 is neutral, and 1 is foreground."""
class SegmentAnythingPipeline(RawModel):
"""A wrapper class for the transformers SAM model and processor that makes it compatible with the model manager."""
@@ -38,55 +31,65 @@ class SegmentAnythingPipeline(RawModel):
def segment(
self,
image: Image.Image,
bounding_boxes: list[list[int]] | None = None,
point_lists: list[list[list[int]]] | None = None,
inputs: list[SAMInput],
) -> torch.Tensor:
"""Run the SAM model.
Either bounding_boxes or point_lists must be provided. If both are provided, bounding_boxes will be used and
point_lists will be ignored.
"""Segment the image using the provided inputs.
Args:
image (Image.Image): The image to segment.
bounding_boxes (list[list[int]]): The bounding box prompts. Each bounding box is in the format
[xmin, ymin, xmax, ymax].
point_lists (list[list[list[int]]]): The points prompts. Each point is in the format [x, y, label].
`label` is an integer where -1 is background, 0 is neutral, and 1 is foreground.
image: The image to segment.
inputs: A list of SAMInput objects containing bounding boxes and/or point lists.
Returns:
torch.Tensor: The segmentation masks. dtype: torch.bool. shape: [num_masks, channels, height, width].
"""
# Prep the inputs:
# - Create a list of bounding boxes or points and labels.
# - Add a batch dimension of 1 to the inputs.
if bounding_boxes:
input_boxes: list[ListOfBoundingBoxes] | None = [bounding_boxes]
input_points: list[ListOfPoints] | None = None
input_labels: list[ListOfPointLabels] | None = None
elif point_lists:
input_boxes: list[ListOfBoundingBoxes] | None = None
input_points: list[ListOfPoints] | None = []
input_labels: list[ListOfPointLabels] | None = []
for point_list in point_lists:
input_points.append([[p[0], p[1]] for p in point_list])
input_labels.append([p[2] for p in point_list])
input_boxes: list[list[float]] = []
input_points: list[list[list[float]]] = []
input_labels: list[list[int]] = []
else:
raise ValueError("Either bounding_boxes or points and labels must be provided.")
for i in inputs:
box: list[float] | None = None
points: list[list[float]] | None = None
labels: list[int] | None = None
inputs = self._sam_processor(
if i.bounding_box is not None:
box: list[float] | None = [
i.bounding_box.x_min,
i.bounding_box.y_min,
i.bounding_box.x_max,
i.bounding_box.y_max,
]
if i.points is not None:
points = []
labels = []
for point in i.points:
points.append([point.x, point.y])
labels.append(point.label.value)
if box is not None:
input_boxes.append(box)
if points is not None:
input_points.append(points)
if labels is not None:
input_labels.append(labels)
batched_input_boxes = [input_boxes] if input_boxes else None
batched_input_points = input_points if input_points else None
batched_input_labels = input_labels if input_labels else None
processed_inputs = self._sam_processor(
images=image,
input_boxes=input_boxes,
input_points=input_points,
input_labels=input_labels,
input_boxes=batched_input_boxes,
input_points=batched_input_points,
input_labels=batched_input_labels,
return_tensors="pt",
).to(self._sam_model.device)
outputs = self._sam_model(**inputs)
outputs = self._sam_model(**processed_inputs)
masks = self._sam_processor.post_process_masks(
masks=outputs.pred_masks,
original_sizes=inputs.original_sizes,
reshaped_input_sizes=inputs.reshaped_input_sizes,
original_sizes=processed_inputs.original_sizes,
reshaped_input_sizes=processed_inputs.reshaped_input_sizes,
)
# There should be only one batch.

View File

@@ -0,0 +1,49 @@
from enum import Enum
from pydantic import BaseModel, model_validator
from pydantic.fields import Field
# Axis-aligned pixel-space bounding box. The "max" coordinates are exclusive,
# matching PIL's (left, upper, right, lower) crop convention.
class BoundingBox(BaseModel):
    x_min: int = Field(..., description="The minimum x-coordinate of the bounding box (inclusive).")
    x_max: int = Field(..., description="The maximum x-coordinate of the bounding box (exclusive).")
    y_min: int = Field(..., description="The minimum y-coordinate of the bounding box (inclusive).")
    y_max: int = Field(..., description="The maximum y-coordinate of the bounding box (exclusive).")

    @model_validator(mode="after")
    def check_coords(self):
        # Reject inverted boxes; equal min/max (a zero-area box) is allowed.
        if self.x_min > self.x_max:
            raise ValueError(f"x_min ({self.x_min}) is greater than x_max ({self.x_max}).")
        if self.y_min > self.y_max:
            raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
        return self

    # NOTE(review): this method shadows the builtin `tuple` within the class body;
    # renaming would break callers, so it is left as-is.
    def tuple(self) -> tuple[int, int, int, int]:
        """
        Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.

        This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
        """
        return (self.x_min, self.y_min, self.x_max, self.y_max)
# Prompt-point label semantics for Segment Anything models: negative points are
# excluded from the mask, positive points are included, neutral points express
# no preference.
class SAMPointLabel(Enum):
    negative = -1
    neutral = 0
    positive = 1
# A single prompt point in image pixel coordinates, with its SAM label.
class SAMPoint(BaseModel):
    x: int = Field(..., description="The x-coordinate of the point")
    y: int = Field(..., description="The y-coordinate of the point")
    label: SAMPointLabel = Field(..., description="The label of the point")
# One segmentation prompt: a bounding box, a list of points, or both.
class SAMInput(BaseModel):
    bounding_box: BoundingBox | None = Field(None, description="The bounding box to use for segmentation")
    points: list[SAMPoint] | None = Field(None, description="The points to use for segmentation")

    @model_validator(mode="after")
    def check_input(self):
        # NOTE(review): truthiness is used here, so an explicitly-provided but empty
        # points list (points=[]) is also rejected — confirm this is intended.
        if not self.bounding_box and not self.points:
            raise ValueError("Either bounding_box or points must be provided")
        return self

View File

@@ -207,15 +207,24 @@ class IPAdapterPlusXL(IPAdapterPlus):
def load_ip_adapter_tensors(ip_adapter_ckpt_path: pathlib.Path, device: str) -> IPAdapterStateDict:
state_dict: IPAdapterStateDict = {"ip_adapter": {}, "image_proj": {}}
state_dict: IPAdapterStateDict = {
"ip_adapter": {},
"image_proj": {},
"adapter_modules": {}, # added for noobai-mark-ipa
"image_proj_model": {}, # added for noobai-mark-ipa
}
if ip_adapter_ckpt_path.suffix == ".safetensors":
model = safetensors.torch.load_file(ip_adapter_ckpt_path, device=device)
for key in model.keys():
if key.startswith("image_proj."):
state_dict["image_proj"][key.replace("image_proj.", "")] = model[key]
elif key.startswith("ip_adapter."):
if key.startswith("ip_adapter."):
state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = model[key]
elif key.startswith("image_proj_model."):
state_dict["image_proj_model"][key.replace("image_proj_model.", "")] = model[key]
elif key.startswith("image_proj."):
state_dict["image_proj"][key.replace("image_proj.", "")] = model[key]
elif key.startswith("adapter_modules."):
state_dict["adapter_modules"][key.replace("adapter_modules.", "")] = model[key]
else:
raise RuntimeError(f"Encountered unexpected IP Adapter state dict key: '{key}'.")
else:

View File

@@ -90,6 +90,11 @@ class MainModelDefaultSettings(BaseModel):
model_config = ConfigDict(extra="forbid")
class LoraModelDefaultSettings(BaseModel):
weight: float | None = Field(default=None, ge=-1, le=2, description="Default weight for this model")
model_config = ConfigDict(extra="forbid")
class ControlAdapterDefaultSettings(BaseModel):
# This could be narrowed to controlnet processor nodes, but they change. Leaving this a string is safer.
preprocessor: str | None
@@ -287,6 +292,9 @@ class LoRAConfigBase(ABC, BaseModel):
type: Literal[ModelType.LoRA] = ModelType.LoRA
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
default_settings: Optional[LoraModelDefaultSettings] = Field(
description="Default settings for this model", default=None
)
@classmethod
def flux_lora_format(cls, mod: ModelOnDisk):
@@ -492,6 +500,15 @@ class MainConfigBase(ABC, BaseModel):
variant: AnyVariant = ModelVariantType.Normal
class VideoConfigBase(ABC, BaseModel):
type: Literal[ModelType.Video] = ModelType.Video
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
default_settings: Optional[MainModelDefaultSettings] = Field(
description="Default settings for this model", default=None
)
variant: AnyVariant = ModelVariantType.Normal
class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase, LegacyProbeMixin, ModelConfigBase):
"""Model config for main checkpoint models."""
@@ -649,6 +666,21 @@ class ApiModelConfig(MainConfigBase, ModelConfigBase):
raise NotImplementedError("API models are not parsed from disk.")
class VideoApiModelConfig(VideoConfigBase, ModelConfigBase):
"""Model config for API-based video models."""
format: Literal[ModelFormat.Api] = ModelFormat.Api
@classmethod
def matches(cls, mod: ModelOnDisk) -> bool:
# API models are not stored on disk, so we can't match them.
return False
@classmethod
def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
raise NotImplementedError("API models are not parsed from disk.")
def get_model_discriminator_value(v: Any) -> str:
"""
Computes the discriminator value for a model config.
@@ -718,12 +750,13 @@ AnyModelConfig = Annotated[
Annotated[FluxReduxConfig, FluxReduxConfig.get_tag()],
Annotated[LlavaOnevisionConfig, LlavaOnevisionConfig.get_tag()],
Annotated[ApiModelConfig, ApiModelConfig.get_tag()],
Annotated[VideoApiModelConfig, VideoApiModelConfig.get_tag()],
],
Discriminator(get_model_discriminator_value),
]
AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
AnyDefaultSettings: TypeAlias = Union[MainModelDefaultSettings, ControlAdapterDefaultSettings]
AnyDefaultSettings: TypeAlias = Union[MainModelDefaultSettings, LoraModelDefaultSettings, ControlAdapterDefaultSettings]
class ModelConfigFactory:

View File

@@ -23,6 +23,7 @@ from invokeai.backend.model_manager.config import (
AnyModelConfig,
ControlAdapterDefaultSettings,
InvalidModelConfigException,
LoraModelDefaultSettings,
MainModelDefaultSettings,
ModelConfigFactory,
SubmodelDefinition,
@@ -217,6 +218,8 @@ class ModelProbe(object):
if not fields["default_settings"]:
if fields["type"] in {ModelType.ControlNet, ModelType.T2IAdapter, ModelType.ControlLoRa}:
fields["default_settings"] = get_default_settings_control_adapters(fields["name"])
if fields["type"] in {ModelType.LoRA}:
fields["default_settings"] = get_default_settings_lora()
elif fields["type"] is ModelType.Main:
fields["default_settings"] = get_default_settings_main(fields["base"])
@@ -543,6 +546,10 @@ def get_default_settings_control_adapters(model_name: str) -> Optional[ControlAd
return None
def get_default_settings_lora() -> LoraModelDefaultSettings:
return LoraModelDefaultSettings()
def get_default_settings_main(model_base: BaseModelType) -> Optional[MainModelDefaultSettings]:
if model_base is BaseModelType.StableDiffusion1 or model_base is BaseModelType.StableDiffusion2:
return MainModelDefaultSettings(width=512, height=512)

View File

@@ -28,8 +28,11 @@ class BaseModelType(str, Enum):
CogView4 = "cogview4"
Imagen3 = "imagen3"
Imagen4 = "imagen4"
Gemini2_5 = "gemini-2.5"
ChatGPT4o = "chatgpt-4o"
FluxKontext = "flux-kontext"
Veo3 = "veo3"
Runway = "runway"
class ModelType(str, Enum):
@@ -51,6 +54,7 @@ class ModelType(str, Enum):
SigLIP = "siglip"
FluxRedux = "flux_redux"
LlavaOnevision = "llava_onevision"
Video = "video"
class SubModelType(str, Enum):

View File

@@ -18,16 +18,25 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
# First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())
# Next, check that this is likely a FLUX model by spot-checking a few keys.
expected_keys = [
# Check if keys use transformer prefix
transformer_prefix_keys = [
"transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
"transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
"transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
]
all_expected_keys_present = all(k in state_dict for k in expected_keys)
transformer_keys_present = all(k in state_dict for k in transformer_prefix_keys)
return all_keys_in_peft_format and all_expected_keys_present
# Check if keys use base_model.model prefix
base_model_prefix_keys = [
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight",
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight",
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
]
base_model_keys_present = all(k in state_dict for k in base_model_prefix_keys)
return all_keys_in_peft_format and (transformer_keys_present or base_model_keys_present)
def lora_model_from_flux_diffusers_state_dict(
@@ -49,8 +58,16 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
"""
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
# Determine which prefix is used and remove it from all keys.
# Check if any key starts with "base_model.model." prefix
has_base_model_prefix = any(k.startswith("base_model.model.") for k in grouped_state_dict.keys())
if has_base_model_prefix:
# Remove the "base_model.model." prefix from all keys.
grouped_state_dict = {k.replace("base_model.model.", ""): v for k, v in grouped_state_dict.items()}
else:
# Remove the "transformer." prefix from all keys.
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
# Constants for FLUX.1
num_double_layers = 19

View File

@@ -0,0 +1,117 @@
from typing import Literal
import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
def estimate_vae_working_memory_sd15_sdxl(
    operation: Literal["encode", "decode"],
    image_tensor: torch.Tensor,
    vae: AutoencoderKL | AutoencoderTiny,
    tile_size: int | None,
    fp32: bool,
) -> int:
    """Estimate the working memory required to encode or decode the given tensor.

    Args:
        operation: Whether the VAE will encode or decode.
        image_tensor: The tensor being processed; only its spatial dims are read,
            and only when tiling is disabled.
        vae: The VAE; consulted only for `tile_sample_min_size` when `tile_size == 0`.
        tile_size: None disables tiling; 0 uses the VAE's default tile size; any
            other value is the tile edge length in pixels.
        fp32: Whether the VAE runs in full (32-bit) precision.

    Returns:
        Estimated peak working memory in bytes.
    """
    # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
    # element size (precision). This estimate is accurate for both SD1 and SDXL.
    element_size = 4 if fp32 else 2

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100

    # Decoding upscales latents to pixel space; encoding works at the input resolution.
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1

    if tile_size is not None:
        if tile_size == 0:
            tile_size = vae.tile_sample_min_size
            assert isinstance(tile_size, int)
        h = tile_size
        w = tile_size
        working_memory = h * w * element_size * scaling_constant
        # We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
        # and number of tiles. We could make this more precise in the future, but this should be good enough for
        # most use cases.
        working_memory = working_memory * 1.25
    else:
        h = latent_scale_factor_for_operation * image_tensor.shape[-2]
        w = latent_scale_factor_for_operation * image_tensor.shape[-1]
        working_memory = h * w * element_size * scaling_constant

    if fp32:
        # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
        working_memory += 250 * 2**20

    # BUGFIX: removed a leftover debug `print(...)` that spammed stdout on every call.
    return int(working_memory)
def estimate_vae_working_memory_cogview4(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
    """Estimate the working memory required by the invocation in bytes.

    Args:
        operation: Whether the VAE will encode or decode.
        image_tensor: The tensor being processed; only its spatial dims are read.
        vae: The VAE; used only to read its parameter element size (precision).

    Returns:
        Estimated peak working memory in bytes.
    """
    # Decoding upscales latents to pixel space; encoding works at the input resolution.
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100
    working_memory = h * w * element_size * scaling_constant

    # BUGFIX: removed a leftover debug `print(...)` that spammed stdout on every call.
    return int(working_memory)
def estimate_vae_working_memory_flux(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoEncoder
) -> int:
    """Estimate the working memory required by the invocation in bytes.

    Args:
        operation: Whether the VAE will encode or decode.
        image_tensor: The tensor being processed; only its spatial dims are read.
        vae: The FLUX autoencoder; used only to read its parameter element size.

    Returns:
        Estimated peak working memory in bytes.
    """
    # Decoding upscales latents to pixel space; encoding works at the input resolution.
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
    out_h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    out_w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding.
    scaling_constant = 2200 if operation == "decode" else 1100
    working_memory = out_h * out_w * element_size * scaling_constant

    # BUGFIX: removed a leftover debug `print(...)` that spammed stdout on every call.
    return int(working_memory)
def estimate_vae_working_memory_sd3(
    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
) -> int:
    """Estimate the working memory required by the invocation in bytes.

    Args:
        operation: Whether the VAE will encode or decode.
        image_tensor: The tensor being processed; only its spatial dims are read.
        vae: The VAE; used only to read its parameter element size (precision).

    Returns:
        Estimated peak working memory in bytes.
    """
    # Decoding upscales latents to pixel space; encoding works at the input resolution.
    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
    element_size = next(vae.parameters()).element_size()

    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
    # Encoding uses ~45% the working memory as decoding. (A stale comment claiming
    # "~50%" was removed; the scaling constants below are the source of truth.)
    scaling_constant = 2200 if operation == "decode" else 1100
    working_memory = h * w * element_size * scaling_constant

    # BUGFIX: removed a leftover debug `print(...)` that spammed stdout on every call.
    return int(working_memory)

View File

@@ -0,0 +1,39 @@
# Bash commands
All commands should be run from `<REPO_ROOT>/invokeai/frontend/web/`.
- `pnpm lint:prettier`: check formatting
- `pnpm lint:eslint`: check for linting issues
- `pnpm lint:knip`: check for unused dependencies
- `pnpm lint:dpdm`: check for dependency cycles
- `pnpm lint:tsc`: check for TypeScript issues
- `pnpm lint`: run all checks
- `pnpm fix`: automatically fix issues where possible
- `pnpm test:no-watch`: run the test suite
# Writing Tests
This repo uses `vitest` for unit tests.
Tests should be colocated with the code they test, and should use the `.test.ts` suffix.
Tests do not need to be written for code that is trivial or has no logic (e.g. simple type definitions, re-exports, etc.). We currently do not do UI tests.
# Agents
- Use @agent-javascript-pro and @agent-typescript-pro for JavaScript and TypeScript code generation and assistance.
- Use @frontend-developer for general frontend development tasks.
## Workflow
Split up tasks into smaller subtasks and handle them one at a time using an agent. Ensure each subtask is completed before moving on to the next.
Each agent should maintain a work log in a markdown file.
When an agent completes a task, it should:
1. Summarize the changes made.
2. List any files that were added, modified, or deleted.
3. Commit the changes with a descriptive commit message.
DO NOT PUSH ANY CHANGES TO THE REMOTE REPOSITORY.

View File

@@ -45,7 +45,7 @@
"@dagrejs/dagre": "^1.1.5",
"@dagrejs/graphlib": "^2.2.4",
"@fontsource-variable/inter": "^5.2.6",
"@invoke-ai/ui-library": "^0.0.46",
"@invoke-ai/ui-library": "^0.0.47",
"@nanostores/react": "^1.0.0",
"@observ33r/object-equals": "^1.1.5",
"@reduxjs/toolkit": "2.8.2",
@@ -56,7 +56,7 @@
"chakra-react-select": "^4.9.2",
"cmdk": "^1.1.1",
"compare-versions": "^6.1.1",
"dockview": "^4.4.1",
"dockview": "^4.7.1",
"es-toolkit": "^1.39.7",
"filesize": "^10.1.6",
"fracturedjsonjs": "^4.1.0",
@@ -69,6 +69,7 @@
"linkify-react": "^4.3.1",
"linkifyjs": "^4.3.1",
"lru-cache": "^11.1.0",
"media-chrome": "^4.13.0",
"mtwist": "^1.0.2",
"nanoid": "^5.1.5",
"nanostores": "^1.0.1",
@@ -87,6 +88,7 @@
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^15.5.3",
"react-icons": "^5.5.0",
"react-player": "^3.3.1",
"react-redux": "9.2.0",
"react-resizable-panels": "^3.0.3",
"react-textarea-autosize": "^8.5.9",

File diff suppressed because it is too large Load Diff

View File

@@ -14,8 +14,7 @@
"gallery": {
"galleryImageSize": "حجم الصورة",
"gallerySettings": "إعدادات المعرض",
"autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة",
"noImagesInGallery": "لا توجد صور في المعرض"
"autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة"
},
"modelManager": {
"modelManager": "مدير النموذج",
@@ -62,12 +61,10 @@
"infillMethod": "طريقة التعبئة",
"tileSize": "حجم البلاطة",
"copyImage": "نسخ الصورة",
"downloadImage": "تحميل الصورة",
"usePrompt": "استخدم المحث",
"useSeed": "استخدام البذور",
"useAll": "استخدام الكل",
"info": "معلومات",
"showOptionsPanel": "إظهار لوحة الخيارات"
"info": "معلومات"
},
"settings": {
"models": "موديلات",

View File

@@ -24,7 +24,6 @@
"ipAdapter": "IP Adapter",
"auto": "Auto",
"controlNet": "ControlNet",
"imageFailedToLoad": "Kann Bild nicht laden",
"modelManager": "Model Manager",
"learnMore": "Mehr erfahren",
"loading": "Lade",
@@ -52,7 +51,6 @@
"somethingWentWrong": "Etwas ist schief gelaufen",
"copyError": "$t(gallery.copy) Fehler",
"input": "Eingabe",
"notInstalled": "Nicht $t(common.installed)",
"alpha": "Alpha",
"red": "Rot",
"green": "Grün",
@@ -62,11 +60,8 @@
"direction": "Richtung",
"save": "Speichern",
"created": "Erstellt",
"prevPage": "Vorherige Seite",
"nextPage": "Nächste Seite",
"unknownError": "Unbekannter Fehler",
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Siehe hier:",
"localSystem": "Lokales System",
"orderBy": "Ordnen nach",
"saveAs": "Speichern als",
"updated": "Aktualisiert",
@@ -77,7 +72,6 @@
"selected": "Ausgewählt",
"beta": "Beta",
"editor": "Editor",
"goTo": "Gehe zu",
"positivePrompt": "Positiv-Prompt",
"negativePrompt": "Negativ-Prompt",
"tab": "Tabulator",
@@ -106,7 +100,6 @@
"values": "Werte",
"min": "Min",
"max": "Max",
"resetToDefaults": "Auf Standard zurücksetzen",
"seed": "Seed",
"row": "Reihe",
"column": "Spalte",
@@ -135,14 +128,12 @@
"galleryImageSize": "Bildgröße",
"gallerySettings": "Galerie-Einstellungen",
"autoSwitchNewImages": "Auto-Wechsel zu neuen Bildern",
"noImagesInGallery": "Keine Bilder in der Galerie",
"loading": "Lade",
"deleteImage_one": "Lösche Bild",
"deleteImage_other": "Lösche {{count}} Bilder",
"copy": "Kopieren",
"download": "Runterladen",
"featuresWillReset": "Wenn Sie dieses Bild löschen, werden diese Funktionen sofort zurückgesetzt.",
"unableToLoad": "Galerie kann nicht geladen werden",
"downloadSelection": "Auswahl herunterladen",
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
@@ -182,16 +173,12 @@
"gallery": "Galerie",
"sortDirection": "Sortierreihenfolge",
"sideBySide": "Nebeneinander",
"openViewer": "Viewer öffnen",
"viewerImage": "Viewer-Bild",
"exitCompare": "Vergleichen beenden",
"closeViewer": "Viewer schließen",
"selectAnImageToCompare": "Wählen Sie ein Bild zum Vergleichen",
"stretchToFit": "Strecken bis es passt",
"displayBoardSearch": "Board durchsuchen",
"displaySearch": "Bild suchen",
"go": "Los",
"jump": "Springen",
"assetsTab": "Dateien, die Sie zur Verwendung in Ihren Projekten hochgeladen haben.",
"imagesTab": "Bilder, die Sie in Invoke erstellt und gespeichert haben.",
"boardsSettings": "Ordnereinstellungen",
@@ -210,10 +197,6 @@
"title": "Bbox Werkzeug",
"desc": "Bbox Werkzeug auswählen."
},
"setFillToWhite": {
"title": "Farbe auf Weiß einstellen",
"desc": "Setzt die aktuelle Werkzeugfarbe auf weiß."
},
"title": "Leinwand",
"selectBrushTool": {
"title": "Pinselwerkzeug",
@@ -578,7 +561,6 @@
"urlOrLocalPath": "URL oder lokaler Pfad",
"install": "Installieren",
"textualInversions": "Textuelle Inversionen",
"ipAdapters": "IP-Adapter",
"modelImageUpdated": "Modellbild aktualisiert",
"path": "Pfad",
"pathToConfig": "Pfad zur Konfiguration",
@@ -601,7 +583,6 @@
"repoVariant": "Repo Variante",
"learnMoreAboutSupportedModels": "Erfahren Sie mehr über die Modelle, die wir unterstützen",
"clipEmbed": "CLIP einbetten",
"starterModelsInModelManager": "Modelle für Ihren Start finden Sie im Modell-Manager",
"noModelsInstalledDesc1": "Installiere Modelle mit dem",
"modelImageUpdateFailed": "Modellbild-Update fehlgeschlagen",
"prune": "Bereinigen",
@@ -661,11 +642,9 @@
"scaledHeight": "Skaliert H",
"infillMethod": "Infill-Methode",
"tileSize": "Kachelgröße",
"downloadImage": "Bild herunterladen",
"usePrompt": "Prompt verwenden",
"useSeed": "Seed verwenden",
"useAll": "Alle verwenden",
"showOptionsPanel": "Optionsleiste zeigen",
"copyImage": "Bild kopieren",
"denoisingStrength": "Stärke der Entrauschung",
"symmetry": "Symmetrie",
@@ -681,10 +660,6 @@
"remixImage": "Remix des Bilds erstellen",
"imageActions": "Weitere Bildaktionen",
"invoke": {
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Breite ist {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Höhe ist {{height}}",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Breite ist {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Höhe ist {{height}}",
"noNodesInGraph": "Keine Knoten im Graphen",
"canvasIsTransforming": "Leinwand ist beschäftigt (wird transformiert)",
"canvasIsRasterizing": "Leinwand ist beschäftigt (wird gerastert)",
@@ -750,7 +725,6 @@
"parametersNotSet": "Parameter nicht zurückgerufen",
"addedToBoard": "Dem Board hinzugefügt",
"loadedWithWarnings": "Workflow mit Warnungen geladen",
"imageSaved": "Bild gespeichert",
"linkCopied": "Link kopiert",
"problemCopyingLayer": "Ebene kann nicht kopiert werden",
"problemSavingLayer": "Ebene kann nicht gespeichert werden",
@@ -761,8 +735,6 @@
"prunedQueue": "Warteschlange bereinigt",
"modelAddedSimple": "Modell zur Warteschlange hinzugefügt",
"parametersSet": "Parameter zurückgerufen",
"imageNotLoadedDesc": "Bild konnte nicht gefunden werden",
"setControlImage": "Als Kontrollbild festlegen",
"sentToUpscale": "An Vergrößerung gesendet",
"parameterNotSetDescWithMessage": "{{parameter}} kann nicht zurückgerufen werden: {{message}}",
"unableToLoadImageMetadata": "Bildmetadaten können nicht geladen werden",
@@ -775,7 +747,6 @@
"parameterSet": "Parameter zurückgerufen",
"importFailed": "Import fehlgeschlagen",
"importSuccessful": "Import erfolgreich",
"setNodeField": "Als Knotenfeld festlegen",
"somethingWentWrong": "Etwas ist schief gelaufen",
"workflowLoaded": "Arbeitsablauf geladen",
"workflowDeleted": "Arbeitsablauf gelöscht",
@@ -783,16 +754,12 @@
"layerCopiedToClipboard": "Ebene in die Zwischenablage kopiert",
"sentToCanvas": "An Leinwand gesendet",
"problemDeletingWorkflow": "Problem beim Löschen des Arbeitsablaufs",
"uploadFailedInvalidUploadDesc_withCount_one": "Darf maximal 1 PNG-, JPEG- oder WEBP-Bild sein.",
"uploadFailedInvalidUploadDesc_withCount_other": "Dürfen maximal {{count}} PNG-, JPEG- oder WEBP-Bild sein.",
"problemRetrievingWorkflow": "Problem beim Abrufen des Arbeitsablaufs",
"uploadFailedInvalidUploadDesc": "Müssen PNG-, JPEG- oder WEBP-Bilder sein.",
"pasteSuccess": "Eingefügt in {{destination}}",
"pasteFailed": "Einfügen fehlgeschlagen",
"unableToCopy": "Kopieren nicht möglich",
"unableToCopyDesc_theseSteps": "diese Schritte",
"noRasterLayers": "Keine Rasterebenen gefunden",
"noActiveRasterLayers": "Keine aktiven Rasterebenen",
"noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
},
"accessibility": {
@@ -845,16 +812,13 @@
"archiveBoard": "Ordner archivieren",
"archived": "Archiviert",
"noBoards": "Kein {{boardType}} Ordner",
"hideBoards": "Ordner verstecken",
"viewBoards": "Ordner ansehen",
"deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt.",
"assetsWithCount_one": "{{count}} in der Sammlung",
"assetsWithCount_other": "{{count}} in der Sammlung",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
"updateBoardError": "Fehler beim Aktualisieren des Ordners",
"uncategorizedImages": "Nicht kategorisierte Bilder",
"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
"deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen"
},
"queue": {
"status": "Status",
@@ -909,7 +873,6 @@
"batchQueuedDesc_other": "{{count}} Einträge an {{direction}} der Wartschlange hinzugefügt",
"openQueue": "Warteschlange öffnen",
"batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
"batchFieldValues": "Stapelverarbeitungswerte",
"batchQueued": "Stapelverarbeitung eingereiht",
"graphQueued": "Graph eingereiht",
"graphFailedToQueue": "Fehler beim Einreihen des Graphen",
@@ -956,8 +919,6 @@
"allPrompts": "Alle Prompts",
"imageDimensions": "Bilder Auslösungen",
"parameterSet": "Parameter {{parameter}} setzen",
"recallParameter": "{{label}} Abrufen",
"parsingFailed": "Parsing Fehlgeschlagen",
"canvasV2Metadata": "Leinwand",
"guidance": "Führung",
"seamlessXAxis": "Nahtlose X Achse",
@@ -1240,9 +1201,7 @@
"collectionFieldType": "{{name}} (Sammlung)",
"connectionWouldCreateCycle": "Verbindung würde einen Kreislauf/cycle schaffen",
"inputMayOnlyHaveOneConnection": "Eingang darf nur eine Verbindung haben",
"hideLegendNodes": "Feldtyp-Legende ausblenden",
"integer": "Ganze Zahl",
"addLinearView": "Zur linearen Ansicht hinzufügen",
"currentImageDescription": "Zeigt das aktuelle Bild im Node-Editor an",
"ipAdapter": "IP-Adapter",
"hideMinimapnodes": "Miniatur-Kartenansicht ausblenden",
@@ -1251,7 +1210,6 @@
"reloadNodeTemplates": "Knoten-Vorlagen neu laden",
"newWorkflow": "Neuer Arbeitsablauf / Workflow",
"newWorkflowDesc": "Einen neuen Arbeitsablauf erstellen?",
"noFieldsLinearview": "Keine Felder zur linearen Ansicht hinzugefügt",
"clearWorkflow": "Workflow löschen",
"clearWorkflowDesc": "Diesen Arbeitsablauf löschen und neu starten?",
"noConnectionInProgress": "Es besteht keine Verbindung",
@@ -1259,7 +1217,6 @@
"nodeVersion": "Knoten Version",
"node": "Knoten",
"nodeSearch": "Knoten suchen",
"removeLinearView": "Entfernen aus Linear View",
"nodeOutputs": "Knoten-Ausgänge",
"nodeTemplate": "Knoten-Vorlage",
"nodeType": "Knotentyp",
@@ -1270,7 +1227,6 @@
"clearWorkflowDesc2": "Ihr aktueller Arbeitsablauf hat ungespeicherte Änderungen.",
"scheduler": "Planer",
"showMinimapnodes": "MiniMap anzeigen",
"showLegendNodes": "Feldtyp-Legende anzeigen",
"executionStateCompleted": "Erledigt",
"downloadWorkflow": "Workflow JSON herunterladen",
"executionStateInProgress": "In Bearbeitung",
@@ -1280,7 +1236,6 @@
"fieldTypesMustMatch": "Feldtypen müssen übereinstimmen",
"fitViewportNodes": "An Ansichtsgröße anpassen",
"loadingNodes": "Lade Nodes...",
"mismatchedVersion": "Ungültiger Knoten: Knoten {{node}} vom Typ {{type}} hat keine passende Version (Update versuchen?)",
"fullyContainNodesHelp": "Nodes müssen vollständig innerhalb der Auswahlbox sein, um ausgewählt werden zu können",
"noWorkflow": "Kein Workflow",
"executionStateError": "Fehler",
@@ -1292,9 +1247,7 @@
"sourceNodeDoesNotExist": "Ungültiger Rand: Quell- / Ausgabe-Knoten {{node}} existiert nicht",
"updateAllNodes": "Update Knoten",
"allNodesUpdated": "Alle Knoten aktualisiert",
"unknownTemplate": "Unbekannte Vorlage",
"updateApp": "Update App",
"unknownInput": "Unbekannte Eingabe: {{name}}",
"unknownNodeType": "Unbekannter Knotentyp",
"float": "Kommazahlen",
"enum": "Aufzählung",
@@ -1310,7 +1263,6 @@
"workflowAuthor": "Autor",
"graph": "Graph",
"workflowDescription": "Kurze Beschreibung",
"versionUnknown": " Version unbekannt",
"workflow": "Arbeitsablauf",
"noGraph": "Kein Graph",
"version": "Version",
@@ -1328,7 +1280,6 @@
"unknownErrorValidatingWorkflow": "Unbekannter Fehler beim Validieren des Arbeitsablaufes",
"inputFieldTypeParseError": "Typ des Eingabefelds {{node}}.{{field}} kann nicht analysiert werden ({{message}})",
"workflowSettings": "Arbeitsablauf Editor Einstellungen",
"unableToLoadWorkflow": "Arbeitsablauf kann nicht geladen werden",
"viewMode": "In linearen Ansicht verwenden",
"unableToValidateWorkflow": "Arbeitsablauf kann nicht validiert werden",
"outputFieldTypeParseError": "Typ des Ausgabefelds {{node}}.{{field}} kann nicht analysiert werden ({{message}})",
@@ -1344,7 +1295,6 @@
"arithmeticSequence": "Arithmetische Folge",
"noBatchGroup": "keine Gruppe",
"generatorNoValues": "leer",
"generatorLoading": "wird geladen",
"generatorLoadFromFile": "Aus Datei laden",
"showEdgeLabels": "Kantenbeschriftungen anzeigen",
"downloadWorkflowError": "Fehler beim Herunterladen des Arbeitsablaufs",
@@ -1352,14 +1302,11 @@
"description": "Beschreibung",
"loadWorkflowDesc": "Arbeitsablauf laden?",
"loadWorkflowDesc2": "Ihr aktueller Arbeitsablauf enthält nicht gespeicherte Änderungen.",
"loadingTemplates": "Lade {{name}}",
"missingSourceOrTargetHandle": "Fehlender Quell- oder Zielgriff",
"missingSourceOrTargetNode": "Fehlender Quell- oder Zielknoten",
"showEdgeLabelsHelp": "Beschriftungen an Kanten anzeigen, um die verknüpften Knoten zu kennzeichnen"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",
"upscaleMethod": "Vergrößerungsmethode",
"metadata": {
"strength": "Auflösungs-Fix Stärke",
"enabled": "Auflösungs-Fix aktiviert",
@@ -1370,11 +1317,9 @@
"models": {
"noMatchingModels": "Keine passenden Modelle",
"loading": "lade",
"noMatchingLoRAs": "Keine passenden LoRAs",
"noModelsAvailable": "Keine Modelle verfügbar",
"selectModel": "Wählen ein Modell aus",
"noRefinerModelsInstalled": "Keine SDXL Refiner-Modelle installiert",
"noLoRAsInstalled": "Keine LoRAs installiert",
"addLora": "LoRA hinzufügen",
"defaultVAE": "Standard VAE",
"lora": "LoRA",
@@ -1404,31 +1349,23 @@
"workflows": "Arbeitsabläufe",
"workflowName": "Arbeitsablauf-Name",
"saveWorkflowAs": "Arbeitsablauf speichern als",
"searchWorkflows": "Suche Arbeitsabläufe",
"newWorkflowCreated": "Neuer Arbeitsablauf erstellt",
"problemSavingWorkflow": "Problem beim Speichern des Arbeitsablaufs",
"problemLoading": "Problem beim Laden von Arbeitsabläufen",
"downloadWorkflow": "Speichern als",
"savingWorkflow": "Speichere Arbeitsablauf...",
"saveWorkflow": "Arbeitsablauf speichern",
"noWorkflows": "Keine Arbeitsabläufe",
"workflowLibrary": "Bibliothek",
"unnamedWorkflow": "Unbenannter Arbeitsablauf",
"noDescription": "Keine Beschreibung",
"clearWorkflowSearchFilter": "Suchfilter zurücksetzen",
"workflowEditorMenu": "Arbeitsablauf-Editor Menü",
"deleteWorkflow": "Arbeitsablauf löschen",
"workflowSaved": "Arbeitsablauf gespeichert",
"uploadWorkflow": "Aus Datei laden",
"openWorkflow": "Arbeitsablauf öffnen",
"saveWorkflowToProject": "Arbeitsablauf in Projekt speichern",
"workflowCleared": "Arbeitsablauf gelöscht",
"loading": "Lade Arbeitsabläufe",
"name": "Name",
"ascending": "Aufsteigend",
"defaultWorkflows": "Standard Arbeitsabläufe",
"userWorkflows": "Benutzer Arbeitsabläufe",
"projectWorkflows": "Projekt Arbeitsabläufe",
"opened": "Geöffnet",
"loadWorkflow": "Arbeitsablauf $t(common.load)",
"updated": "Aktualisiert",
@@ -1442,12 +1379,10 @@
"copyShareLink": "Teilen-Link kopieren",
"download": "Herunterladen",
"convertGraph": "Graph konvertieren",
"filterByTags": "Nach Tags filtern",
"yourWorkflows": "Ihre Arbeitsabläufe",
"recentlyOpened": "Kürzlich geöffnet"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
"scheduler": "Planer",
"steps": "Schritte"
},
@@ -1459,13 +1394,11 @@
"addPromptTrigger": "Prompt-Trigger hinzufügen",
"compatibleEmbeddings": "Kompatible Einbettungen",
"replace": "Ersetzen",
"insert": "Einfügen",
"discard": "Verwerfen",
"generateFromImage": "Prompt aus Bild generieren",
"expandCurrentPrompt": "Aktuelle Prompt erweitern",
"uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
"expandingPrompt": "Prompt wird erweitert...",
"resultTitle": "Prompt-Erweiterung abgeschlossen"
"expandingPrompt": "Prompt wird erweitert..."
},
"ui": {
"tabs": {
@@ -1604,8 +1537,6 @@
"opacity": "Opazität",
"removeBookmark": "Lesezeichen entfernen",
"rasterLayer": "Rasterebene",
"rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
"controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
"deleteSelected": "Ausgewählte löschen",
"newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
"newControlLayerOk": "Kontroll-Ebene erstellt",
@@ -1613,10 +1544,8 @@
"newRasterLayerOk": "Rasterebene erstellt",
"moveToFront": "Nach vorne bringen",
"copyToClipboard": "In die Zwischenablage kopieren",
"controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
"clearCaches": "Cache leeren",
"controlLayer": "Kontroll-Ebene",
"rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
"transparency": "Transparenz",
"canvas": "Leinwand",
"global": "Global",
@@ -1639,9 +1568,7 @@
"weight": "Gewichtung",
"addReferenceImage": "$t(controlLayers.referenceImage) hinzufügen",
"addInpaintMask": "$t(controlLayers.inpaintMask) hinzufügen",
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage) hinzufügen",
"regionalGuidance": "Regionale Führung",
"globalReferenceImages_withCount_visible": "Globale Referenzbilder ({{count}})",
"addPositivePrompt": "$t(controlLayers.prompt) hinzufügen",
"locked": "Gesperrt",
"showHUD": "HUD anzeigen",
@@ -1649,16 +1576,12 @@
"addRasterLayer": "$t(controlLayers.rasterLayer) hinzufügen",
"addRegionalGuidance": "$t(controlLayers.regionalGuidance) hinzufügen",
"addControlLayer": "$t(controlLayers.controlLayer) hinzufügen",
"newCanvasSession": "Neue Leinwand-Sitzung",
"replaceLayer": "Ebene ersetzen",
"newGallerySession": "Neue Galerie-Sitzung",
"unlocked": "Entsperrt",
"showProgressOnCanvas": "Fortschritt auf Leinwand anzeigen",
"controlMode": {
"balanced": "Ausgewogen"
},
"globalReferenceImages_withCount_hidden": "Globale Referenzbilder ({{count}} ausgeblendet)",
"sendToGallery": "An Galerie senden",
"stagingArea": {
"accept": "Annehmen",
"next": "Nächste",
@@ -1666,8 +1589,6 @@
"discard": "Verwerfen",
"previous": "Vorherige"
},
"regionalGuidance_withCount_visible": "Regionale Führung ({{count}})",
"regionalGuidance_withCount_hidden": "Regionale Führung ({{count}} ausgeblendet)",
"settings": {
"snapToGrid": {
"on": "Ein",
@@ -1677,8 +1598,6 @@
},
"layer_one": "Ebene",
"layer_other": "Ebenen",
"layer_withCount_one": "Ebene ({{count}})",
"layer_withCount_other": "Ebenen ({{count}})",
"fill": {
"fillStyle": "Füllstil",
"diagonal": "Diagonal",

View File

@@ -38,10 +38,13 @@
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
"hideBoards": "Hide Boards",
"loading": "Loading...",
"locateInGalery": "Locate in Gallery",
"menuItemAutoAdd": "Auto-add to this Board",
"move": "Move",
"movingImagesToBoard_one": "Moving {{count}} image to board:",
"movingImagesToBoard_other": "Moving {{count}} images to board:",
"movingVideosToBoard_one": "Moving {{count}} video to board:",
"movingVideosToBoard_other": "Moving {{count}} videos to board:",
"myBoard": "My Board",
"noBoards": "No {{boardType}} Boards",
"noMatching": "No matching Boards",
@@ -58,6 +61,8 @@
"imagesWithCount_other": "{{count}} images",
"assetsWithCount_one": "{{count}} asset",
"assetsWithCount_other": "{{count}} assets",
"videosWithCount_one": "{{count}} video",
"videosWithCount_other": "{{count}} videos",
"updateBoardError": "Error updating board"
},
"accordions": {
@@ -99,6 +104,7 @@
"copy": "Copy",
"copyError": "$t(gallery.copy) Error",
"clipboard": "Clipboard",
"crop": "Crop",
"on": "On",
"off": "Off",
"or": "or",
@@ -237,7 +243,10 @@
"resultSubtitle": "Choose how to handle the expanded prompt:",
"replace": "Replace",
"insert": "Insert",
"discard": "Discard"
"discard": "Discard",
"noPromptHistory": "No prompt history recorded.",
"noMatchingPrompts": "No matching prompts in history.",
"toSwitchBetweenPrompts": "to switch between prompts."
},
"queue": {
"queue": "Queue",
@@ -293,7 +302,7 @@
"completedIn": "Completed in",
"batch": "Batch",
"origin": "Origin",
"destination": "Destination",
"destination": "Dest",
"upscaling": "Upscaling",
"canvas": "Canvas",
"generation": "Generation",
@@ -319,7 +328,13 @@
"iterations_other": "Iterations",
"generations_one": "Generation",
"generations_other": "Generations",
"batchSize": "Batch Size"
"batchSize": "Batch Size",
"createdAt": "Created At",
"completedAt": "Completed At",
"sortColumn": "Sort Column",
"sortBy": "Sort by {{column}}",
"sortOrderAscending": "Ascending",
"sortOrderDescending": "Descending"
},
"invocationCache": {
"invocationCache": "Invocation Cache",
@@ -360,6 +375,9 @@
"deleteImage_one": "Delete Image",
"deleteImage_other": "Delete {{count}} Images",
"deleteImagePermanent": "Deleted images cannot be restored.",
"deleteVideo_one": "Delete Video",
"deleteVideo_other": "Delete {{count}} Videos",
"deleteVideoPermanent": "Deleted videos cannot be restored.",
"displayBoardSearch": "Board Search",
"displaySearch": "Image Search",
"download": "Download",
@@ -379,9 +397,10 @@
"sortDirection": "Sort Direction",
"showStarredImagesFirst": "Show Starred Images First",
"noImageSelected": "No Image Selected",
"noVideoSelected": "No Video Selected",
"noImagesInGallery": "No Images to Display",
"starImage": "Star Image",
"unstarImage": "Unstar Image",
"starImage": "Star",
"unstarImage": "Unstar",
"unableToLoad": "Unable to load Gallery",
"deleteSelection": "Delete Selection",
"downloadSelection": "Download Selection",
@@ -410,7 +429,9 @@
"openViewer": "Open Viewer",
"closeViewer": "Close Viewer",
"move": "Move",
"useForPromptGeneration": "Use for Prompt Generation"
"useForPromptGeneration": "Use for Prompt Generation",
"videos": "Videos",
"videosTab": "Videos you've created and saved within Invoke."
},
"hotkeys": {
"hotkeys": "Hotkeys",
@@ -455,10 +476,22 @@
"title": "Select the Queue Tab",
"desc": "Selects the Queue tab."
},
"selectVideoTab": {
"title": "Select the Video Tab",
"desc": "Selects the Video tab."
},
"focusPrompt": {
"title": "Focus Prompt",
"desc": "Move cursor focus to the positive prompt."
},
"promptHistoryPrev": {
"title": "Previous Prompt in History",
"desc": "When the prompt is focused, move to the previous (older) prompt in your history."
},
"promptHistoryNext": {
"title": "Next Prompt in History",
"desc": "When the prompt is focused, move to the next (newer) prompt in your history."
},
"toggleLeftPanel": {
"title": "Toggle Left Panel",
"desc": "Show or hide the left panel."
@@ -481,6 +514,9 @@
"key": "1"
}
},
"video": {
"title": "Video"
},
"canvas": {
"title": "Canvas",
"selectBrushTool": {
@@ -571,9 +607,13 @@
"title": "Prev Layer",
"desc": "Select the previous layer in the list."
},
"setFillToWhite": {
"title": "Set Color to White",
"desc": "Set the current tool color to white."
"setFillColorsToDefault": {
"title": "Set Colors to Default",
"desc": "Set the current tool colors to default."
},
"toggleFillColor": {
"title": "Toggle Fill Color",
"desc": "Toggle the current tool fill color."
},
"filterSelected": {
"title": "Filter",
@@ -621,6 +661,10 @@
"title": "Fit Bbox To Masks",
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
},
"toggleBbox": {
"title": "Toggle Bbox Visibility",
"desc": "Hide or show the generation bounding box"
},
"applySegmentAnything": {
"title": "Apply Segment Anything",
"desc": "Apply the current Segment Anything mask.",
@@ -766,6 +810,9 @@
}
}
},
"lora": {
"weight": "Weight"
},
"metadata": {
"allPrompts": "All Prompts",
"cfgScale": "CFG scale",
@@ -776,11 +823,13 @@
"guidance": "Guidance",
"height": "Height",
"imageDetails": "Image Details",
"videoDetails": "Video Details",
"imageDimensions": "Image Dimensions",
"metadata": "Metadata",
"model": "Model",
"negativePrompt": "Negative Prompt",
"noImageDetails": "No image details found",
"noVideoDetails": "No video details found",
"noMetaData": "No metadata found",
"noRecallParameters": "No parameters to recall found",
"parameterSet": "Parameter {{parameter}} set",
@@ -798,7 +847,11 @@
"vae": "VAE",
"width": "Width",
"workflow": "Workflow",
"canvasV2Metadata": "Canvas Layers"
"canvasV2Metadata": "Canvas Layers",
"videoModel": "Model",
"videoDuration": "Duration",
"videoAspectRatio": "Aspect Ratio",
"videoResolution": "Resolution"
},
"modelManager": {
"active": "active",
@@ -873,6 +926,9 @@
"install": "Install",
"installAll": "Install All",
"installRepo": "Install Repo",
"installBundle": "Install Bundle",
"installBundleMsg1": "Are you sure you want to install the {{bundleName}} bundle?",
"installBundleMsg2": "This bundle will install the following {{count}} models:",
"ipAdapters": "IP Adapters",
"learnMoreAboutSupportedModels": "Learn more about the models we support",
"load": "Load",
@@ -1181,6 +1237,7 @@
},
"parameters": {
"aspect": "Aspect",
"duration": "Duration",
"lockAspectRatio": "Lock Aspect Ratio",
"swapDimensions": "Swap Dimensions",
"setToOptimalSize": "Optimize size for model",
@@ -1205,9 +1262,15 @@
"height": "Height",
"imageFit": "Fit Initial Image To Output Size",
"images": "Images",
"images_withCount_one": "Image",
"images_withCount_other": "Images",
"videos_withCount_one": "Video",
"videos_withCount_other": "Videos",
"infillMethod": "Infill Method",
"infillColorValue": "Fill Color",
"info": "Info",
"startingFrameImage": "Start Frame",
"startingFrameImageAspectRatioWarning": "Image aspect ratio does not match the video aspect ratio ({{videoAspectRatio}}). This could lead to unexpected cropping during video generation.",
"invoke": {
"addingImagesTo": "Adding images to",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
@@ -1231,6 +1294,7 @@
"batchNodeCollectionSizeMismatchNoGroupId": "Batch group collection size mismatch",
"batchNodeCollectionSizeMismatch": "Collection size mismatch on Batch {{batchGroupId}}",
"noModelSelected": "No model selected",
"noStartingFrameImage": "No starting frame image",
"noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
"noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
"noCLIPEmbedModelSelected": "No CLIP Embed model selected for FLUX generation",
@@ -1243,7 +1307,7 @@
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with FLUX Kontext via BFL API",
"incompatibleLoRAs": "Incompatible LoRA(s) added",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1253,7 +1317,8 @@
"noNodesInGraph": "No nodes in graph",
"systemDisconnected": "System disconnected",
"promptExpansionPending": "Prompt expansion in progress",
"promptExpansionResultPending": "Please accept or discard your prompt expansion result"
"promptExpansionResultPending": "Please accept or discard your prompt expansion result",
"videoIsDisabled": "Video generation is not enabled for {{accountType}} accounts."
},
"maskBlur": "Mask Blur",
"negativePromptPlaceholder": "Negative Prompt",
@@ -1271,9 +1336,11 @@
"seamlessXAxis": "Seamless X Axis",
"seamlessYAxis": "Seamless Y Axis",
"seed": "Seed",
"videoActions": "Video Actions",
"imageActions": "Image Actions",
"sendToCanvas": "Send To Canvas",
"sendToUpscale": "Send To Upscale",
"sendToVideo": "Send To Video",
"showOptionsPanel": "Show Side Panel (O or T)",
"shuffle": "Shuffle Seed",
"steps": "Steps",
@@ -1285,6 +1352,7 @@
"postProcessing": "Post-Processing (Shift + U)",
"processImage": "Process Image",
"upscaling": "Upscaling",
"video": "Video",
"useAll": "Use All",
"useSize": "Use Size",
"useCpuNoise": "Use CPU Noise",
@@ -1296,6 +1364,7 @@
"gaussianBlur": "Gaussian Blur",
"boxBlur": "Box Blur",
"staged": "Staged",
"resolution": "Resolution",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your <LinkComponent>account settings</LinkComponent> to upgrade."
},
"dynamicPrompts": {
@@ -1373,8 +1442,8 @@
"addedToBoard": "Added to board {{name}}'s assets",
"addedToUncategorized": "Added to board $t(boards.uncategorized)'s assets",
"baseModelChanged": "Base Model Changed",
"baseModelChangedCleared_one": "Cleared or disabled {{count}} incompatible submodel",
"baseModelChangedCleared_other": "Cleared or disabled {{count}} incompatible submodels",
"baseModelChangedCleared_one": "Updated, cleared or disabled {{count}} incompatible submodel",
"baseModelChangedCleared_other": "Updated, cleared or disabled {{count}} incompatible submodels",
"canceled": "Processing Canceled",
"connected": "Connected to Server",
"imageCopied": "Image Copied",
@@ -1942,8 +2011,11 @@
"zoomToNode": "Zoom to Node",
"nodeFieldTooltip": "To add a node field, click the small plus sign button on the field in the Workflow Editor, or drag the field by its name into the form.",
"addToForm": "Add to Form",
"removeFromForm": "Remove from Form",
"label": "Label",
"showDescription": "Show Description",
"showShuffle": "Show Shuffle",
"shuffle": "Shuffle",
"component": "Component",
"numberInput": "Number Input",
"singleLine": "Single Line",
@@ -2024,6 +2096,24 @@
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
"pullBboxIntoReferenceImageError": "Problem Pulling BBox Into ReferenceImage",
"addAdjustments": "Add Adjustments",
"removeAdjustments": "Remove Adjustments",
"adjustments": {
"simple": "Simple",
"curves": "Curves",
"heading": "Adjustments",
"expand": "Expand adjustments",
"collapse": "Collapse adjustments",
"brightness": "Brightness",
"contrast": "Contrast",
"saturation": "Saturation",
"temperature": "Temperature",
"tint": "Tint",
"sharpness": "Sharpness",
"finish": "Finish",
"reset": "Reset",
"master": "Master"
},
"regionIsEmpty": "Selected region is empty",
"mergeVisible": "Merge Visible",
"mergeDown": "Merge Down",
@@ -2222,6 +2312,8 @@
},
"fill": {
"fillColor": "Fill Color",
"bgFillColor": "Background Color",
"fgFillColor": "Foreground Color",
"fillStyle": "Fill Style",
"solid": "Solid",
"grid": "Grid",
@@ -2393,12 +2485,21 @@
"saveAs": "Save As",
"cancel": "Cancel",
"process": "Process",
"help1": "Select a single target object. Add <Bold>Include</Bold> and <Bold>Exclude</Bold> points to indicate which parts of the layer are part of the target object.",
"help2": "Start with one <Bold>Include</Bold> point within the target object. Add more points to refine the selection. Fewer points typically produce better results.",
"help3": "Invert the selection to select everything except the target object.",
"desc": "Select a single target object. After selection is complete, click <Bold>Apply</Bold> to discard everything outside the selected area, or save the selection as a new layer.",
"visualModeDesc": "Visual mode uses box and point inputs to select an object.",
"visualMode1": "Click and drag to draw a box around the object you want to select. You may get better results by drawing the box a bit larger or smaller than the object.",
"visualMode2": "Click to add a green <Bold>include</Bold> point, or shift-click to add a red <Bold>exclude</Bold> point to tell the model what to include or exclude.",
"visualMode3": "Points can be used to refine a box selection or used independently.",
"promptModeDesc": "Prompt mode uses text input to select an object.",
"promptMode1": "Type a brief description of the object you want to select.",
"promptMode2": "Use simple language, avoiding complex descriptions or multiple objects.",
"clickToAdd": "Click on the layer to add a point",
"dragToMove": "Drag a point to move it",
"clickToRemove": "Click on a point to remove it"
"clickToRemove": "Click on a point to remove it",
"model": "Model",
"segmentAnything1": "Segment Anything 1",
"segmentAnything2": "Segment Anything 2",
"prompt": "Selection Prompt"
},
"settings": {
"snapToGrid": {
@@ -2554,19 +2655,30 @@
"queue": "Queue",
"upscaling": "Upscaling",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"video": "Video",
"gallery": "Gallery"
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Workflow Editor",
"imageViewer": "Image Viewer",
"canvas": "Canvas"
"imageViewer": "Viewer",
"canvas": "Canvas",
"video": "Video"
},
"launchpad": {
"workflowsTitle": "Go deep with Workflows.",
"upscalingTitle": "Upscale and add detail.",
"canvasTitle": "Edit and refine on Canvas.",
"generateTitle": "Generate images from text prompts.",
"videoTitle": "Generate videos from text prompts.",
"video": {
"startingFrameCalloutTitle": "Add a Starting Frame",
"startingFrameCalloutDesc": "Add an image to control the first frame of your video."
},
"addStartingFrame": {
"title": "Add a Starting Frame",
"description": "Add an image to control the first frame of your video."
},
"modelGuideText": "Want to learn what prompts work best for each model?",
"modelGuideLink": "Check out our Model Guide.",
"createNewWorkflowFromScratch": "Create a new Workflow from scratch",
@@ -2641,6 +2753,10 @@
}
}
},
"video": {
"noVideoSelected": "No video selected",
"selectFromGallery": "Select a video from the gallery to play"
},
"system": {
"enableLogging": "Enable Logging",
"logLevel": {
@@ -2678,8 +2794,9 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Studio state is saved to the server, allowing you to continue your work on any device.",
"Support for multiple reference images for FLUX Kontext (local model only)."
"Select Object v2: Improved object selection with point and box inputs or text prompts.",
"Raster Layer Adjustments: Easily adjust layer brightness, contrast, saturation, curves and more.",
"Prompt History: Review and quickly recall your last 100 prompts."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -47,11 +47,8 @@
"editor": "Editor",
"orderBy": "Ordenar por",
"file": "Archivo",
"goTo": "Ir a",
"imageFailedToLoad": "No se puede cargar la imagen",
"saveAs": "Guardar Como",
"somethingWentWrong": "Algo salió mal",
"nextPage": "Página Siguiente",
"selected": "Seleccionado",
"tab": "Tabulador",
"positivePrompt": "Prompt Positivo",
@@ -61,7 +58,6 @@
"unknown": "Desconocido",
"input": "Entrada",
"template": "Plantilla",
"prevPage": "Página Anterior",
"red": "Rojo",
"alpha": "Transparencia",
"outputs": "Resultados",
@@ -94,8 +90,6 @@
"edit": "Editar",
"safetensors": "Safetensors",
"toResolve": "Para resolver",
"localSystem": "Sistema local",
"notInstalled": "No $t(common.installed)",
"outpaint": "outpaint",
"simple": "Sencillo",
"close": "Cerrar"
@@ -104,7 +98,6 @@
"galleryImageSize": "Tamaño de la imagen",
"gallerySettings": "Ajustes de la galería",
"autoSwitchNewImages": "Auto seleccionar Imágenes nuevas",
"noImagesInGallery": "No hay imágenes para mostrar",
"deleteImage_one": "Eliminar Imagen",
"deleteImage_many": "Eliminar {{count}} Imágenes",
"deleteImage_other": "Eliminar {{count}} Imágenes",
@@ -118,9 +111,7 @@
"selectForCompare": "Seleccionar para comparar",
"alwaysShowImageSizeBadge": "Mostrar siempre las dimensiones de la imagen",
"currentlyInUse": "Esta imagen se utiliza actualmente con las siguientes funciones:",
"unableToLoad": "No se puede cargar la galería",
"selectAllOnPage": "Seleccionar todo en la página",
"selectAnImageToCompare": "Seleccione una imagen para comparar",
"bulkDownloadFailed": "Error en la descarga",
"compareHelp2": "Presione <Kbd> M </Kbd> para recorrer los modos de comparación.",
"move": "Mover",
@@ -145,7 +136,6 @@
"exitBoardSearch": "Finalizar búsqueda",
"exitSearch": "Salir de la búsqueda de imágenes",
"featuresWillReset": "Si elimina esta imagen, dichas funciones se restablecerán inmediatamente.",
"jump": "Omitir",
"loading": "Cargando",
"newestFirst": "La más nueva primero",
"unstarImage": "Dejar de ser favorita",
@@ -163,9 +153,7 @@
"boardsSettings": "Ajustes de los tableros",
"imagesSettings": "Configuración de imágenes de la galería",
"compareHelp3": "Presione <Kbd> C </Kbd> para intercambiar las imágenes comparadas.",
"showArchivedBoards": "Mostrar paneles archivados",
"closeViewer": "Cerrar visor",
"openViewer": "Abrir visor"
"showArchivedBoards": "Mostrar paneles archivados"
},
"modelManager": {
"modelManager": "Gestor de Modelos",
@@ -239,12 +227,10 @@
"scaledHeight": "Alto escalado",
"infillMethod": "Método de relleno",
"tileSize": "Tamaño del mosaico",
"downloadImage": "Descargar imagen",
"usePrompt": "Usar Entrada",
"useSeed": "Usar Semilla",
"useAll": "Usar Todo",
"info": "Información",
"showOptionsPanel": "Mostrar panel lateral (O o T)",
"symmetry": "Simetría",
"copyImage": "Copiar la imagen",
"general": "General",
@@ -323,8 +309,6 @@
"hideMinimapnodes": "Ocultar el minimapa",
"fitViewportNodes": "Ajustar la vista",
"zoomOutNodes": "Alejar",
"hideLegendNodes": "Ocultar la leyenda del tipo de campo",
"showLegendNodes": "Mostrar la leyenda del tipo de campo",
"showMinimapnodes": "Mostrar el minimapa",
"reloadNodeTemplates": "Recargar las plantillas de nodos",
"loadWorkflow": "Cargar el flujo de trabajo",
@@ -361,7 +345,6 @@
"assetsWithCount_one": "{{count}} activo",
"assetsWithCount_many": "{{count}} activos",
"assetsWithCount_other": "{{count}} activos",
"hideBoards": "Ocultar paneles",
"addPrivateBoard": "Agregar un panel privado",
"addSharedBoard": "Añadir panel compartido",
"boards": "Paneles",
@@ -372,7 +355,6 @@
"noBoards": "No hay paneles {{boardType}}",
"shared": "Paneles compartidos",
"deletedPrivateBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al elegir \"Eliminar solo el panel\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen.",
"viewBoards": "Ver paneles",
"private": "Paneles privados",
"updateBoardError": "No se pudo actualizar el panel"
},
@@ -461,7 +443,6 @@
"other": "Otro",
"queueFront": "Añadir al principio de la cola",
"gallery": "Galería",
"batchFieldValues": "Valores de procesamiento por lotes",
"session": "Sesión",
"notReady": "La cola aún no está lista",
"graphQueued": "Gráfico en cola",
@@ -494,15 +475,11 @@
"layer_one": "Capa",
"layer_many": "Capas",
"layer_other": "Capas",
"layer_withCount_one": "({{count}}) capa",
"layer_withCount_many": "({{count}}) capas",
"layer_withCount_other": "({{count}}) capas",
"copyToClipboard": "Copiar al portapapeles"
},
"whatsNew": {
"readReleaseNotes": "Leer las notas de la versión",
"watchRecentReleaseVideos": "Ver videos de versiones recientes",
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
"whatsNewInInvoke": "Novedades en Invoke",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large."
@@ -527,13 +504,11 @@
},
"hrf": {
"hrf": "Solución de alta resolución",
"enableHrf": "Activar corrección de alta resolución",
"metadata": {
"enabled": "Corrección de alta resolución activada",
"strength": "Forzar la corrección de alta resolución",
"method": "Método de corrección de alta resolución"
},
"upscaleMethod": "Método de expansión"
}
},
"prompt": {
"addPromptTrigger": "Añadir activador de los avisos",
@@ -591,10 +566,6 @@
"title": "Ajustar capas al lienzo",
"desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
},
"setFillToWhite": {
"title": "Establecer color en blanco",
"desc": "Establece el color actual de la herramienta en blanco."
},
"resetSelected": {
"title": "Restablecer capa",
"desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
@@ -868,10 +839,8 @@
"seed": "Semilla",
"strength": "Forzar imagen a imagen",
"recallParameters": "Parámetros de recuperación",
"recallParameter": "Recuperar {{label}}",
"steps": "Pasos",
"noRecallParameters": "Sin parámetros para recuperar",
"parsingFailed": "Error al analizar"
"noRecallParameters": "Sin parámetros para recuperar"
},
"system": {
"logLevel": {

View File

@@ -28,7 +28,6 @@
"gallery": {
"galleryImageSize": "Kuvan koko",
"gallerySettings": "Gallerian asetukset",
"autoSwitchNewImages": "Vaihda uusiin kuviin automaattisesti",
"noImagesInGallery": "Ei kuvia galleriassa"
"autoSwitchNewImages": "Vaihda uusiin kuviin automaattisesti"
}
}

View File

@@ -27,21 +27,15 @@
"error": "Erreur",
"installed": "Installé",
"format": "format",
"goTo": "Aller à",
"input": "Entrée",
"linear": "Linéaire",
"localSystem": "Système local",
"learnMore": "En savoir plus",
"modelManager": "Gestionnaire de modèle",
"notInstalled": "Non $t(common.installed)",
"openInNewTab": "Ouvrir dans un nouvel onglet",
"somethingWentWrong": "Une erreur s'est produite",
"created": "Créé",
"tab": "Onglet",
"folder": "Dossier",
"imageFailedToLoad": "Impossible de charger l'Image",
"prevPage": "Page précédente",
"nextPage": "Page suivante",
"selected": "Sélectionné",
"save": "Enregistrer",
"updated": "Mis à jour",
@@ -111,7 +105,6 @@
"min": "Min",
"max": "Max",
"values": "Valeurs",
"resetToDefaults": "Réinitialiser par défaut",
"seed": "Graine",
"combinatorial": "Combinatoire"
},
@@ -119,11 +112,9 @@
"galleryImageSize": "Taille de l'image",
"gallerySettings": "Paramètres de la galerie",
"autoSwitchNewImages": "Basculer automatiquement vers de nouvelles images",
"noImagesInGallery": "Aucune image à afficher",
"bulkDownloadRequestedDesc": "Votre demande de téléchargement est en cours de traitement. Cela peut prendre quelques instants.",
"deleteSelection": "Supprimer la sélection",
"selectAllOnPage": "Séléctionner toute la page",
"unableToLoad": "Impossible de charger la Galerie",
"featuresWillReset": "Si vous supprimez cette image, ces fonctionnalités vont être réinitialisés.",
"loading": "Chargement",
"sortDirection": "Direction de tri",
@@ -149,7 +140,6 @@
"openInViewer": "Ouvrir dans le Visualiseur",
"showArchivedBoards": "Montrer les Planches archivées",
"selectForCompare": "Séléctionner pour comparaison",
"selectAnImageToCompare": "Séléctionner une Image à comparer",
"exitCompare": "Sortir de la comparaison",
"compareHelp2": "Appuyez sur <Kbd>M</Kbd> pour faire défiler les modes de comparaison.",
"swapImages": "Échanger les Images",
@@ -157,10 +147,7 @@
"compareHelp1": "Maintenir <Kbd>Alt</Kbd> lors du clic d'une image dans la galerie ou en utilisant les flèches du clavier pour changer l'Image à comparer.",
"compareHelp3": "Appuyer sur <Kbd>C</Kbd> pour échanger les images à comparer.",
"image": "image",
"openViewer": "Ouvrir le Visualisateur",
"closeViewer": "Fermer le Visualisateur",
"currentlyInUse": "Cette image est actuellement utilisée dans ces fonctionalités :",
"jump": "Sauter",
"starImage": "Marquer l'Image",
"download": "Téléchargement",
"deleteImage_one": "Supprimer l'Image",
@@ -247,7 +234,6 @@
"metadata": "Métadonnées",
"scanFolder": "Scanner le dossier",
"inplaceInstallDesc": "Installez les modèles sans copier les fichiers. Lors de l'utilisation du modèle, il sera chargé depuis cet emplacement. Si cette option est désactivée, le(s) fichier(s) du modèle seront copiés dans le répertoire des modèles géré par Invoke lors de l'installation.",
"ipAdapters": "Adaptateurs IP",
"installQueue": "File d'attente d'installation",
"modelImageDeleteFailed": "Échec de la suppression de l'image du modèle",
"modelName": "Nom du modèle",
@@ -288,7 +274,6 @@
"scanFolderHelper": "Le dossier sera analysé de manière récursive à la recherche de modèles. Cela peut prendre quelques instants pour des dossiers très volumineux.",
"clipEmbed": "Intégration CLIP",
"spandrelImageToImage": "Image vers Image (Spandrel)",
"starterModelsInModelManager": "Les modèles de démarrage peuvent être trouvés dans le gestionnaire de modèles",
"t5Encoder": "Encodeur T5",
"learnMoreAboutSupportedModels": "En savoir plus sur les modèles que nous prenons en charge",
"includesNModels": "Contient {{n}} modèles et leurs dépendances",
@@ -346,12 +331,10 @@
"infillMethod": "Méthode de Remplissage",
"tileSize": "Taille des Tuiles",
"copyImage": "Copier Image",
"downloadImage": "Télécharger Image",
"usePrompt": "Utiliser la suggestion",
"useSeed": "Utiliser la graine",
"useAll": "Tout utiliser",
"info": "Info",
"showOptionsPanel": "Afficher le panneau latéral (O ou T)",
"invoke": {
"noPrompts": "Aucun prompts généré",
"missingInputForField": "entrée manquante",
@@ -362,21 +345,16 @@
"noModelSelected": "Aucun modèle sélectionné",
"noNodesInGraph": "Aucun nœud dans le graphique",
"systemDisconnected": "Système déconnecté",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la hauteur de la bounding box est {{height}}",
"noFLUXVAEModelSelected": "Aucun modèle VAE sélectionné pour la génération FLUX",
"canvasIsTransforming": "La Toile est occupée (en transformation)",
"canvasIsRasterizing": "La Toile est occupée (en rastérisation)",
"noCLIPEmbedModelSelected": "Aucun modèle CLIP Embed sélectionné pour la génération FLUX",
"canvasIsFiltering": "La Toile est occupée (en filtration)",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box est {{width}}",
"noT5EncoderModelSelected": "Aucun modèle T5 Encoder sélectionné pour la génération FLUX",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box mise à l'échelle est {{width}}",
"canvasIsCompositing": "La Toile est occupée (en composition)",
"collectionTooFewItems": "trop peu d'éléments, minimum {{minItems}}",
"collectionTooManyItems": "trop d'éléments, maximum {{maxItems}}",
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)",
"emptyBatches": "lots vides",
"batchNodeNotConnected": "Noeud de lots non connecté : {{label}}",
"fluxModelMultipleControlLoRAs": "Vous ne pouvez utiliser qu'un seul Control LoRA à la fois",
"collectionNumberLTMin": "{{value}} < {{minimum}} (incl. min)",
@@ -468,9 +446,7 @@
"informationalPopoversDisabled": "Pop-ups d'information désactivés",
"informationalPopoversDisabledDesc": "Les pop-ups d'information ont été désactivés. Activez-les dans les paramètres.",
"confirmOnNewSession": "Confirmer lors d'une nouvelle session",
"modelDescriptionsDisabledDesc": "Les descriptions des modèles dans les menus déroulants ont été désactivées. Activez-les dans les paramètres.",
"enableModelDescriptions": "Activer les descriptions de modèle dans les menus déroulants",
"modelDescriptionsDisabled": "Descriptions de modèle dans les menus déroulants désactivés",
"showDetailedInvocationProgress": "Afficher les détails de progression"
},
"toast": {
@@ -486,17 +462,14 @@
"addedToBoard": "Ajouté aux ressources de la planche {{name}}",
"workflowLoaded": "Workflow chargé",
"connected": "Connecté au serveur",
"setNodeField": "Définir comme champ de nœud",
"imageUploadFailed": "Échec de l'importation de l'image",
"loadedWithWarnings": "Workflow chargé avec des avertissements",
"imageUploaded": "Image importée",
"modelAddedSimple": "Modèle ajouté à la file d'attente",
"setControlImage": "Définir comme image de contrôle",
"workflowDeleted": "Workflow supprimé",
"baseModelChangedCleared_one": "Effacé ou désactivé {{count}} sous-modèle incompatible",
"baseModelChangedCleared_many": "Effacé ou désactivé {{count}} sous-modèles incompatibles",
"baseModelChangedCleared_other": "Effacé ou désactivé {{count}} sous-modèles incompatibles",
"invalidUpload": "Importation invalide",
"problemDownloadingImage": "Impossible de télécharger l'image",
"problemRetrievingWorkflow": "Problème de récupération du Workflow",
"problemDeletingWorkflow": "Problème de suppression du Workflow",
@@ -510,12 +483,10 @@
"errorCopied": "Erreur copiée",
"parametersSet": "Paramètres rappelés",
"somethingWentWrong": "Quelque chose a échoué",
"imageSaved": "Image enregistrée",
"unableToLoadStylePreset": "Impossible de charger le préréglage de style",
"stylePresetLoaded": "Préréglage de style chargé",
"parameterNotSetDescWithMessage": "Impossible de rappeler {{parameter}} : {{message}}",
"importFailed": "Importation échouée",
"imageSavingFailed": "Échec de l'enregistrement de l'image",
"importSuccessful": "Importation réussie",
"outOfMemoryError": "Erreur de mémoire insuffisante",
"sessionRef": "Session : {{sessionId}}",
@@ -523,16 +494,11 @@
"parameterSetDesc": "Rappelé {{parameter}}",
"parameterNotSetDesc": "Impossible de rappeler {{parameter}}",
"layerCopiedToClipboard": "Calque copié dans le presse-papiers",
"layerSavedToAssets": "Calque enregistré dans les ressources",
"problemCopyingLayer": "Impossible de copier la couche",
"baseModelChanged": "Modèle de base changé",
"problemSavingLayer": "Impossible d'enregistrer la couche",
"imageNotLoadedDesc": "Image introuvable",
"linkCopied": "Lien copié",
"imagesWillBeAddedTo": "Les images Importées seront ajoutées au ressources de la Planche {{boardName}}.",
"uploadFailedInvalidUploadDesc_withCount_one": "Doit être au maximum une image PNG ou JPEG.",
"uploadFailedInvalidUploadDesc_withCount_many": "Doit être au maximum {{count}} images PNG ou JPEG.",
"uploadFailedInvalidUploadDesc_withCount_other": "Doit être au maximum {{count}} images PNG ou JPEG.",
"addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)",
"pasteSuccess": "Collé à {{destination}}",
"pasteFailed": "Échec du collage",
@@ -580,8 +546,6 @@
"movingImagesToBoard_one": "Déplacer {{count}} image à cette planche :",
"movingImagesToBoard_many": "Déplacer {{count}} images à cette planche :",
"movingImagesToBoard_other": "Déplacer {{count}} image à cette planche :",
"viewBoards": "Voir les Planches",
"hideBoards": "Cacher les Planches",
"noBoards": "Pas de Planches {{boardType}}",
"shared": "Planches Partagées",
"searchBoard": "Chercher les Planches...",
@@ -681,7 +645,6 @@
"batchQueued": "Lot ajouté à la file d'attente",
"gallery": "Galerie",
"notReady": "Impossible d'ajouter à la file d'attente",
"batchFieldValues": "Valeurs Champ Lot",
"front": "début",
"graphQueued": "Graph ajouté à la file d'attente",
"other": "Autre",
@@ -712,13 +675,11 @@
"compatibleEmbeddings": "Embeddings Compatibles"
},
"hrf": {
"upscaleMethod": "Méthode d'Agrandissement",
"metadata": {
"enabled": "Correction Haute Résolution Activée",
"strength": "Force de la Correction Haute Résolution",
"method": "Méthode de la Correction Haute Résolution"
},
"enableHrf": "Activer la Correction Haute Résolution",
"hrf": "Correction Haute Résolution"
},
"invocationCache": {
@@ -901,10 +862,6 @@
"desc": "Définit le zoom de la toile à 400 %.",
"title": "Zoomer à 400 %"
},
"setFillToWhite": {
"title": "Définir la couleur sur blanc",
"desc": "Définir la couleur de l'outil actuel sur blanc."
},
"transformSelected": {
"title": "Transformer",
"desc": "Transforme la couche sélectionnée."
@@ -1490,8 +1447,7 @@
"showDynamicPrompts": "Afficher les Prompts dynamiques",
"dynamicPrompts": "Prompts Dynamiques",
"promptsPreview": "Prévisualisation des Prompts",
"loading": "Génération des Pompts Dynamiques...",
"promptsToGenerate": "Prompts à générer"
"loading": "Génération des Pompts Dynamiques..."
},
"metadata": {
"positivePrompt": "Prompt Positif",
@@ -1519,18 +1475,12 @@
"recallParameters": "Rappeler les paramètres",
"imageDimensions": "Dimensions de l'image",
"parameterSet": "Paramètre {{parameter}} défini",
"parsingFailed": "L'analyse a échoué",
"recallParameter": "Rappeler {{label}}",
"canvasV2Metadata": "Toile",
"guidance": "Guide",
"seamlessXAxis": "Axe X sans bords",
"seamlessYAxis": "Axe Y sans bords"
},
"sdxl": {
"freePromptStyle": "Écriture de Prompt manuelle",
"concatPromptStyle": "Lier Prompt & Style",
"negStylePrompt": "Style Prompt Négatif",
"posStylePrompt": "Style Prompt Positif",
"refinerStart": "Démarrer le Refiner",
"denoisingStrength": "Force de débruitage",
"steps": "Étapes",
@@ -1547,8 +1497,6 @@
"nodes": {
"showMinimapnodes": "Afficher la MiniCarte",
"fitViewportNodes": "Ajuster la Vue",
"hideLegendNodes": "Masquer la légende du type de champ",
"showLegendNodes": "Afficher la légende du type de champ",
"hideMinimapnodes": "Masquer MiniCarte",
"zoomOutNodes": "Dézoomer",
"zoomInNodes": "Zoomer",
@@ -1572,9 +1520,7 @@
"colorCodeEdges": "Code de couleur des connexions",
"colorCodeEdgesHelp": "Code couleur des connexions en fonction de leurs champs connectés",
"currentImage": "Image actuelle",
"noFieldsLinearview": "Aucun champ ajouté à la vue linéaire",
"float": "Flottant",
"mismatchedVersion": "Nœud invalide : le nœud {{node}} de type {{type}} a une version incompatible (essayez de mettre à jour?)",
"missingTemplate": "Nœud invalide : le nœud {{node}} de type {{type}} modèle manquant (non installé?)",
"noWorkflow": "Pas de Workflow",
"validateConnectionsHelp": "Prévenir la création de connexions invalides et l'invocation de graphes invalides",
@@ -1585,12 +1531,10 @@
"scheduler": "Planificateur",
"notes": "Notes",
"notesDescription": "Ajouter des notes sur votre workflow",
"unableToLoadWorkflow": "Impossible de charger le Workflow",
"addNode": "Ajouter un nœud",
"problemSettingTitle": "Problème lors de définition du Titre",
"connectionWouldCreateCycle": "La connexion créerait un cycle",
"currentImageDescription": "Affiche l'image actuelle dans l'éditeur de nœuds",
"versionUnknown": " Version inconnue",
"cannotConnectInputToInput": "Impossible de connecter l'entrée à l'entrée",
"addNodeToolTip": "Ajouter un nœud (Shift+A, Espace)",
"fullyContainNodesHelp": "Les nœuds doivent être entièrement à l'intérieur de la zone de sélection pour être sélectionnés",
@@ -1606,7 +1550,6 @@
"nodeSearch": "Rechercher des nœuds",
"collection": "Collection",
"noOutputRecorded": "Aucun résultat enregistré",
"removeLinearView": "Retirer de la vue linéaire",
"snapToGrid": "Aligner sur la grille",
"workflow": "Workflow",
"updateApp": "Mettre à jour l'application",
@@ -1615,7 +1558,6 @@
"noConnectionInProgress": "Aucune connexion en cours",
"nodeType": "Type de nœud",
"workflowContact": "Contact",
"unknownTemplate": "Modèle inconnu",
"unknownNode": "Nœud inconnu",
"workflowVersion": "Version",
"string": "Chaîne de caractères",
@@ -1629,7 +1571,6 @@
"cannotDuplicateConnection": "Impossible de créer des connexions en double",
"resetToDefaultValue": "Réinitialiser à la valeur par défaut",
"unknownNodeType": "Type de nœud inconnu",
"unknownInput": "Entrée inconnue : {{name}}",
"prototypeDesc": "Cette invocation est un prototype. Elle peut subir des modifications majeures lors des mises à jour de l'application et peut être supprimée à tout moment.",
"nodePack": "Paquet de nœuds",
"sourceNodeDoesNotExist": "Connexion invalide : le nœud source/de sortie {{node}} n'existe pas",
@@ -1644,7 +1585,6 @@
"clearWorkflow": "Effacer le Workflow",
"clearWorkflowDesc": "Effacer ce workflow et en commencer un nouveau?",
"unsupportedArrayItemType": "type d'élément de tableau non pris en charge \"{{type}}\"",
"addLinearView": "Ajouter à la vue linéaire",
"collectionOrScalarFieldType": "{{name}} (Unique ou Collection)",
"unableToExtractEnumOptions": "impossible d'extraire les options d'énumération",
"unsupportedAnyOfLength": "trop de membres dans l'union ({{count}})",
@@ -1652,7 +1592,6 @@
"viewMode": "Utiliser en vue linéaire",
"collectionFieldType": "{{name}} (Collection)",
"newWorkflow": "Nouveau Workflow",
"reorderLinearView": "Réorganiser la vue linéaire",
"outputFieldTypeParseError": "Impossible d'analyser le type du champ de sortie {{node}}.{{field}} ({{message}})",
"unsupportedMismatchedUnion": "type CollectionOrScalar non concordant avec les types de base {{firstType}} et {{secondType}}",
"unableToParseFieldType": "impossible d'analyser le type de champ",
@@ -1686,13 +1625,9 @@
"arithmeticSequence": "Séquence Arithmétique",
"uniformRandomDistribution": "Distribution Aléatoire Uniforme",
"noBatchGroup": "aucun groupe",
"generatorLoading": "chargement",
"generatorLoadFromFile": "Charger depuis un Fichier",
"dynamicPromptsRandom": "Prompts Dynamiques (Aléatoire)",
"integerRangeGenerator": "Générateur d'interval d'entiers",
"generateValues": "Générer Valeurs",
"linearDistribution": "Distribution Linéaire",
"floatRangeGenerator": "Générateur d'interval de nombres décimaux",
"generatorNRandomValues_one": "{{count}} valeur aléatoire",
"generatorNRandomValues_many": "{{count}} valeurs aléatoires",
"generatorNRandomValues_other": "{{count}} valeurs aléatoires",
@@ -1712,7 +1647,6 @@
"generatorImagesCategory": "Catégorie",
"generatorImagesFromBoard": "Images de la Planche",
"missingSourceOrTargetHandle": "Manque de gestionnaire source ou cible",
"loadingTemplates": "Chargement de {{name}}",
"loadWorkflowDesc2": "Votre workflow actuel contient des modifications non enregistrées.",
"generatorImages_one": "{{count}} image",
"generatorImages_many": "{{count}} images",
@@ -1723,10 +1657,8 @@
"noModelsAvailable": "Aucun modèle disponible",
"loading": "chargement",
"selectModel": "Sélectionner un modèle",
"noMatchingLoRAs": "Aucun LoRA correspondant",
"lora": "LoRA",
"noRefinerModelsInstalled": "Aucun modèle SDXL Refiner installé",
"noLoRAsInstalled": "Aucun LoRA installé",
"addLora": "Ajouter LoRA",
"defaultVAE": "VAE par défaut",
"concepts": "Concepts"
@@ -1734,11 +1666,8 @@
"workflows": {
"workflowLibrary": "Bibliothèque",
"loading": "Chargement des Workflows",
"searchWorkflows": "Chercher des Workflows",
"workflowCleared": "Workflow effacé",
"noDescription": "Aucune description",
"deleteWorkflow": "Supprimer le Workflow",
"openWorkflow": "Ouvrir le Workflow",
"uploadWorkflow": "Charger à partir d'un fichier",
"workflowName": "Nom du Workflow",
"unnamedWorkflow": "Workflow sans nom",
@@ -1751,8 +1680,6 @@
"problemSavingWorkflow": "Problème de sauvegarde du Workflow",
"workflowEditorMenu": "Menu de l'Éditeur de Workflow",
"newWorkflowCreated": "Nouveau Workflow créé",
"clearWorkflowSearchFilter": "Réinitialiser le filtre de recherche de Workflow",
"problemLoading": "Problème de chargement des Workflows",
"workflowSaved": "Workflow enregistré",
"noWorkflows": "Pas de Workflows",
"ascending": "Ascendant",
@@ -1765,9 +1692,6 @@
"opened": "Ouvert",
"name": "Nom",
"autoLayout": "Mise en page automatique",
"defaultWorkflows": "Workflows par défaut",
"userWorkflows": "Workflows de l'utilisateur",
"projectWorkflows": "Workflows du projet",
"copyShareLink": "Copier le lien de partage",
"chooseWorkflowFromLibrary": "Choisir le Workflow dans la Bibliothèque",
"edit": "Modifer",
@@ -1784,7 +1708,6 @@
"multiLine": "Multi Ligne",
"headingPlaceholder": "En-tête vide",
"emptyRootPlaceholderEditMode": "Faites glisser un élément de formulaire ou un champ de nœud ici pour commencer.",
"emptyRootPlaceholderViewMode": "Cliquez sur Modifier pour commencer à créer un formulaire pour ce workflow.",
"containerPlaceholder": "Conteneur Vide",
"row": "Ligne",
"column": "Colonne",
@@ -1798,10 +1721,8 @@
"builder": "Constructeur de Formulaire",
"resetAllNodeFields": "Réinitialiser tous les champs de nœud",
"deleteAllElements": "Supprimer tous les éléments de formulaire",
"workflowBuilderAlphaWarning": "Le constructeur de workflow est actuellement en version alpha. Il peut y avoir des changements majeurs avant la version stable.",
"showDescription": "Afficher la description"
},
"openLibrary": "Ouvrir la Bibliothèque"
}
},
"whatsNew": {
"whatsNewInInvoke": "Quoi de neuf dans Invoke",
@@ -1810,8 +1731,7 @@
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux.",
"Autres améliorations : mise en file d'attente par lots plus rapide, meilleur redimensionnement, sélecteur de couleurs amélioré et nœuds de métadonnées."
],
"readReleaseNotes": "Notes de version",
"watchUiUpdatesOverview": "Aperçu des mises à jour de l'interface utilisateur"
"readReleaseNotes": "Notes de version"
},
"ui": {
"tabs": {
@@ -1828,7 +1748,6 @@
},
"controlLayers": {
"newLayerFromImage": "Nouvelle couche à partir de l'image",
"sendToGalleryDesc": "Appuyer sur Invoker génère et enregistre une image unique dans votre galerie.",
"sendToCanvas": "Envoyer vers la Toile",
"globalReferenceImage": "Image de référence globale",
"newCanvasFromImage": "Nouvelle Toile à partir de l'image",
@@ -1984,7 +1903,6 @@
},
"bookmark": "Marque-page pour Changement Rapide",
"saveLayerToAssets": "Enregistrer la couche dans les ressources",
"stagingOnCanvas": "Mise en attente des images sur",
"enableTransparencyEffect": "Activer l'effet de transparence",
"hidingType": "Masquer {{type}}",
"settings": {
@@ -2015,11 +1933,6 @@
"disableAutoNegative": "Désactiver l'Auto Négatif",
"addNegativePrompt": "Ajouter $t(controlLayers.negativePrompt)",
"addRegionalGuidance": "Ajouter $t(controlLayers.regionalGuidance)",
"controlLayers_withCount_hidden": "Control Layers ({{count}} cachées)",
"rasterLayers_withCount_hidden": "Couche de Rastérisation ({{count}} cachées)",
"regionalGuidance_withCount_hidden": "Guidage Régional ({{count}} caché)",
"rasterLayers_withCount_visible": "Couche de Rastérisation ({{count}})",
"inpaintMasks_withCount_visible": "Masques de remplissage ({{count}})",
"layer_one": "Couche",
"layer_many": "Couches",
"layer_other": "Couches",
@@ -2069,8 +1982,6 @@
"next": "Suivant",
"saveToGallery": "Enregistrer dans la galerie"
},
"viewProgressOnCanvas": "Voir les progrès et les sorties de la scène sur la <Btn>Toile</Btn>.",
"sendToCanvasDesc": "Appuyer sur Invoker met en attente votre travail en cours sur la toile.",
"mergeVisibleError": "Erreur lors de la fusion des calques visibles",
"mergeVisibleOk": "Couches visibles fusionnées",
"clearHistory": "Effacer l'historique",
@@ -2079,8 +1990,6 @@
"duplicate": "Dupliquer",
"enableAutoNegative": "Activer l'Auto Négatif",
"showHUD": "Afficher HUD",
"sendToGallery": "Envoyer à la galerie",
"sendingToGallery": "Envoi des générations à la galerie",
"disableTransparencyEffect": "Désactiver l'effet de transparence",
"HUD": {
"entityStatus": {
@@ -2097,16 +2006,11 @@
"opacity": "Opacité",
"savedToGalleryError": "Erreur lors de l'enregistrement dans la galerie",
"addInpaintMask": "Ajouter $t(controlLayers.inpaintMask)",
"newCanvasSessionDesc": "Cela effacera la toile et tous les paramètres, sauf votre sélection de modèle. Les générations seront mises en attente sur la toile.",
"canvas": "Toile",
"savedToGalleryOk": "Enregistré dans la galerie",
"addPositivePrompt": "Ajouter $t(controlLayers.prompt)",
"showProgressOnCanvas": "Afficher la progression sur la Toile",
"newGallerySession": "Nouvelle session de galerie",
"newCanvasSession": "Nouvelle session de toile",
"showingType": "Afficher {{type}}",
"viewProgressInViewer": "Voir les progrès et les résultats dans le <Btn>Visionneur d'images</Btn>.",
"deletePrompt": "Supprimer le prompt",
"addControlLayer": "Ajouter $t(controlLayers.controlLayer)",
"global": "Global",
"newGlobalReferenceImageOk": "Image de référence globale créée",
@@ -2120,16 +2024,6 @@
"newRasterLayerError": "Problème de création de couche de rastérisation",
"negativePrompt": "Prompt négatif",
"weight": "Poids",
"globalReferenceImages_withCount_hidden": "Images de référence globales ({{count}} cachées)",
"inpaintMasks_withCount_hidden": "Masques de remplissage ({{count}} cachés)",
"regionalGuidance_withCount_visible": "Guidage Régional ({{count}})",
"globalReferenceImage_withCount_one": "$t(controlLayers.globalReferenceImage)",
"globalReferenceImage_withCount_many": "Images de référence globales",
"globalReferenceImage_withCount_other": "Images de référence globales",
"layer_withCount_one": "Couche {{count}}",
"layer_withCount_many": "Couches {{count}}",
"layer_withCount_other": "Couches {{count}}",
"globalReferenceImages_withCount_visible": "Images de référence globales ({{count}})",
"controlMode": {
"controlMode": "Mode de contrôle",
"balanced": "Équilibré",
@@ -2153,18 +2047,14 @@
},
"fitBboxToLayers": "Ajuster la bounding box aux calques",
"regionIsEmpty": "La zone sélectionnée est vide",
"controlLayers_withCount_visible": "Couches de contrôle ({{count}})",
"cropLayerToBbox": "Rogner la couche selon la bounding box",
"sendingToCanvas": "Mise en attente des Générations sur la Toile",
"copyToClipboard": "Copier dans le presse-papiers",
"regionalGuidance_withCount_one": "$t(controlLayers.regionalGuidance)",
"regionalGuidance_withCount_many": "Guidage Régional",
"regionalGuidance_withCount_other": "Guidage Régional",
"newGallerySessionDesc": "Cela effacera la toile et tous les paramètres, sauf votre sélection de modèle. Les générations seront envoyées à la galerie.",
"inpaintMask_withCount_one": "$t(controlLayers.inpaintMask)",
"inpaintMask_withCount_many": "Remplir les masques",
"inpaintMask_withCount_other": "Remplir les masques",
"newImg2ImgCanvasFromImage": "Nouvelle Img2Img à partir de l'image",
"bboxOverlay": "Afficher la superposition des Bounding Box",
"moveToFront": "Déplacer vers le premier plan",
"moveToBack": "Déplacer vers l'arrière-plan",
@@ -2179,7 +2069,6 @@
"inpaintMask": "Masque de remplissage",
"deleteReferenceImage": "Supprimer l'image de référence",
"addReferenceImage": "Ajouter $t(controlLayers.referenceImage)",
"addGlobalReferenceImage": "Ajouter $t(controlLayers.globalReferenceImage)",
"removeBookmark": "Supprimer le marque-page",
"regionalGuidance": "Guide régional",
"regionalReferenceImage": "Image de référence régionale",
@@ -2208,16 +2097,12 @@
"pointType": "Type de point",
"exclude": "Exclure",
"process": "Traiter",
"reset": "Réinitialiser",
"help1": "Sélectionnez un seul objet cible. Ajoutez des points <Bold>Inclure</Bold> et <Bold>Exclure</Bold> pour indiquer quelles parties de la couche font partie de l'objet cible.",
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
"reset": "Réinitialiser"
},
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
"newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
"newRegionalGuidance": "Nouveau $t(controlLayers.regionalGuidance)",
"replaceCurrent": "Remplacer Actuel",
"convertControlLayerTo": "Convertir $t(controlLayers.controlLayer) vers",
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
@@ -2263,9 +2148,7 @@
"pasteToBboxDesc": "Nouvelle couche (dans Bbox)",
"pasteToCanvasDesc": "Nouvelle couche (dans la Toile)",
"useImage": "Utiliser l'image",
"pastedTo": "Collé à {{destination}}",
"referenceImageEmptyState": "<UploadButton>Sélectionner une image</UploadButton> ou faites glisser une image depuis la <GalleryButton>galerie</GalleryButton> sur cette couche pour commencer.",
"referenceImageGlobal": "Image de référence (Globale)"
"referenceImageEmptyState": "<UploadButton>Sélectionner une image</UploadButton> ou faites glisser une image depuis la <GalleryButton>galerie</GalleryButton> sur cette couche pour commencer."
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",

View File

@@ -50,8 +50,7 @@
"gallery": {
"galleryImageSize": "גודל תמונה",
"gallerySettings": "הגדרות גלריה",
"autoSwitchNewImages": "החלף אוטומטית לתמונות חדשות",
"noImagesInGallery": "אין תמונות בגלריה"
"autoSwitchNewImages": "החלף אוטומטית לתמונות חדשות"
},
"parameters": {
"images": "תמונות",
@@ -70,12 +69,10 @@
"tileSize": "גודל אריח",
"symmetry": "סימטריה",
"copyImage": "העתקת תמונה",
"downloadImage": "הורדת תמונה",
"usePrompt": "שימוש בבקשה",
"useSeed": "שימוש בזרע",
"useAll": "שימוש בהכל",
"info": "פרטים",
"showOptionsPanel": "הצג חלונית אפשרויות",
"shuffle": "ערבוב",
"noiseThreshold": "סף רעש",
"perlinNoise": "רעש פרלין",

File diff suppressed because it is too large Load Diff

View File

@@ -27,7 +27,6 @@
"openInNewTab": "新しいタブで開く",
"controlNet": "コントロールネット",
"linear": "リニア",
"imageFailedToLoad": "画像が読み込めません",
"modelManager": "モデルマネージャー",
"learnMore": "もっと学ぶ",
"random": "ランダム",
@@ -56,7 +55,6 @@
"details": "詳細",
"inpaint": "inpaint",
"delete": "削除",
"nextPage": "次のページ",
"copy": "コピー",
"error": "エラー",
"file": "ファイル",
@@ -64,13 +62,10 @@
"input": "インプット",
"format": "形式",
"installed": "インストール済み",
"localSystem": "ローカルシステム",
"outputs": "アウトプット",
"prevPage": "前のページ",
"unknownError": "未知のエラー",
"orderBy": "並び順:",
"enabled": "有効",
"notInstalled": "未 $t(common.installed)",
"positivePrompt": "ポジティブプロンプト",
"negativePrompt": "ネガティブプロンプト",
"selected": "選択済み",
@@ -96,7 +91,6 @@
"close": "閉じる",
"warnings": "警告",
"dontShowMeThese": "次回から表示しない",
"goTo": "移動",
"generating": "生成中",
"loadingModel": "モデルをロード中",
"layout": "レイアウト",
@@ -107,7 +101,6 @@
"min": "最小",
"max": "最大",
"values": "値",
"resetToDefaults": "デフォルトに戻す",
"row": "行",
"column": "列",
"board": "ボード",
@@ -131,7 +124,6 @@
"gallery": {
"galleryImageSize": "画像のサイズ",
"gallerySettings": "ギャラリーの設定",
"noImagesInGallery": "表示する画像がありません",
"autoSwitchNewImages": "新しい画像に自動切替",
"copy": "コピー",
"image": "画像",
@@ -145,7 +137,6 @@
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
"unableToLoad": "ギャラリーをロードできません",
"bulkDownloadRequested": "ダウンロード準備中",
"bulkDownloadRequestedDesc": "ダウンロードの準備中です。しばらくお待ちください。",
"bulkDownloadRequestFailed": "ダウンロード準備中に問題が発生",
@@ -160,7 +151,6 @@
"compareImage": "比較画像",
"openInViewer": "ビューアで開く",
"selectForCompare": "比較対象として選択",
"selectAnImageToCompare": "比較する画像を選択",
"slider": "スライダー",
"sideBySide": "横並び",
"hover": "ホバー",
@@ -172,8 +162,6 @@
"compareHelp4": "<Kbd>[Z</Kbd>]または<Kbd>[Esc</Kbd>]を押して終了します。",
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。",
"move": "移動",
"openViewer": "ビューアを開く",
"closeViewer": "ビューアを閉じる",
"exitSearch": "画像検索を終了",
"oldestFirst": "最古から",
"showStarredImagesFirst": "スター付き画像を最初に",
@@ -182,7 +170,6 @@
"searchImages": "メタデータで検索",
"gallery": "ギャラリー",
"newestFirst": "最新から",
"jump": "ジャンプ",
"go": "進む",
"sortDirection": "並び替え順",
"displayBoardSearch": "ボード検索",
@@ -325,10 +312,6 @@
"desc": "リスト内の前のレイヤーを選択します。",
"title": "前のレイヤー"
},
"setFillToWhite": {
"title": "ツール色を白に設定",
"desc": "現在のツールの色を白色に設定します。"
},
"selectViewTool": {
"title": "表示ツール",
"desc": "表示ツールを選択します。"
@@ -609,7 +592,6 @@
"scanResults": "結果をスキャン",
"scanPlaceholder": "ローカルフォルダへのパス",
"typePhraseHere": "ここにフレーズを入力",
"ipAdapters": "IPアダプター",
"modelImageUpdated": "モデル画像アップデート",
"installAll": "全てインストール",
"installRepo": "リポジトリをインストール",
@@ -651,7 +633,6 @@
"spandrelImageToImage": "Image to Image(スパンドレル)",
"starterBundles": "スターターバンドル",
"starterModels": "スターターモデル",
"starterModelsInModelManager": "スターターモデルがモデルマネージャーで見つかりました",
"modelImageDeleteFailed": "モデル画像の削除失敗",
"urlForbidden": "このモデルにアクセスできません",
"urlForbiddenErrorMessage": "このモデルを配布しているサイトからリクエスト権限が必要かもしれません.",
@@ -660,12 +641,10 @@
"inplaceInstall": "定位置にインストール",
"fileSize": "ファイルサイズ",
"modelPickerFallbackNoModelsInstalled2": "<LinkComponent>モデルマネージャー</LinkComponent> にアクセスしてモデルをインストールしてください.",
"filterModels": "フィルターモデル",
"modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
"manageModels": "モデル管理",
"hfTokenReset": "ハギングフェイストークンリセット",
"relatedModels": "関連のあるモデル",
"showOnlyRelatedModels": "関連している",
"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
"nToInstall": "{{count}}個をインストールする",
@@ -682,12 +661,8 @@
"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
"recommendedModels": "推奨モデル",
"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
"quickStart": "クイックスタートバンドル",
"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
"browseAll": "または、利用可能なすべてのモデルを参照してください。",
"stableDiffusion15": "Stable Diffusion1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
"sdxl": "SDXL"
}
},
"parameters": {
@@ -703,12 +678,10 @@
"scaleBeforeProcessing": "処理前のスケール",
"scaledWidth": "幅のスケール",
"scaledHeight": "高さのスケール",
"downloadImage": "画像をダウンロード",
"usePrompt": "プロンプトを使用",
"useSeed": "シード値を使用",
"useAll": "すべてを使用",
"info": "情報",
"showOptionsPanel": "サイドパネルを表示 (O or T)",
"iterations": "生成回数",
"general": "基本設定",
"setToOptimalSize": "サイズをモデルに最適化",
@@ -722,7 +695,6 @@
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (最小値を除く)",
"missingInputForField": "入力の欠落",
"noModelSelected": "モデルが選択されていません",
"emptyBatches": "空のバッチ",
"collectionStringTooLong": "長すぎます,最大{{maxLength}}",
"batchNodeCollectionSizeMismatchNoGroupId": "バッチグループのコレクションサイズが合いません",
"invoke": "呼び出す",
@@ -734,7 +706,6 @@
"missingNodeTemplate": "ノードテンプレートの欠落",
"batchNodeNotConnected": "バッチノードが: {{label}}につながっていない",
"collectionNumberLTMin": "{{value}} < {{minimum}} (最小増加)",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), スケーリングされたbboxの高さは{{height}}です",
"fluxModelMultipleControlLoRAs": "コントロールLoRAは1度に1つしか使用できません",
"noPrompts": "プロンプトが生成されません",
"noNodesInGraph": "グラフにノードがありません",
@@ -742,7 +713,6 @@
"canvasIsFiltering": "キャンバスがビジー状態(フィルタリング)",
"canvasIsCompositing": "キャンバスがビジー状態(合成)",
"systemDisconnected": "システムが切断されました",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), 拡大縮小されたbboxの幅は{{width}}です",
"canvasIsTransforming": "キャンバスがビジー状態(変換)",
"canvasIsRasterizing": "キャンバスがビジー状態(ラスタライズ)",
"modelIncompatibleBboxHeight": "Bboxの高さは{{height}}ですが,{{model}}は{{multiple}}の倍数が必要です",
@@ -750,12 +720,9 @@
"modelIncompatibleBboxWidth": "Bboxの幅は{{width}}ですが, {{model}}は{{multiple}}の倍数が必要です",
"modelIncompatibleScaledBboxWidth": "bboxの幅は{{width}}ですが,{{model}}は{{multiple}}の倍数が必要です",
"canvasIsSelectingObject": "キャンバスがビジー状態(オブジェクトの選択)",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの幅は{{width}}です",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
"promptExpansionPending": "プロンプト拡張が進行中",
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
},
@@ -830,8 +797,6 @@
"enableHighlightFocusedRegions": "重点領域を強調表示",
"clearIntermediatesDesc1": "中間物をクリアすると、キャンバスとコントロールネットの状態がリセットされます.",
"showProgressInViewer": "ビューアで進行状況画像を表示する",
"modelDescriptionsDisabled": "ドロップダウンのモデル説明が無効になっています",
"modelDescriptionsDisabledDesc": "ドロップダウンのモデル説明が無効になっています.設定で有効にしてください.",
"clearIntermediatesDisabled": "中間物をクリアするにはキューが空でなければなりません",
"clearIntermediatesDesc2": "中間画像は生成時に生成される副産物であり、ギャラリーに表示される結果画像とは異なります.中間画像を削除するとディスク容量が解放されます.",
"intermediatesClearedFailed": "中間物をクリアする問題",
@@ -862,11 +827,9 @@
"imagesWillBeAddedTo": "アップロードされた画像はボード {{boardName}} のアセットに追加されます.",
"layerCopiedToClipboard": "レイヤーがクリップボードにコピーされました",
"pasteFailed": "貼り付け失敗",
"imageSavingFailed": "画像保存に失敗しました",
"importSuccessful": "インポートが成功しました",
"problemDownloadingImage": "画像をダウンロードできません",
"modelAddedSimple": "モデルがキューに追加されました",
"uploadFailedInvalidUploadDesc_withCount_other": "PNG、JPEG、または WEBP 画像は最大 1 つにする必要があります.",
"outOfMemoryErrorDesc": "現在の生成設定はシステム容量を超えています.設定を調整してもう一度お試しください.",
"parametersSet": "パラメーターが呼び出されました",
"modelImportCanceled": "モデルのインポートがキャンセルされました",
@@ -881,14 +844,11 @@
"linkCopied": "リンクがコピーされました",
"unableToLoadImage": "画像をロードできません",
"unableToLoadImageMetadata": "画像のメタデータをロードできません",
"imageSaved": "画像が保存されました",
"importFailed": "インポートに失敗しました",
"invalidUpload": "無効なアップロードです",
"outOfMemoryError": "メモリ不足エラー",
"parameterSetDesc": "{{parameter}}を呼び出し",
"errorCopied": "エラーがコピーされました",
"sentToCanvas": "キャンバスに送信",
"setControlImage": "コントロール画像としてセット",
"workflowLoaded": "ワークフローがロードされました",
"unableToCopy": "コピーできません",
"unableToCopyDesc": "あなたのブラウザはクリップボードアクセスをサポートしていません.Firefoxユーザーの場合は、以下の手順で修正できる可能性があります. ",
@@ -902,32 +862,23 @@
"parameterNotSetDescWithMessage": "{{parameter}}: {{message}}を呼び出せません",
"problemCopyingLayer": "レイヤーをコピーできません",
"problemSavingLayer": "レイヤー保存ができません",
"setNodeField": "ノードフィールドとしてセット",
"layerSavedToAssets": "レイヤーがアセットに保存されました",
"outOfMemoryErrorDescLocal": "OOM を削減するには、<LinkComponent>低 VRAM ガイド</LinkComponent> に従ってください.",
"parameterNotSet": "パラメーターが呼び出されていません",
"addedToBoard": "{{name}} 個の資産をボードに追加しました",
"addedToUncategorized": "$t(boards.uncategorized)個のアセットがボードに追加されました",
"problemDeletingWorkflow": "ワークフローが削除された問題",
"imageNotLoadedDesc": "画像を見つけられません",
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
"noRasterLayers": "ラスターレイヤーが見つかりません",
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
"canvasTooLarge": "キャンバスが大きすぎます",
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
"failedToProcessLayers": "レイヤーの処理に失敗しました",
"psdExportSuccess": "PSDエクスポート完了",
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
"promptGenerationStarted": "プロンプト生成が開始されました",
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
@@ -959,7 +910,6 @@
"positivePrompt": "ポジティブプロンプト",
"strength": "Image to Image 強度",
"recallParameters": "パラメータを再使用",
"recallParameter": "{{label}} を再使用",
"imageDimensions": "画像サイズ",
"imageDetails": "画像の詳細",
"model": "モデル",
@@ -974,7 +924,6 @@
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"canvasV2Metadata": "キャンバス",
"guidance": "手引き",
"parsingFailed": "解析に失敗しました",
"seamlessXAxis": "シームレスX軸",
"seamlessYAxis": "シームレスY軸",
"parameterSet": "パラメーター {{parameter}} が設定されました",
@@ -1032,7 +981,6 @@
"clearQueueAlertDialog2": "キューをクリアしてもよろしいですか?",
"item": "項目",
"graphFailedToQueue": "グラフをキューに追加できませんでした",
"batchFieldValues": "バッチの詳細",
"openQueue": "キューを開く",
"time": "時間",
"completedIn": "完了まで",
@@ -1062,14 +1010,12 @@
"models": {
"noMatchingModels": "一致するモデルがありません",
"loading": "読み込み中",
"noMatchingLoRAs": "一致するLoRAがありません",
"noModelsAvailable": "使用可能なモデルがありません",
"selectModel": "モデルを選択してください",
"concepts": "コンセプト",
"addLora": "LoRAを追加",
"lora": "LoRA",
"defaultVAE": "デフォルトVAE",
"noLoRAsInstalled": "インストールされているLoRAはありません",
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
"noCompatibleLoRAs": "互換性のあるLoRAはありません"
},
@@ -1079,7 +1025,6 @@
"addNodeToolTip": "ノードを追加 (Shift+A, Space)",
"missingTemplate": "Invalid node: タイプ {{type}} のノード {{node}} にテンプレートがありません(未インストール?)",
"loadWorkflow": "ワークフローを読み込み",
"hideLegendNodes": "フィールドタイプの凡例を非表示",
"float": "浮動小数点",
"integer": "整数",
"nodeTemplate": "ノードテンプレート",
@@ -1123,13 +1068,11 @@
"enum": "Enum",
"arithmeticSequence": "等差数列",
"linearDistribution": "線形分布",
"addLinearView": "ライナービューに追加",
"animatedEdges": "アニメーションエッジ",
"uniformRandomDistribution": "一様ランダム分布",
"noBatchGroup": "グループなし",
"parseString": "文字列の解析",
"generatorImagesFromBoard": "ボードからの画像",
"generatorLoading": "読み込み中",
"missingNode": "呼び出しノードがありません",
"missingSourceOrTargetNode": "ソースまたはターゲットノードがありません",
"missingSourceOrTargetHandle": "ソースまたはターゲットハンドルがありません",
@@ -1150,7 +1093,6 @@
"missingInvocationTemplate": "呼び出しテンプレートがありません",
"nodePack": "ノードパック",
"targetNodeFieldDoesNotExist": "無効なエッジ:ターゲット/インプットフィールド{{node}}.{{field}} が存在しません",
"mismatchedVersion": "無効なノード:ノード {{node}} のタイプ {{type}} はバージョンとミスマッチしています (アップデートを試されますか?)",
"dynamicPromptsCombinatorial": "ダイナミックプロンプト(組み合わせ)",
"cannotMixAndMatchCollectionItemTypes": "コレクション・アイテムの種類を組み合わせることはできません",
"missingFieldTemplate": "フィールドテンプレートがありません",
@@ -1160,7 +1102,6 @@
"collectionOrScalarFieldType": "{{name}} (単数またはコレクション)",
"unableToUpdateNode": "ノードアップロード失敗:ノード {{node}} のタイプ {{type}} (削除か再生成が必要かもしれません)",
"deletedInvalidEdge": "無効なエッジを削除しました{{source}} -> {{target}}",
"noFieldsLinearview": "線形ビューに追加されたフィールドがありません",
"collectionFieldType": "{{name}} (コレクション)",
"colorCodeEdgesHelp": "接続されたフィールドによるカラーコードエッジ",
"showEdgeLabelsHelp": "エッジのラベルを表示,接続されているノードを示す",
@@ -1175,7 +1116,6 @@
"loadWorkflowDesc2": "現在のワークフローは保存されていない変更があります.",
"clearWorkflowDesc": "このワークフローをクリアして新しいワークフローにしますか?",
"updateNode": "ノードをアップデート",
"versionUnknown": " バージョン不明",
"graph": "グラフ",
"workflowContact": "お問い合わせ",
"outputFieldTypeParseError": "出力フィールド {{node}}.{{field}} の型を解析できません({{message}})",
@@ -1194,36 +1134,28 @@
"unableToExtractSchemaNameFromRef": "参照からスキーマ名を抽出できません",
"unableToUpdateNodes_other": "{{count}} 個のノードをアップデートできません",
"workflowSettings": "ワークフローエディター設定",
"generateValues": "値を生成",
"floatRangeGenerator": "浮動小数点レンジ生成器",
"integerRangeGenerator": "整数レンジ生成器",
"specialDesc": "この呼び出しは,アプリ内で特別な処理を行います.例えば,バッチノードは1つのワークフローから複数のグラフをキューに入れるために使用されます.",
"modelAccessError": "モデル {{key}}が見つからないので,デフォルトにリセットします",
"betaDesc": "この呼び出しはベータ版です.安定するまでは,アプリのアップデートの際に変更される可能性があります.この呼び出しは長期的にサポートする予定です.",
"internalDesc": "この呼び出しはInvokeによって内部的に使用されます.アプリの更新時に変更される可能性があり,いつでも削除される可能性があります.",
"noFieldsViewMode": "このワークフローには表示する選択フィールドがありません.値を設定するためにはワークフロー全体を表示します.",
"clearWorkflow": "ワークフローをクリア",
"removeLinearView": "線形ビューから削除",
"snapToGrid": "グリッドにスナップ",
"showMinimapnodes": "ミニマップを表示",
"reorderLinearView": "線形ビューの並び替え",
"description": "説明",
"notesDescription": "ワークフローに関するメモを追加する",
"newWorkflowDesc2": "現在のワークフローに保存されていない変更があります.",
"unknownField": "不明なフィールド",
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
"loadingTemplates": "読み込み中 {{name}}",
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
"validateConnections": "接続とグラフを確認する",
"saveToGallery": "ギャラリーに保存",
"newWorkflowDesc": "新しいワークフローを作りますか?",
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
"unableToLoadWorkflow": "ワークフローが読み込めません",
"unableToValidateWorkflow": "ワークフローを確認できません",
"unknownErrorValidatingWorkflow": "ワークフローの確認で不明なエラーが発生",
"clearWorkflowDesc2": "現在のワークフローは保存されていない変更があります.",
"showLegendNodes": "フィールドタイプの凡例を表示",
"unsupportedMismatchedUnion": "CollectionOrScalar型とベース型{{firstType}}および{{secondType}}が不一致です",
"updateApp": "アプリケーションをアップデート",
"noGraph": "グラフなし",
@@ -1241,10 +1173,8 @@
"workflowDescription": "短い説明",
"workflowValidation": "ワークフロー検証エラー",
"noOutputRecorded": "記録されたアウトプットがありません",
"unknownTemplate": "不明なテンプレート",
"nodeOpacity": "ノードの不透明度",
"unableToParseFieldType": "フィールドタイプを解析できません",
"unknownInput": "不明な入力: {{name}}"
"unableToParseFieldType": "フィールドタイプを解析できません"
},
"boards": {
"autoAddBoard": "自動追加するボード",
@@ -1268,7 +1198,6 @@
"deleteBoardOnly": "ボードのみ削除",
"deletedBoardsCannotbeRestored": "削除したボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は未分類の状態になります。",
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
"hideBoards": "ボードを隠す",
"assetsWithCount_other": "{{count}} のアセット",
"addPrivateBoard": "プライベートボードを追加",
"addSharedBoard": "共有ボードを追加",
@@ -1283,10 +1212,8 @@
"selectedForAutoAdd": "自動追加に選択済み",
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は作成者に対して非公開の未分類状態になります。",
"noBoards": "{{boardType}} ボードがありません",
"viewBoards": "ボードを表示",
"uncategorizedImages": "分類されていない画像",
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除"
},
"invocationCache": {
"invocationCache": "呼び出しキャッシュ",
@@ -1758,9 +1685,7 @@
"strength": "高解像修復の強度",
"enabled": "高解像修復が有効"
},
"enableHrf": "高解像修復を有効",
"hrf": "高解像修復",
"upscaleMethod": "アップスケール手法"
"hrf": "高解像修復"
},
"prompt": {
"addPromptTrigger": "プロンプトトリガーを追加",
@@ -1770,10 +1695,7 @@
"expandCurrentPrompt": "現在のプロンプトを展開",
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
"expandingPrompt": "プロンプトを展開しています...",
"resultTitle": "プロンプト拡張完了",
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
"replace": "交換する",
"insert": "挿入する",
"discard": "破棄する"
},
"ui": {
@@ -1839,11 +1761,9 @@
}
},
"controlLayers": {
"globalReferenceImage_withCount_other": "全域参照画像",
"regionalReferenceImage": "領域参照画像",
"saveLayerToAssets": "レイヤーをアセットに保存",
"global": "全域",
"inpaintMasks_withCount_hidden": "インペイントマスク ({{count}} hidden)",
"opacity": "透明度",
"canvasContextMenu": {
"newRegionalGuidance": "新規領域ガイダンス",
@@ -1895,7 +1815,6 @@
"duplicate": "複製",
"addLayer": "レイヤーを追加",
"rasterLayer": "ラスターレイヤー",
"inpaintMasks_withCount_visible": "({{count}}) インペイントマスク",
"regional": "領域",
"rectangle": "矩形",
"moveBackward": "背面へ移動",
@@ -2097,7 +2016,6 @@
"autoNegative": "オートネガティブ",
"enableAutoNegative": "オートネガティブを有効にする",
"disableAutoNegative": "オートネガティブを無効にする",
"deletePrompt": "プロンプトを削除",
"deleteReferenceImage": "参照画像を削除",
"showHUD": "HUDを表示",
"maskFill": "マスク塗りつぶし",
@@ -2109,41 +2027,22 @@
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
"controlLayer": "コントロールレイヤー",
"inpaintMask": "インペイントマスク",
"referenceImageRegional": "参考画像(地域別)",
"referenceImageGlobal": "参考画像(グローバル)",
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
"asControlLayer": "$t(controlLayers.controlLayer) として",
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
"referenceImage": "参照画像",
"sendingToCanvas": "キャンバスに生成をのせる",
"sendingToGallery": "生成をギャラリーに送る",
"sendToGallery": "ギャラリーに送る",
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
"sendToCanvas": "キャンバスに送る",
"newLayerFromImage": "画像から新規レイヤー",
"newCanvasFromImage": "画像から新規キャンバス",
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
"copyToClipboard": "クリップボードにコピー",
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
"rasterLayer_withCount_other": "ラスターレイヤー",
"controlLayer_withCount_other": "コントロールレイヤー",
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}}",
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
"layer_other": "レイヤー",
"layer_withCount_other": "レイヤー ({{count}})",
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
@@ -2161,7 +2060,6 @@
"pasteToBboxDesc": "新しいレイヤーBbox内",
"pasteToCanvas": "キャンバス",
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
"pastedTo": "{{destination}} に貼り付けました",
"transparency": "透明性",
"enableTransparencyEffect": "透明効果を有効にする",
"disableTransparencyEffect": "透明効果を無効にする",
@@ -2174,7 +2072,6 @@
"locked": "ロックされています",
"unlocked": "ロック解除",
"deleteSelected": "選択項目を削除",
"stagingOnCanvas": "ステージング画像",
"replaceLayer": "レイヤーの置き換え",
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
@@ -2182,17 +2079,11 @@
"useImage": "画像を使う",
"negativePrompt": "ネガティブプロンプト",
"beginEndStepPercentShort": "開始/終了 %",
"newGallerySession": "新しいギャラリーセッション",
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
"newCanvasSession": "新規キャンバスセッション",
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
"resetCanvasLayers": "キャンバスレイヤーをリセット",
"resetGenerationSettings": "生成設定をリセット",
"replaceCurrent": "現在のものを置き換える",
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
"imageNoise": "画像ノイズ",
"denoiseLimit": "ノイズ除去制限",
"warnings": {
@@ -2258,9 +2149,6 @@
"saveAs": "名前を付けて保存",
"cancel": "キャンセル",
"process": "プロセス",
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
"dragToMove": "ポイントをドラッグして移動します",
"clickToRemove": "ポイントをクリックして削除します"
@@ -2361,12 +2249,8 @@
"loading": "ロード中...",
"steps": "ステップ",
"refiner": "Refiner",
"negStylePrompt": "ネガティブスタイルプロンプト",
"noModelsAvailable": "利用できるモデルがありません",
"posStylePrompt": "ポジティブスタイルプロンプト",
"cfgScale": "CFGスケール",
"concatPromptStyle": "リンキングプロンプトとスタイル",
"freePromptStyle": "手動スタイルプロンプト",
"posAestheticScore": "ポジティブ美的スコア",
"refinerSteps": "リファイナーステップ",
"refinerStart": "リファイナースタート",
@@ -2384,8 +2268,6 @@
"name": "名前",
"descending": "降順",
"searchPlaceholder": "名前、説明、タグで検索",
"projectWorkflows": "プロジェクトワークフロー",
"searchWorkflows": "ワークフローを検索",
"updated": "アップデート",
"published": "公表",
"builder": {
@@ -2411,10 +2293,8 @@
"addToForm": "フォームに追加",
"headingPlaceholder": "空の見出し",
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
"component": "コンポーネント",
"textPlaceholder": "空のテキスト",
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
"addOption": "オプションを追加",
"singleLine": "単線",
"numberInput": "数値入力",
@@ -2465,20 +2345,15 @@
"convertGraph": "グラフを変換",
"downloadWorkflow": "ファイルに保存",
"saveWorkflow": "ワークフローを保存",
"userWorkflows": "ユーザーワークフロー",
"yourWorkflows": "あなたのワークフロー",
"edit": "編集",
"workflowLibrary": "ワークフローライブラリ",
"workflowSaved": "ワークフローが保存されました",
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
"workflowCleared": "ワークフローが作成されました",
"autoLayout": "オートレイアウト",
"view": "ビュー",
"saveChanges": "変更を保存",
"noDescription": "説明なし",
"recommended": "あなたへのおすすめ",
"noRecentWorkflows": "最近のワークフローがありません",
"problemLoading": "ワークフローのローディングに関する問題",
"newWorkflowCreated": "新しいワークフローが作成されました",
"noWorkflows": "ワークフローがありません",
"copyShareLink": "共有リンクをコピー",
@@ -2486,21 +2361,16 @@
"workflowThumbnail": "ワークフローサムネイル",
"loadWorkflow": "$t(common.load) ワークフロー",
"shared": "共有",
"openWorkflow": "ワークフローを開く",
"emptyStringPlaceholder": "<空の文字列>",
"browseWorkflows": "ワークフローを閲覧する",
"saveWorkflowAs": "ワークフローとして保存",
"private": "プライベート",
"deselectAll": "すべて選択解除",
"delete": "削除",
"openLibrary": "ライブラリを開く",
"loadMore": "もっと読み込む",
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
"created": "作成されました",
"workflowEditorMenu": "ワークフローエディターメニュー",
"defaultWorkflows": "デフォルトワークフロー",
"allLoaded": "すべてのワークフローが読み込まれました",
"filterByTags": "タグでフィルター",
"recentlyOpened": "最近開いた",
"opened": "オープン",
"deleteWorkflow": "ワークフローを削除",
@@ -2546,7 +2416,6 @@
"perIterationDesc": "それぞれのいてレーションに別のシードを使う"
},
"showDynamicPrompts": "ダイナミックプロンプトを表示する",
"promptsToGenerate": "生成するプロンプト",
"dynamicPrompts": "ダイナミックプロンプト",
"loading": "ダイナミックプロンプトを生成...",
"maxPrompts": "最大プロンプト"
@@ -2572,8 +2441,7 @@
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
],
"readReleaseNotes": "リリースノートを読む",
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
"watchUiUpdatesOverview": "Watch UI アップデートの概要"
"watchRecentReleaseVideos": "最近のリリースビデオを見る"
},
"supportVideos": {
"supportVideos": "サポートビデオ",

View File

@@ -27,7 +27,6 @@
"save": "저장",
"created": "생성됨",
"error": "에러",
"prevPage": "이전 페이지",
"ipAdapter": "IP 어댑터",
"installed": "설치됨",
"accept": "수락",
@@ -42,7 +41,6 @@
"outputs": "결과물",
"unknownError": "알려지지 않은 에러",
"linear": "선형",
"imageFailedToLoad": "이미지를 로드할 수 없음",
"direction": "방향",
"data": "데이터",
"somethingWentWrong": "뭔가 잘못됐어요",
@@ -52,7 +50,6 @@
"orderBy": "정렬 기준",
"copyError": "$t(gallery.copy) 에러",
"learnMore": "더 알아보기",
"nextPage": "다음 페이지",
"saveAs": "다른 이름으로 저장",
"loading": "불러오는 중",
"random": "랜덤",
@@ -60,18 +57,15 @@
"postprocessing": "후처리",
"advanced": "고급",
"input": "입력",
"details": "세부사항",
"notInstalled": "설치되지 않음"
"details": "세부사항"
},
"gallery": {
"galleryImageSize": "이미지 크기",
"gallerySettings": "갤러리 설정",
"deleteSelection": "선택 항목 삭제",
"featuresWillReset": "이 이미지를 삭제하면 해당 기능이 즉시 재설정됩니다.",
"noImagesInGallery": "보여줄 이미지가 없음",
"autoSwitchNewImages": "새로운 이미지로 자동 전환",
"loading": "불러오는 중",
"unableToLoad": "갤러리를 로드할 수 없음",
"image": "이미지",
"drop": "드랍",
"downloadSelection": "선택 항목 다운로드",
@@ -151,8 +145,6 @@
"loadWorkflow": "Workflow 불러오기",
"noOutputRecorded": "기록된 출력 없음",
"colorCodeEdgesHelp": "연결된 필드에 따른 색상 코드 선",
"hideLegendNodes": "필드 유형 범례 숨기기",
"addLinearView": "Linear View에 추가",
"float": "실수",
"targetNodeFieldDoesNotExist": "잘못된 모서리: 대상/입력 필드 {{node}}. {{field}}이(가) 없습니다",
"animatedEdges": "애니메이션 모서리",
@@ -160,7 +152,6 @@
"nodeTemplate": "노드 템플릿",
"nodeOpacity": "노드 불투명도",
"sourceNodeDoesNotExist": "잘못된 모서리: 소스/출력 노드 {{node}}이(가) 없습니다",
"noFieldsLinearview": "Linear View에 추가된 필드 없음",
"nodeSearch": "노드 검색",
"inputMayOnlyHaveOneConnection": "입력에 하나의 연결만 있을 수 있습니다",
"notes": "메모",
@@ -195,7 +186,6 @@
"notesDescription": "Workflow에 대한 메모 추가",
"colorCodeEdges": "색상-코드 선",
"targetNodeDoesNotExist": "잘못된 모서리: 대상/입력 노드 {{node}}이(가) 없습니다",
"mismatchedVersion": "잘못된 노드: {{type}} 유형의 {{node}} 노드에 일치하지 않는 버전이 있습니다(업데이트 해보시겠습니까?)",
"addNodeToolTip": "노드 추가(Shift+A, Space)",
"collectionOrScalarFieldType": "{{name}} 컬렉션|Scalar",
"nodeVersion": "노드 버전",
@@ -242,7 +232,6 @@
"next": "다음",
"cancelBatch": "Batch 취소",
"back": "back",
"batchFieldValues": "Batch 필드 값들",
"cancel": "취소",
"session": "세션",
"time": "시간",
@@ -296,8 +285,6 @@
"cacheSize": "캐시 크기"
},
"hrf": {
"enableHrf": "이용 가능한 고해상도 고정",
"upscaleMethod": "업스케일 방법",
"metadata": {
"strength": "고해상도 고정 강도",
"enabled": "고해상도 고정 사용",
@@ -308,12 +295,10 @@
"models": {
"noMatchingModels": "일치하는 모델 없음",
"loading": "로딩중",
"noMatchingLoRAs": "일치하는 LoRA 없음",
"noModelsAvailable": "사용 가능한 모델이 없음",
"addLora": "LoRA 추가",
"selectModel": "모델 선택",
"noRefinerModelsInstalled": "SDXL Refiner 모델이 설치되지 않음",
"noLoRAsInstalled": "설치된 LoRA 없음"
"noRefinerModelsInstalled": "SDXL Refiner 모델이 설치되지 않음"
},
"boards": {
"autoAddBoard": "자동 추가 Board",

View File

@@ -30,12 +30,10 @@
"ipAdapter": "IP-adapter",
"auto": "Autom.",
"controlNet": "ControlNet",
"imageFailedToLoad": "Kan afbeelding niet laden",
"learnMore": "Meer informatie",
"advanced": "Uitgebreid",
"file": "Bestand",
"installed": "Geïnstalleerd",
"notInstalled": "Niet $t(common.installed)",
"simple": "Eenvoudig",
"somethingWentWrong": "Er ging iets mis",
"add": "Voeg toe",
@@ -43,14 +41,12 @@
"details": "Details",
"outputs": "Uitvoeren",
"save": "Bewaar",
"nextPage": "Volgende pagina",
"blue": "Blauw",
"alpha": "Alfa",
"red": "Rood",
"editor": "Editor",
"folder": "Map",
"format": "structuur",
"goTo": "Ga naar",
"template": "Sjabloon",
"input": "Invoer",
"safetensors": "Safetensors",
@@ -62,7 +58,6 @@
"negativePrompt": "Negatieve prompt",
"selected": "Geselecteerd",
"orderBy": "Sorteer op",
"prevPage": "Vorige pagina",
"beta": "Bèta",
"copyError": "$t(gallery.copy) Fout",
"toResolve": "Op te lossen",
@@ -79,21 +74,18 @@
"delete": "Verwijder",
"direction": "Richting",
"error": "Fout",
"localSystem": "Lokaal systeem",
"unknownError": "Onbekende fout"
},
"gallery": {
"galleryImageSize": "Afbeeldingsgrootte",
"gallerySettings": "Instellingen galerij",
"autoSwitchNewImages": "Wissel autom. naar nieuwe afbeeldingen",
"noImagesInGallery": "Geen afbeeldingen om te tonen",
"deleteImage_one": "Verwijder afbeelding",
"deleteImage_other": "",
"deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.",
"autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken",
"featuresWillReset": "Als je deze afbeelding verwijdert, dan worden deze functies onmiddellijk teruggezet.",
"loading": "Bezig met laden",
"unableToLoad": "Kan galerij niet laden",
"downloadSelection": "Download selectie",
"currentlyInUse": "Deze afbeelding is momenteel in gebruik door de volgende functies:",
"copy": "Kopieer",
@@ -199,12 +191,10 @@
"scaledHeight": "Geschaalde H",
"infillMethod": "Infill-methode",
"tileSize": "Grootte tegel",
"downloadImage": "Download afbeelding",
"usePrompt": "Hergebruik invoertekst",
"useSeed": "Hergebruik seed",
"useAll": "Hergebruik alles",
"info": "Info",
"showOptionsPanel": "Toon deelscherm Opties (O of T)",
"symmetry": "Symmetrie",
"cancel": {
"cancel": "Annuleer"
@@ -293,15 +283,12 @@
"baseModelChangedCleared_one": "Basismodel is gewijzigd: {{count}} niet-compatibel submodel weggehaald of uitgeschakeld",
"baseModelChangedCleared_other": "Basismodel is gewijzigd: {{count}} niet-compatibele submodellen weggehaald of uitgeschakeld",
"loadedWithWarnings": "Werkstroom geladen met waarschuwingen",
"setControlImage": "Ingesteld als controle-afbeelding",
"setNodeField": "Ingesteld als knooppuntveld",
"imageUploaded": "Afbeelding geüpload",
"addedToBoard": "Toegevoegd aan bord",
"workflowLoaded": "Werkstroom geladen",
"modelAddedSimple": "Model toegevoegd aan wachtrij",
"imageUploadFailed": "Fout bij uploaden afbeelding",
"workflowDeleted": "Werkstroom verwijderd",
"invalidUpload": "Ongeldige upload",
"problemRetrievingWorkflow": "Fout bij ophalen van werkstroom",
"parameters": "Parameters",
"modelImportCanceled": "Importeren model geannuleerd",
@@ -325,17 +312,14 @@
"zoomOutNodes": "Uitzoomen",
"fitViewportNodes": "Aanpassen aan beeld",
"hideMinimapnodes": "Minimap verbergen",
"showLegendNodes": "Typelegende veld tonen",
"zoomInNodes": "Inzoomen",
"showMinimapnodes": "Minimap tonen",
"hideLegendNodes": "Typelegende veld verbergen",
"reloadNodeTemplates": "Herlaad knooppuntsjablonen",
"loadWorkflow": "Laad werkstroom",
"downloadWorkflow": "Download JSON van werkstroom",
"scheduler": "Planner",
"missingTemplate": "Ongeldig knooppunt: knooppunt {{node}} van het soort {{type}} heeft een ontbrekend sjabloon (niet geïnstalleerd?)",
"workflowDescription": "Korte beschrijving",
"versionUnknown": " Versie onbekend",
"noNodeSelected": "Geen knooppunt gekozen",
"addNode": "Voeg knooppunt toe",
"unableToValidateWorkflow": "Kan werkstroom niet valideren",
@@ -349,9 +333,7 @@
"integer": "Geheel getal",
"nodeTemplate": "Sjabloon knooppunt",
"nodeOpacity": "Dekking knooppunt",
"unableToLoadWorkflow": "Fout bij laden werkstroom",
"snapToGrid": "Lijn uit op raster",
"noFieldsLinearview": "Geen velden toegevoegd aan lineaire weergave",
"nodeSearch": "Zoek naar knooppunten",
"updateNode": "Werk knooppunt bij",
"version": "Versie",
@@ -370,9 +352,7 @@
"edge": "Rand",
"animatedEdgesHelp": "Animeer gekozen randen en randen verbonden met de gekozen knooppunten",
"cannotDuplicateConnection": "Kan geen dubbele verbindingen maken",
"unknownTemplate": "Onbekend sjabloon",
"noWorkflow": "Geen werkstroom",
"removeLinearView": "Verwijder uit lineaire weergave",
"workflowTags": "Labels",
"fullyContainNodesHelp": "Knooppunten moeten zich volledig binnen het keuzevak bevinden om te worden gekozen",
"workflowValidation": "Validatiefout werkstroom",
@@ -397,14 +377,11 @@
"unknownField": "Onbekend veld",
"colorCodeEdges": "Kleurgecodeerde randen",
"unknownNode": "Onbekend knooppunt",
"mismatchedVersion": "Ongeldig knooppunt: knooppunt {{node}} van het soort {{type}} heeft een niet-overeenkomende versie (probeer het bij te werken?)",
"addNodeToolTip": "Voeg knooppunt toe (Shift+A, spatie)",
"loadingNodes": "Bezig met laden van knooppunten...",
"snapToGridHelp": "Lijn knooppunten uit op raster bij verplaatsing",
"workflowSettings": "Instellingen werkstroomeditor",
"addLinearView": "Voeg toe aan lineaire weergave",
"nodePack": "Knooppuntpakket",
"unknownInput": "Onbekende invoer: {{name}}",
"sourceNodeFieldDoesNotExist": "Ongeldige rand: bron-/uitvoerveld {{node}}.{{field}} bestaat niet",
"collectionFieldType": "Verzameling {{name}}",
"deletedInvalidEdge": "Ongeldige hoek {{source}} -> {{target}} verwijderd",
@@ -419,7 +396,6 @@
"sourceNodeDoesNotExist": "Ongeldige rand: bron-/uitvoerknooppunt {{node}} bestaat niet",
"unsupportedArrayItemType": "niet-ondersteunde soort van het array-onderdeel \"{{type}}\"",
"targetNodeFieldDoesNotExist": "Ongeldige rand: doel-/invoerveld {{node}}.{{field}} bestaat niet",
"reorderLinearView": "Herorden lineaire weergave",
"newWorkflowDesc": "Een nieuwe werkstroom aanmaken?",
"collectionOrScalarFieldType": "Verzameling|scalair {{name}}",
"newWorkflow": "Nieuwe werkstroom",
@@ -734,27 +710,21 @@
"refinerStart": "Startwaarde verfijning",
"scheduler": "Planner",
"cfgScale": "CFG-schaal",
"negStylePrompt": "Negatieve-stijlprompt",
"noModelsAvailable": "Geen modellen beschikbaar",
"refiner": "Verfijning",
"negAestheticScore": "Negatieve esthetische score",
"denoisingStrength": "Sterkte ontruising",
"refinermodel": "Verfijningsmodel",
"posAestheticScore": "Positieve esthetische score",
"concatPromptStyle": "Koppelen van prompt en stijl",
"loading": "Bezig met laden...",
"steps": "Stappen",
"posStylePrompt": "Positieve-stijlprompt",
"freePromptStyle": "Handmatige stijlprompt",
"refinerSteps": "Aantal stappen verfijner"
},
"models": {
"noMatchingModels": "Geen overeenkomend modellen",
"loading": "bezig met laden",
"noMatchingLoRAs": "Geen overeenkomende LoRA's",
"noModelsAvailable": "Geen modellen beschikbaar",
"selectModel": "Kies een model",
"noLoRAsInstalled": "Geen LoRA's geïnstalleerd",
"noRefinerModelsInstalled": "Geen SDXL-verfijningsmodellen geïnstalleerd",
"defaultVAE": "Standaard-VAE",
"lora": "LoRA",
@@ -822,14 +792,12 @@
}
},
"hrf": {
"upscaleMethod": "Opschaalmethode",
"metadata": {
"strength": "Sterkte oplossing voor hoge resolutie",
"method": "Methode oplossing voor hoge resolutie",
"enabled": "Oplossing voor hoge resolutie ingeschakeld"
},
"hrf": "Oplossing voor hoge resolutie",
"enableHrf": "Schakel oplossing in voor hoge resolutie"
"hrf": "Oplossing voor hoge resolutie"
},
"prompt": {
"addPromptTrigger": "Voeg prompttrigger toe",

View File

@@ -41,11 +41,9 @@
"somethingWentWrong": "Coś poszło nie tak",
"green": "Zielony",
"red": "Czerwony",
"imageFailedToLoad": "Nie można załadować obrazu",
"saveAs": "Zapisz jako",
"outputs": "Wyjścia",
"data": "Dane",
"localSystem": "System Lokalny",
"t2iAdapter": "Adapter T2I",
"selected": "Zaznaczone",
"warnings": "Ostrzeżenia",
@@ -64,12 +62,10 @@
"openInViewer": "Otwórz podgląd",
"safetensors": "Bezpieczniki",
"ok": "Ok",
"goTo": "Idź do",
"loadingImage": "wczytywanie zdjęcia",
"input": "Wejście",
"view": "Podgląd",
"learnMore": "Dowiedz się więcej",
"notInstalled": "Nie $t(common.installed)",
"loadingModel": "Wczytywanie modelu",
"postprocessing": "Przetwarzanie końcowe",
"random": "Losowo",
@@ -83,10 +79,8 @@
"delete": "Usuń",
"template": "Szablon",
"txt2img": "Tekst na obraz",
"prevPage": "Poprzednia strona",
"file": "Plik",
"toResolve": "Do rozwiązania",
"nextPage": "Następna strona",
"unknownError": "Nieznany błąd",
"placeholderSelectAModel": "Wybierz model",
"new": "Nowy",
@@ -99,7 +93,6 @@
"galleryImageSize": "Rozmiar obrazów",
"gallerySettings": "Ustawienia galerii",
"autoSwitchNewImages": "Przełączaj na nowe obrazy",
"noImagesInGallery": "Brak obrazów w galerii",
"gallery": "Galeria",
"alwaysShowImageSizeBadge": "Zawsze pokazuj odznakę wielkości obrazu",
"assetsTab": "Pliki, które wrzuciłeś do użytku w twoich projektach.",
@@ -128,12 +121,10 @@
"scaledHeight": "Sk. do wys.",
"infillMethod": "Metoda wypełniania",
"tileSize": "Rozmiar kafelka",
"downloadImage": "Pobierz obraz",
"usePrompt": "Skopiuj sugestie",
"useSeed": "Skopiuj inicjator",
"useAll": "Skopiuj wszystko",
"info": "Informacje",
"showOptionsPanel": "Pokaż panel ustawień"
"info": "Informacje"
},
"settings": {
"models": "Modele",
@@ -186,8 +177,6 @@
"selectedForAutoAdd": "Wybrany do automatycznego dodania",
"deleteBoard": "Usuń tablicę",
"clearSearch": "Usuń historię",
"hideBoards": "Ukryj tablice",
"viewBoards": "Zobacz tablice",
"addSharedBoard": "Dodaj udostępnioną tablicę",
"boards": "Tablice",
"addPrivateBoard": "Dodaj prywatną tablicę",
@@ -233,8 +222,7 @@
"strength": "Moc poprawki wysokiej rozdzielczości",
"method": "Metoda High Resolution Fix"
},
"hrf": "Poprawka \"Wysoka rozdzielczość\"",
"enableHrf": "Włącz poprawkę wysokiej rozdzielczości"
"hrf": "Poprawka \"Wysoka rozdzielczość\""
},
"queue": {
"cancelTooltip": "Anuluj aktualną pozycję",
@@ -296,7 +284,6 @@
"completed": "Zakończono",
"item": "Pozycja",
"failed": "Niepowodzenie",
"batchFieldValues": "Masowe Wartości pól",
"graphFailedToQueue": "Nie udało się dodać tabeli do kolejki",
"workflows": "Przepływy pracy",
"next": "Następny",

View File

@@ -17,8 +17,7 @@
"gallery": {
"galleryImageSize": "Tamanho da Imagem",
"gallerySettings": "Configurações de Galeria",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"noImagesInGallery": "Sem Imagens na Galeria"
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente"
},
"modelManager": {
"modelManager": "Gerente de Modelo",
@@ -74,12 +73,10 @@
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"tileSize": "Tamanho do Ladrilho",
"downloadImage": "Baixar Imagem",
"usePrompt": "Usar Prompt",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"info": "Informações",
"showOptionsPanel": "Mostrar Painel de Opções",
"symmetry": "Simetria",
"copyImage": "Copiar imagem",
"denoisingStrength": "A força de remoção de ruído",

View File

@@ -17,7 +17,6 @@
"gallery": {
"gallerySettings": "Configurações de Galeria",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"noImagesInGallery": "Sem Imagens na Galeria",
"galleryImageSize": "Tamanho da Imagem"
},
"modelManager": {
@@ -69,7 +68,6 @@
"tileSize": "Tamanho do Ladrilho",
"symmetry": "Simetria",
"usePrompt": "Usar Prompt",
"showOptionsPanel": "Mostrar Painel de Opções",
"strength": "Força",
"upscaling": "Redimensionando",
"scaleBeforeProcessing": "Escala Antes do Processamento",
@@ -81,7 +79,6 @@
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"copyImage": "Copiar imagem",
"downloadImage": "Descarregar Imagem",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"info": "Informações"

View File

@@ -38,7 +38,6 @@
"save": "Сохранить",
"created": "Создано",
"error": "Ошибка",
"prevPage": "Предыдущая страница",
"simple": "Простой",
"ipAdapter": "IP Adapter",
"installed": "Установлено",
@@ -49,7 +48,6 @@
"template": "Шаблон",
"outputs": "результаты",
"unknownError": "Неизвестная ошибка",
"imageFailedToLoad": "Невозможно загрузить изображение",
"direction": "Направление",
"data": "Данные",
"somethingWentWrong": "Что-то пошло не так",
@@ -58,11 +56,9 @@
"orderBy": "Сортировать по",
"copyError": "Ошибка $t(gallery.copy)",
"learnMore": "Узнать больше",
"nextPage": "Следующая страница",
"saveAs": "Сохранить как",
"input": "Вход",
"details": "Детали",
"notInstalled": "Нет $t(common.installed)",
"or": "или",
"aboutHeading": "Владей своей творческой силой",
"red": "Красный",
@@ -71,7 +67,6 @@
"alpha": "Альфа",
"toResolve": "Чтобы решить",
"copy": "Копировать",
"localSystem": "Локальная система",
"aboutDesc": "Используя Invoke для работы? Проверьте это:",
"add": "Добавить",
"beta": "Бета",
@@ -79,7 +74,6 @@
"positivePrompt": "Позитивный запрос",
"negativePrompt": "Негативный запрос",
"editor": "Редактор",
"goTo": "Перейти к",
"tab": "Вкладка",
"enabled": "Включено",
"disabled": "Отключено",
@@ -101,7 +95,6 @@
"galleryImageSize": "Размер изображений",
"gallerySettings": "Настройка галереи",
"autoSwitchNewImages": "Автоматически выбирать новые",
"noImagesInGallery": "Изображений нет",
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
"deleteImage_one": "Удалить изображение",
"deleteImage_few": "Удалить {{count}} изображения",
@@ -110,7 +103,6 @@
"deleteSelection": "Удалить выделенное",
"featuresWillReset": "Если вы удалите это изображение, эти функции будут немедленно сброшены.",
"loading": "Загрузка",
"unableToLoad": "Невозможно загрузить галерею",
"image": "изображение",
"drop": "перебросить",
"downloadSelection": "Скачать выделенное",
@@ -136,7 +128,6 @@
"compareHelp4": "Нажмите <Kbd>Z</Kbd> или <Kbd>Esc</Kbd> для выхода.",
"compareImage": "Сравнить изображение",
"viewerImage": "Изображение просмотрщика",
"selectAnImageToCompare": "Выберите изображение для сравнения",
"slider": "Слайдер",
"sideBySide": "Бок о бок",
"compareHelp1": "Удерживайте <Kbd>Alt</Kbd> при нажатии на изображение в галерее или при помощи клавиш со стрелками, чтобы изменить сравниваемое изображение.",
@@ -154,11 +145,8 @@
"exitBoardSearch": "Выйти из поиска досок",
"go": "Перейти",
"exitSearch": "Выйти из поиска изображений",
"jump": "Прыгнуть",
"move": "Двигать",
"gallery": "Галерея",
"openViewer": "Открыть просмотрщик",
"closeViewer": "Закрыть просмотрщик",
"imagesTab": "Изображения, созданные и сохраненные в Invoke.",
"assetsTab": "Файлы, которые вы загрузили для использования в своих проектах.",
"boardsSettings": "Настройки доски",
@@ -285,10 +273,6 @@
"title": "Next Layer",
"desc": "Select the next layer in the list."
},
"setFillToWhite": {
"title": "Set Color to White",
"desc": "Set the current tool color to white."
},
"applyFilter": {
"title": "Apply Filter",
"desc": "Apply the pending filter to the selected layer."
@@ -578,8 +562,6 @@
"noModelsInstalled": "Нет установленных моделей",
"noModelsInstalledDesc1": "Установите модели с помощью",
"noMatchingModels": "Нет подходящих моделей",
"ipAdapters": "IP адаптеры",
"starterModelsInModelManager": "Стартовые модели можно найти в Менеджере моделей",
"learnMoreAboutSupportedModels": "Подробнее о поддерживаемых моделях",
"t5Encoder": "T5 энкодер",
"spandrelImageToImage": "Image to Image (Spandrel)",
@@ -616,12 +598,10 @@
"scaledHeight": "Масштаб В",
"infillMethod": "Способ заполнения",
"tileSize": "Размер области",
"downloadImage": "Скачать",
"usePrompt": "Использовать запрос",
"useSeed": "Использовать сид",
"useAll": "Использовать все",
"info": "Метаданные",
"showOptionsPanel": "Показать панель настроек",
"cancel": {
"cancel": "Отмена"
},
@@ -647,10 +627,6 @@
"missingFieldTemplate": "Отсутствует шаблон поля",
"addingImagesTo": "Добавление изображений в",
"invoke": "Создать",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), ширина рамки {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), высота рамки {{height}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), масштабированная высота рамки {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16) масштабированная ширина рамки {{width}}",
"noFLUXVAEModelSelected": "Для генерации FLUX не выбрана модель VAE",
"noT5EncoderModelSelected": "Для генерации FLUX не выбрана модель T5 энкодера",
"canvasIsFiltering": "Холст фильтруется",
@@ -736,9 +712,6 @@
"baseModelChangedCleared_few": "Очищено или отключено {{count}} несовместимых подмодели",
"baseModelChangedCleared_many": "Очищено или отключено {{count}} несовместимых подмоделей",
"loadedWithWarnings": "Рабочий процесс загружен с предупреждениями",
"setControlImage": "Установить как контрольное изображение",
"setNodeField": "Установить как поле узла",
"invalidUpload": "Неверная загрузка",
"imageUploaded": "Изображение загружено",
"addedToBoard": "Добавлено в активы доски {{name}}",
"workflowLoaded": "Рабочий процесс загружен",
@@ -767,21 +740,14 @@
"sentToCanvas": "Отправить на холст",
"unableToLoadImage": "Невозможно загрузить изображение",
"unableToLoadImageMetadata": "Невозможно загрузить метаданные изображения",
"imageSaved": "Изображение сохранено",
"stylePresetLoaded": "Предустановка стиля загружена",
"imageNotLoadedDesc": "Не удалось найти изображение",
"imageSavingFailed": "Не удалось сохранить изображение",
"problemCopyingLayer": "Не удалось скопировать слой",
"unableToLoadStylePreset": "Невозможно загрузить предустановку стиля",
"layerCopiedToClipboard": "Слой скопирован в буфер обмена",
"sentToUpscale": "Отправить на увеличение",
"layerSavedToAssets": "Слой сохранен в активах",
"linkCopied": "Ссылка скопирована",
"addedToUncategorized": "Добавлено в активы доски $t(boards.uncategorized)",
"imagesWillBeAddedTo": "Загруженные изображения будут добавлены в активы доски {{boardName}}.",
"uploadFailedInvalidUploadDesc_withCount_one": "Должно быть не более {{count}} изображения в формате PNG или JPEG.",
"uploadFailedInvalidUploadDesc_withCount_few": "Должно быть не более {{count}} изображений в формате PNG или JPEG.",
"uploadFailedInvalidUploadDesc_withCount_many": "Должно быть не более {{count}} изображений в формате PNG или JPEG."
"imagesWillBeAddedTo": "Загруженные изображения будут добавлены в активы доски {{boardName}}."
},
"accessibility": {
"uploadImage": "Загрузить изображение",
@@ -803,15 +769,12 @@
"zoomInNodes": "Увеличьте масштаб",
"zoomOutNodes": "Уменьшите масштаб",
"fitViewportNodes": "Уместить вид",
"showLegendNodes": "Показать тип поля",
"hideMinimapnodes": "Скрыть миникарту",
"hideLegendNodes": "Скрыть тип поля",
"showMinimapnodes": "Показать миникарту",
"loadWorkflow": "Загрузить рабочий процесс",
"reloadNodeTemplates": "Перезагрузить шаблоны узлов",
"downloadWorkflow": "Скачать JSON рабочего процесса",
"addNode": "Добавить узел",
"addLinearView": "Добавить в линейный вид",
"animatedEdges": "Анимированные ребра",
"animatedEdgesHelp": "Анимация выбранных ребер и ребер, соединенных с выбранными узлами",
"boolean": "Логические значения",
@@ -823,7 +786,6 @@
"workflowDescription": "Краткое описание",
"inputFieldTypeParseError": "Невозможно разобрать тип поля ввода {{node}}.{{field}} ({{message}})",
"unsupportedAnyOfLength": "слишком много элементов объединения ({{count}})",
"versionUnknown": " Версия неизвестна",
"unsupportedArrayItemType": "неподдерживаемый тип элемента массива \"{{type}}\"",
"noNodeSelected": "Узел не выбран",
"unableToValidateWorkflow": "Невозможно проверить рабочий процесс",
@@ -841,10 +803,8 @@
"nodeTemplate": "Шаблон узла",
"nodeOpacity": "Непрозрачность узла",
"sourceNodeDoesNotExist": "Недопустимое ребро: исходный/выходной узел {{node}} не существует",
"unableToLoadWorkflow": "Невозможно загрузить рабочий процесс",
"unableToExtractEnumOptions": "невозможно извлечь параметры перечисления",
"snapToGrid": "Привязка к сетке",
"noFieldsLinearview": "Нет полей, добавленных в линейный вид",
"unableToParseFieldType": "невозможно проанализировать тип поля",
"nodeSearch": "Поиск узлов",
"updateNode": "Обновить узел",
@@ -865,9 +825,7 @@
"edge": "Край",
"sourceNodeFieldDoesNotExist": "Неверный край: поле источника/вывода {{node}}.{{field}} не существует",
"cannotDuplicateConnection": "Невозможно создать дубликаты соединений",
"unknownTemplate": "Неизвестный шаблон",
"noWorkflow": "Нет рабочего процесса",
"removeLinearView": "Удалить из линейного вида",
"workflowTags": "Теги",
"fullyContainNodesHelp": "Чтобы узлы были выбраны, они должны полностью находиться в поле выбора",
"unableToGetWorkflowVersion": "Не удалось получить версию схемы рабочего процесса",
@@ -900,7 +858,6 @@
"colorCodeEdges": "Ребра с цветовой кодировкой",
"unknownNode": "Неизвестный узел",
"targetNodeDoesNotExist": "Недопустимое ребро: целевой/входной узел {{node}} не существует",
"mismatchedVersion": "Недопустимый узел: узел {{node}} типа {{type}} имеет несоответствующую версию (попробовать обновить?)",
"unknownFieldType": "$t(nodes.unknownField) тип: {{type}}",
"collectionOrScalarFieldType": "{{name}} (Один или коллекция)",
"betaDesc": "Этот вызов находится в бета-версии. Пока он не станет стабильным, в нем могут происходить изменения при обновлении приложений. Мы планируем поддерживать этот вызов в течение длительного времени.",
@@ -909,14 +866,12 @@
"snapToGridHelp": "Привязка узлов к сетке при перемещении",
"workflowSettings": "Настройки редактора рабочих процессов",
"deletedInvalidEdge": "Удалено недопустимое ребро {{source}} -> {{target}}",
"unknownInput": "Неизвестный вход: {{name}}",
"newWorkflow": "Новый рабочий процесс",
"newWorkflowDesc": "Создать новый рабочий процесс?",
"clearWorkflow": "Очистить рабочий процесс",
"newWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные изменения.",
"clearWorkflowDesc": "Очистить этот рабочий процесс и создать новый?",
"clearWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные изменения.",
"reorderLinearView": "Изменить порядок линейного просмотра",
"viewMode": "Использовать в линейном представлении",
"editMode": "Открыть в редакторе узлов",
"resetToDefaultValue": "Сбросить к стандартному значению",
@@ -978,8 +933,6 @@
"addPrivateBoard": "Добавить личную доску",
"private": "Личные доски",
"shared": "Общие доски",
"hideBoards": "Скрыть доски",
"viewBoards": "Просмотреть доски",
"noBoards": "Нет досок {{boardType}}",
"deletedPrivateBoardsCannotbeRestored": "Удаленные доски не могут быть восстановлены. Выбор «Удалить только доску» переведет изображения в приватное состояние без категории для создателя изображения.",
"updateBoardError": "Ошибка обновления доски"
@@ -1408,8 +1361,6 @@
"noRecallParameters": "Параметры для вызова не найдены",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"parameterSet": "Параметр {{parameter}} установлен",
"parsingFailed": "Не удалось выполнить синтаксический анализ",
"recallParameter": "Отозвать {{label}}",
"allPrompts": "Все запросы",
"imageDimensions": "Размеры изображения",
"canvasV2Metadata": "Холст",
@@ -1460,7 +1411,6 @@
"next": "Следующий",
"cancelBatch": "Отменить пакет",
"back": "Назад",
"batchFieldValues": "Пакетные значения полей",
"cancel": "Отмена",
"session": "Сессия",
"time": "Время",
@@ -1495,18 +1445,14 @@
"refinerStart": "Запуск доработчика",
"scheduler": "Планировщик",
"cfgScale": "Шкала точности (CFG)",
"negStylePrompt": "Негативный запрос стиля",
"noModelsAvailable": "Нет доступных моделей",
"refiner": "Доработчик",
"negAestheticScore": "Отрицательная эстетическая оценка",
"denoisingStrength": "Шумоподавление",
"refinermodel": "Дорабатывающая модель",
"posAestheticScore": "Положительная эстетическая оценка",
"concatPromptStyle": "Связывание запроса и стиля",
"loading": "Загрузка...",
"steps": "Шаги",
"posStylePrompt": "Запрос стиля",
"freePromptStyle": "Ручной запрос стиля",
"refinerSteps": "Шаги доработчика"
},
"invocationCache": {
@@ -1531,20 +1477,15 @@
"workflowEditorMenu": "Меню редактора рабочего процесса",
"workflowName": "Имя рабочего процесса",
"saveWorkflow": "Сохранить рабочий процесс",
"openWorkflow": "Открытый рабочий процесс",
"clearWorkflowSearchFilter": "Очистить фильтр поиска рабочих процессов",
"workflowLibrary": "Библиотека",
"downloadWorkflow": "Сохранить в файл",
"workflowSaved": "Рабочий процесс сохранен",
"unnamedWorkflow": "Безымянный рабочий процесс",
"savingWorkflow": "Сохранение рабочего процесса...",
"problemLoading": "Проблема с загрузкой рабочих процессов",
"loading": "Загрузка рабочих процессов",
"searchWorkflows": "Поиск рабочих процессов",
"problemSavingWorkflow": "Проблема с сохранением рабочего процесса",
"deleteWorkflow": "Удалить рабочий процесс",
"workflows": "Рабочие процессы",
"noDescription": "Без описания",
"uploadWorkflow": "Загрузить из файла",
"newWorkflowCreated": "Создан новый рабочий процесс",
"saveWorkflowToProject": "Сохранить рабочий процесс в проект",
@@ -1560,9 +1501,6 @@
"convertGraph": "Конвертировать график",
"loadFromGraph": "Загрузка рабочего процесса из графика",
"autoLayout": "Автоматическое расположение",
"userWorkflows": "Пользовательские рабочие процессы",
"projectWorkflows": "Рабочие процессы проекта",
"defaultWorkflows": "Стандартные рабочие процессы",
"deleteWorkflow2": "Вы уверены, что хотите удалить этот рабочий процесс? Это нельзя отменить.",
"chooseWorkflowFromLibrary": "Выбрать рабочий процесс из библиотеки",
"edit": "Редактировать",
@@ -1572,8 +1510,6 @@
"delete": "Удалить"
},
"hrf": {
"enableHrf": "Включить исправление высокого разрешения",
"upscaleMethod": "Метод увеличения",
"metadata": {
"strength": "Сила исправления высокого разрешения",
"enabled": "Исправление высокого разрешения включено",
@@ -1584,12 +1520,10 @@
"models": {
"noMatchingModels": "Нет подходящих моделей",
"loading": "загрузка",
"noMatchingLoRAs": "Нет подходящих LoRA",
"noModelsAvailable": "Нет доступных моделей",
"addLora": "Добавить LoRA",
"selectModel": "Выберите модель",
"noRefinerModelsInstalled": "Дорабатывающие модели SDXL не установлены",
"noLoRAsInstalled": "Нет установленных LoRA",
"lora": "LoRA",
"defaultVAE": "Стандартное VAE",
"concepts": "LoRA"
@@ -1624,7 +1558,6 @@
"moveForward": "Переместить вперёд",
"moveBackward": "Переместить назад",
"autoNegative": "Авто негатив",
"deletePrompt": "Удалить запрос",
"rectangle": "Прямоугольник",
"addNegativePrompt": "Добавить $t(controlLayers.negativePrompt)",
"regionalGuidance": "Региональная точность",
@@ -1794,7 +1727,6 @@
},
"addReferenceImage": "Добавить $t(controlLayers.referenceImage)",
"inpaintMask": "Маска перерисовки",
"sendToGalleryDesc": "При нажатии кнопки Invoke создается изображение и сохраняется в вашей галерее.",
"sendToCanvas": "Отправить на холст",
"regionalGuidance_withCount_one": "$t(controlLayers.regionalGuidance)",
"regionalGuidance_withCount_few": "Региональных точности",
@@ -1806,7 +1738,6 @@
"inpaintMask_withCount_one": "$t(controlLayers.inpaintMask)",
"inpaintMask_withCount_few": "Маски перерисовки",
"inpaintMask_withCount_many": "Масок перерисовки",
"globalReferenceImages_withCount_visible": "Глобальные эталонные изображения ({{count}})",
"controlMode": {
"prompt": "Запрос",
"controlMode": "Режим контроля",
@@ -1842,7 +1773,6 @@
"pullBboxIntoReferenceImage": "Поместить рамку в эталонное изображение",
"enableAutoNegative": "Включить авто негатив",
"maskFill": "Заполнение маски",
"viewProgressInViewer": "Просматривайте прогресс и результаты в <Btn>Просмотрщике изображений</Btn>.",
"tool": {
"move": "Двигать",
"bbox": "Ограничительная рамка",
@@ -1853,18 +1783,10 @@
"colorPicker": "Подборщик цветов"
},
"rasterLayer": "Растровый слой",
"sendingToCanvas": "Постановка генераций на холст",
"rasterLayers_withCount_visible": "Растровые слои ({{count}})",
"regionalGuidance_withCount_hidden": "Региональная точность ({{count}} скрыто)",
"enableTransparencyEffect": "Включить эффект прозрачности",
"hidingType": "Скрыть {{type}}",
"addRegionalGuidance": "Добавить $t(controlLayers.regionalGuidance)",
"sendingToGallery": "Отправка генераций в галерею",
"viewProgressOnCanvas": "Просматривайте прогресс и результаты этапов на <Btn>Холсте</Btn>.",
"controlLayers_withCount_hidden": "Контрольные слои ({{count}} скрыто)",
"rasterLayers_withCount_hidden": "Растровые слои ({{count}} скрыто)",
"deleteSelected": "Удалить выбранное",
"stagingOnCanvas": "Постановка изображений на",
"pullBboxIntoLayer": "Поместить рамку в слой",
"locked": "Заблокировано",
"replaceLayer": "Заменить слой",
@@ -1873,16 +1795,10 @@
"addRasterLayer": "Добавить $t(controlLayers.rasterLayer)",
"addControlLayer": "Добавить $t(controlLayers.controlLayer)",
"addInpaintMask": "Добавить $t(controlLayers.inpaintMask)",
"inpaintMasks_withCount_hidden": "Маски перерисовки ({{count}} скрыто)",
"regionalGuidance_withCount_visible": "Региональная точность ({{count}})",
"newGallerySessionDesc": "Это очистит холст и все настройки, кроме выбранной модели. Генерации будут отправлены в галерею.",
"newCanvasSession": "Новая сессия холста",
"newCanvasSessionDesc": "Это очистит холст и все настройки, кроме выбора модели. Генерации будут размещены на холсте.",
"cropLayerToBbox": "Обрезать слой по ограничительной рамке",
"clipToBbox": "Обрезка штрихов в рамке",
"outputOnlyMaskedRegions": "Вывод только маскированных областей",
"duplicate": "Дублировать",
"inpaintMasks_withCount_visible": "Маски перерисовки ({{count}})",
"layer_one": "Слой",
"layer_few": "Слоя",
"layer_many": "Слоев",
@@ -1901,33 +1817,20 @@
},
"disableAutoNegative": "Отключить авто негатив",
"deleteReferenceImage": "Удалить эталонное изображение",
"controlLayers_withCount_visible": "Контрольные слои ({{count}})",
"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
"rasterLayer_withCount_few": "Растровых слоя",
"rasterLayer_withCount_many": "Растровых слоев",
"transparency": "Прозрачность",
"weight": "Вес",
"newGallerySession": "Новая сессия галереи",
"sendToCanvasDesc": "Нажатие кнопки Invoke отображает вашу текущую работу на холсте.",
"globalReferenceImages_withCount_hidden": "Глобальные эталонные изображения ({{count}} скрыто)",
"layer_withCount_one": "Слой ({{count}})",
"layer_withCount_few": "Слои ({{count}})",
"layer_withCount_many": "Слои ({{count}})",
"disableTransparencyEffect": "Отключить эффект прозрачности",
"showingType": "Показать {{type}}",
"dynamicGrid": "Динамическая сетка",
"logDebugInfo": "Писать отладочную информацию",
"unlocked": "Разблокировано",
"showProgressOnCanvas": "Показать прогресс на холсте",
"globalReferenceImage_withCount_one": "$t(controlLayers.globalReferenceImage)",
"globalReferenceImage_withCount_few": "Глобальных эталонных изображения",
"globalReferenceImage_withCount_many": "Глобальных эталонных изображений",
"regionalReferenceImage": "Региональное эталонное изображение",
"globalReferenceImage": "Глобальное эталонное изображение",
"sendToGallery": "Отправить в галерею",
"referenceImage": "Эталонное изображение",
"addGlobalReferenceImage": "Добавить $t(controlLayers.globalReferenceImage)",
"newImg2ImgCanvasFromImage": "Новое img2img из изображения"
"referenceImage": "Эталонное изображение"
},
"ui": {
"tabs": {

View File

@@ -28,7 +28,6 @@
"gallery": {
"galleryImageSize": "Bildstorlek",
"gallerySettings": "Galleriinställningar",
"noImagesInGallery": "Inga bilder i galleriet",
"autoSwitchNewImages": "Ändra automatiskt till nya bilder"
}
}

View File

@@ -36,12 +36,10 @@
"communityLabel": "Topluluk",
"back": "Geri",
"areYouSure": "Emin misiniz?",
"notInstalled": "$t(common.installed) Değil",
"openInNewTab": "Yeni Sekmede Aç",
"aboutHeading": "Yaratıcı Gücünüzün Sahibi Olun",
"load": "Yükle",
"loading": "Yükleniyor",
"localSystem": "Yerel Sistem",
"inpaint": "içboyama",
"modelManager": "Model Yöneticisi",
"orderBy": "Sırala",
@@ -65,11 +63,8 @@
"format": "biçim",
"details": "Ayrıntılar",
"error": "Hata",
"imageFailedToLoad": "Görsel Yüklenemedi",
"safetensors": "Safetensors",
"upload": "Yükle",
"nextPage": "Sonraki Sayfa",
"prevPage": "Önceki Sayfa",
"dontAskMeAgain": "Bir daha sorma",
"delete": "Kaldır",
"direction": "Yön",
@@ -181,7 +176,6 @@
"session": "Oturum",
"batchQueued": "Toplu İş Sıraya Alındı",
"notReady": "Sıraya Alınamadı",
"batchFieldValues": "Toplu İş Değişkenleri",
"graphFailedToQueue": "Çizge sıraya alınamadı",
"graphQueued": "Çizge sıraya alındı"
},
@@ -207,12 +201,10 @@
"image": "görsel",
"galleryImageSize": "Görsel Boyutu",
"copy": "Kopyala",
"noImagesInGallery": "Gösterilecek Görsel Yok",
"autoSwitchNewImages": "Yeni Görseli Biter Bitmez Gör",
"currentlyInUse": "Bu görsel şurada kullanımda:",
"deleteImage_one": "Görseli Sil",
"deleteImage_other": "",
"unableToLoad": "Galeri Yüklenemedi",
"downloadSelection": "Seçileni İndir",
"dropOrUpload": "$t(gallery.drop) ya da Yükle",
"dropToUpload": "Yüklemek için $t(gallery.drop)",
@@ -220,13 +212,11 @@
},
"hrf": {
"hrf": "Yüksek Çözünürlük Kürü",
"enableHrf": "Yüksek Çözünürlük Kürünü Aç",
"metadata": {
"enabled": "Yüksek Çözünürlük Kürü Açık",
"strength": "Yüksek Çözünürlük Kürü Etkisi",
"method": "Yüksek Çözünürlük Kürü Yöntemi"
},
"upscaleMethod": "Büyütme Yöntemi"
}
},
"hotkeys": {
"noHotkeysFound": "Kısayol Tuşu Bulanamadı",
@@ -256,7 +246,6 @@
"unknownErrorValidatingWorkflow": "İş akışını doğrulamada bilinmeyen bir sorun",
"unableToGetWorkflowVersion": "İş akışı sürümüne ulaşılamadı",
"newWorkflowDesc2": "Geçerli iş akışında kaydedilmemiş değişiklikler var.",
"unableToLoadWorkflow": "İş Akışı Yüklenemedi",
"cannotConnectInputToInput": "Giriş girişe bağlanamaz",
"zoomInNodes": "Yakınlaştır",
"boolean": "Boole Değeri",
@@ -267,16 +256,12 @@
"cannotDuplicateConnection": "Kopya bağlantılar yaratılamaz"
},
"workflows": {
"searchWorkflows": "İş Akışlarında Ara",
"workflowName": "İş Akışı Adı",
"problemSavingWorkflow": "İş Akışını Kaydetmede Sorun",
"saveWorkflow": "İş Akışını Kaydet",
"uploadWorkflow": "Dosyadan Yükle",
"newWorkflowCreated": "Yeni İş Akışı Yaratıldı",
"problemLoading": "İş Akışlarını Yüklemede Sorun",
"loading": "İş Akışları Yükleniyor",
"noDescription": "Tanımsız",
"clearWorkflowSearchFilter": "İş Akışı Aramasını Resetle",
"workflowEditorMenu": "İş Akışı Düzenleyici Menüsü",
"downloadWorkflow": "İndir",
"saveWorkflowAs": "İş Akışını Farklı Kaydet",
@@ -328,7 +313,6 @@
"noiseThreshold": "Gürültü Eşiği",
"seed": "Tohum",
"imageActions": "Görsel İşlemleri",
"showOptionsPanel": "Yan Paneli Göster (O ya da T)",
"shuffle": "Kar",
"usePrompt": "İstemi Kullan",
"setToOptimalSizeTooSmall": "$t(parameters.setToOptimalSize) (çok küçük olabilir)",
@@ -346,7 +330,6 @@
"perlinNoise": "Perlin Gürültüsü",
"scaledWidth": "Ölçekli En",
"seamlessXAxis": "Dikişsiz Döşeme X Ekseni",
"downloadImage": "Görseli İndir",
"type": "Tür"
},
"modelManager": {
@@ -399,11 +382,9 @@
"defaultVAE": "Varsayılan VAE",
"lora": "LoRA",
"noModelsAvailable": "Model yok",
"noMatchingLoRAs": "Uygun LoRA Yok",
"noMatchingModels": "Uygun Model Yok",
"loading": "yükleniyor",
"selectModel": "Model Seçin",
"noLoRAsInstalled": "LoRA Yok"
"selectModel": "Model Seçin"
},
"settings": {
"generation": "Oluşturma"
@@ -411,7 +392,6 @@
"sdxl": {
"cfgScale": "CFG Ölçeği",
"loading": "Yükleniyor...",
"denoisingStrength": "Arındırma Ölçüsü",
"concatPromptStyle": "İstem ve Stili Bitiştir"
"denoisingStrength": "Arındırma Ölçüsü"
}
}

View File

@@ -22,8 +22,7 @@
"gallery": {
"galleryImageSize": "Розмір зображень",
"gallerySettings": "Налаштування галереї",
"autoSwitchNewImages": "Автоматично вибирати нові",
"noImagesInGallery": "Зображень немає"
"autoSwitchNewImages": "Автоматично вибирати нові"
},
"modelManager": {
"modelManager": "Менеджер моделей",
@@ -80,12 +79,10 @@
"scaledHeight": "Масштаб В",
"infillMethod": "Засіб заповнення",
"tileSize": "Розмір області",
"downloadImage": "Завантажити",
"usePrompt": "Використати запит",
"useSeed": "Використати сід",
"useAll": "Використати все",
"info": "Метадані",
"showOptionsPanel": "Показати панель налаштувань",
"general": "Основне",
"denoisingStrength": "Сила шумоподавлення",
"copyImage": "Копіювати зображення",

View File

@@ -20,8 +20,6 @@
"addBoard": "Thêm Bảng",
"downloadBoard": "Tải Xuống Bảng",
"movingImagesToBoard_other": "Di chuyển {{count}} ảnh vào Bảng:",
"viewBoards": "Xem Bảng",
"hideBoards": "Ẩn Bảng",
"noBoards": "Không Có Bảng Thuộc Loại {{boardType}}",
"noMatching": "Không Có Bảng Tương Ứng",
"searchBoard": "Tìm Bảng...",
@@ -55,7 +53,12 @@
"assetsWithCount_other": "{{count}} tài nguyên",
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
"locateInGalery": "Vị Trí Ở Thư Viện Ảnh",
"deletedImagesCannotBeRestored": "Ảnh đã xóa không thể khôi phục lại.",
"hideBoards": "Ẩn Bảng",
"movingVideosToBoard_other": "Di chuyển {{count}} video vào bảng:",
"viewBoards": "Xem Bảng",
"videosWithCount_other": "{{count}} video"
},
"gallery": {
"swapImages": "Đổi Hình Ảnh",
@@ -83,33 +86,27 @@
"galleryImageSize": "Kích Thước Ảnh",
"downloadSelection": "Tải xuống Phần Được Lựa Chọn",
"bulkDownloadRequested": "Chuẩn Bị Tải Xuống",
"unableToLoad": "Không Thể Tải Thư viện Ảnh",
"newestFirst": "Mới Nhất Trước",
"showStarredImagesFirst": "Hiển Thị Ảnh Gắn Sao Trước",
"bulkDownloadRequestedDesc": "Yêu cầu tải xuống đang được chuẩn bị. Vui lòng chờ trong giây lát.",
"starImage": "Gắn Sao Cho Ảnh",
"openViewer": "Mở Trình Xem",
"starImage": "Gắn Sao",
"viewerImage": "Trình Xem Ảnh",
"sideBySide": "Cạnh Nhau",
"alwaysShowImageSizeBadge": "Luôn Hiển Thị Kích Thước Ảnh",
"autoAssignBoardOnClick": "Tự Động Gán Vào Bảng Khi Nhấp Chuột",
"jump": "Nhảy Đến",
"go": "Đi",
"autoSwitchNewImages": "Tự Động Đổi Sang Hình Ảnh Mới",
"featuresWillReset": "Nếu bạn xoá hình ảnh này, những tính năng đó sẽ lập tức được khởi động lại.",
"openInViewer": "Mở Trong Trình Xem",
"searchImages": "Tìm Theo Metadata",
"selectForCompare": "Chọn Để So Sánh",
"closeViewer": "Đóng Trình Xem",
"move": "Di Chuyển",
"displayBoardSearch": "Tìm Kiếm Bảng",
"displaySearch": "Tìm Kiếm Hình Ảnh",
"selectAnImageToCompare": "Chọn Ảnh Để So Sánh",
"slider": "Thanh Trượt",
"gallerySettings": "Cài Đặt Thư Viện Ảnh",
"image": "hình ảnh",
"noImageSelected": "Không Có Ảnh Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
"imagesTab": "Ảnh bạn vừa được tạo và lưu trong Invoke.",
"loading": "Đang Tải",
@@ -117,13 +114,24 @@
"exitCompare": "Ngừng So Sánh",
"stretchToFit": "Kéo Dài Cho Vừa Vặn",
"sortDirection": "Cách Sắp Xếp",
"unstarImage": "Ngừng Gắn Sao Cho Ảnh",
"unstarImage": "Bỏ Gắn Sao",
"compareHelp2": "Nhấn <Kbd>M</Kbd> để tuần hoàn trong chế độ so sánh.",
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh",
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh",
"deleteVideo_other": "Xóa {{count}} Video",
"deleteVideoPermanent": "Video đã xóa không thể khôi phục lại.",
"jump": "Nhảy Đến",
"noVideoSelected": "Không Có Video Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"unableToLoad": "Không Thể Tải Thư Viện Ảnh",
"selectAnImageToCompare": "Chọn Ảnh Để So Sánh",
"openViewer": "Mở Trình Xem",
"closeViewer": "Đóng Trình Xem",
"videos": "Video",
"videosTab": "Video bạn tạo và được lưu trong Invoke."
},
"common": {
"ipAdapter": "IP Adapter",
@@ -134,14 +142,12 @@
"clipboard": "Clipboard",
"learnMore": "Tìm Hiểu Thêm",
"openInViewer": "Mở Trong Trình Xem",
"nextPage": "Trang Sau",
"alpha": "Alpha",
"edit": "Sửa",
"nodes": "Workflow",
"format": "Định Dạng",
"delete": "Xoá",
"details": "Chi Tiết",
"imageFailedToLoad": "Không Thể Tải Hình Ảnh",
"img2img": "Hình ảnh sang Hình ảnh",
"upload": "Tải Lên",
"somethingWentWrong": "Có vấn đề phát sinh",
@@ -157,7 +163,7 @@
"dontAskMeAgain": "Không hỏi lại",
"error": "Lỗi",
"or": "hoặc",
"installed": "Đã Tải Xuống",
"installed": "Được Tải Xuống Sẵn",
"simple": "Cơ Bản",
"linear": "Tuyến Tính",
"safetensors": "Safetensors",
@@ -179,19 +185,15 @@
"on": "Bật",
"checkpoint": "Checkpoint",
"txt2img": "Từ Ngữ Sang Hình Ảnh",
"prevPage": "Trang Trước",
"unknown": "Không Rõ",
"githubLabel": "Github",
"folder": "Thư mục",
"goTo": "Đến",
"hotkeysLabel": "Phím Tắt",
"loadingImage": "Đang Tải Hình ảnh",
"localSystem": "Hệ Thống Máy Chủ",
"input": "Đầu Vào",
"languagePickerLabel": "Ngôn Ngữ",
"openInNewTab": "Mở Trong Tab Mới",
"outpaint": "outpaint",
"notInstalled": "Chưa $t(common.installed)",
"save": "Lưu",
"saveAs": "Lưu Như",
"auto": "Tự Động",
@@ -233,7 +235,6 @@
"end": "Kết Thúc",
"min": "Tối Thiểu",
"max": "Tối Đa",
"resetToDefaults": "Đặt Lại Về Mặc Định",
"seed": "Hạt Giống",
"combinatorial": "Tổ Hợp",
"column": "Cột",
@@ -252,7 +253,17 @@
"clear": "Dọn Dẹp",
"compactView": "Chế Độ Xem Gọn",
"fullView": "Chế Độ Xem Đầy Đủ",
"options_withCount_other": "{{count}} thiết lập"
"options_withCount_other": "{{count}} thiết lập",
"removeNegativePrompt": "Xóa Lệnh Tiêu Cực",
"addNegativePrompt": "Thêm Lệnh Tiêu Cực",
"selectYourModel": "Chọn Model",
"goTo": "Đi Đến",
"imageFailedToLoad": "Không Thể Tải Ảnh",
"localSystem": "Hệ Thống Máy Chủ",
"notInstalled": "Chưa $t(common.installed)",
"prevPage": "Trang Trước",
"nextPage": "Trang Sau",
"resetToDefaults": "Tải Lại Mặc Định"
},
"prompt": {
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
@@ -262,11 +273,11 @@
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
"expandingPrompt": "Đang mở rộng lệnh...",
"replace": "Thay Thế",
"discard": "Huỷ Bỏ",
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
"replace": "Thay Thế",
"insert": "Chèn",
"discard": "Huỷ Bỏ"
"insert": "Chèn"
},
"queue": {
"resume": "Tiếp Tục",
@@ -280,7 +291,6 @@
"clearQueueAlertDialog2": "Bạn chắc chắn muốn dọn sạch hàng không?",
"queueEmpty": "Hàng Trống",
"queueBack": "Thêm Vào Hàng",
"batchFieldValues": "Giá Trị Vùng Theo Lô",
"openQueue": "Mở Queue",
"pause": "Dừng Lại",
"pauseFailed": "Có Vấn Đề Khi Dừng Lại Bộ Xử Lý",
@@ -344,7 +354,13 @@
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
"retryItem": "Thử Lại Mục",
"credits": "Nguồn",
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại"
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại",
"createdAt": "Tạo tại",
"completedAt": "Hoàn Thành Tại",
"sortColumn": "Sắp Xếp Cột",
"sortBy": "Sắp Xếp Theo {{column}}",
"sortOrderAscending": "Tăng Dần",
"sortOrderDescending": "Giảm Dần"
},
"hotkeys": {
"canvas": {
@@ -356,10 +372,6 @@
"desc": "Phóng to canvas lên 800%.",
"title": "Phóng To Vào 800%"
},
"setFillToWhite": {
"title": "Chỉnh Màu Sang Trắng",
"desc": "Chỉnh màu hiện tại sang màu trắng."
},
"transformSelected": {
"title": "Biến Đổi",
"desc": "Biến đổi layer được chọn."
@@ -492,6 +504,22 @@
"title": "Huỷ Segment Anything",
"desc": "Huỷ hoạt động Segment Anything hiện tại.",
"key": "esc"
},
"fitBboxToLayers": {
"title": "Xếp Vừa Hộp Giới Hạn Vào Layer",
"desc": "Tự động điều chỉnh hộp giới hạn tạo sinh vừa vặn vào layer nhìn thấy được"
},
"toggleBbox": {
"title": "Bật/Tắt Hiển Thị Hộp Giới Hạn",
"desc": "Ẩn hoặc hiện hộp giới hạn tạo sinh"
},
"setFillColorsToDefault": {
"title": "Đặt Màu Lại Mặc Định",
"desc": "Chỉnh công cụ màu hiện tại về mặc định."
},
"toggleFillColor": {
"title": "Bật/Tắt Màu Lấp Đầy",
"desc": "Bật/Tắt công cụ đổ màu hiện tại."
}
},
"workflows": {
@@ -689,12 +717,19 @@
"title": "Chọn Tab Tạo Sinh",
"desc": "Chọn tab Tạo Sinh.",
"key": "1"
},
"selectVideoTab": {
"title": "Chọn Thẻ Video",
"desc": "Chọn thẻ Video."
}
},
"searchHotkeys": "Tìm Phím tắt",
"noHotkeysFound": "Không Tìm Thấy Phím Tắt",
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
"hotkeys": "Phím Tắt"
"hotkeys": "Phím Tắt",
"video": {
"title": "Video"
}
},
"modelManager": {
"modelConverted": "Model Đã Được Chuyển Đổi",
@@ -778,7 +813,6 @@
"hfTokenUnableToVerifyErrorMessage": "Không thể xác minh HuggingFace token. Khả năng cao lỗi mạng. Vui lòng thử lại sau.",
"inplaceInstall": "Tải Xuống Tại Chỗ",
"installRepo": "Tải Xuống Kho Lưu Trữ (Repository)",
"ipAdapters": "IP Adapters",
"loraModels": "LoRA",
"main": "Chính",
"modelConversionFailed": "Chuyển Đổi Model Thất Bại",
@@ -824,7 +858,6 @@
"textualInversions": "Bộ Đảo Ngược Văn Bản",
"loraTriggerPhrases": "Từ Ngữ Kích Hoạt Cho LoRA",
"width": "Chiều Rộng",
"starterModelsInModelManager": "Model khởi đầu có thể tìm thấy ở Trình Quản Lý Model",
"clipLEmbed": "CLIP-L Embed",
"clipGEmbed": "CLIP-G Embed",
"controlLora": "LoRA Điều Khiển Được",
@@ -836,13 +869,11 @@
"sigLip": "SigLIP",
"llavaOnevision": "LLaVA OneVision",
"fileSize": "Kích Thước Tệp",
"filterModels": "Lọc Model",
"modelPickerFallbackNoModelsInstalled2": "Nhấp vào <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải.",
"modelPickerFallbackNoModelsInstalled": "Không Có Sẵn Model.",
"manageModels": "Quản Lý Model",
"hfTokenReset": "Làm Mới HF Token",
"relatedModels": "Model Liên Quan",
"showOnlyRelatedModels": "Liên Quan",
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
"nToInstall": "Còn {{count}} để tải",
@@ -859,27 +890,32 @@
"scanFolderDescription": "Quét một thư mục trên máy để tự động tra và tải model.",
"recommendedModels": "Model Khuyến Nghị",
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
"quickStart": "Gói Khởi Đầu Nhanh",
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
"sdxl": "SDXL",
"quickStart": "Gói Khởi Đầu Nhanh",
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"installBundle": "Tải Xuống Gói",
"installBundleMsg1": "Bạn có chắc chắn muốn tải xuống gói {{bundleName}}?",
"installBundleMsg2": "Gói này sẽ tải xuống {{count}} model sau đây:",
"filterModels": "Lọc Model",
"ipAdapters": "IP Adapters",
"showOnlyRelatedModels": "Liên Quan",
"starterModelsInModelManager": "Model Khởi Đầu có thể tìm thấy ở Trình Quản Lý Model"
},
"metadata": {
"guidance": "Hướng Dẫn",
"noRecallParameters": "Không tìm thấy tham số",
"imageDetails": "Chi Tiết Ảnh",
"createdBy": "Được Tạo Bởi",
"parsingFailed": "Lỗi Cú Pháp",
"canvasV2Metadata": "Layer Canvas",
"parameterSet": "Dữ liệu tham số {{parameter}}",
"positivePrompt": "Lệnh Tích Cực",
"recallParameter": "Gợi Nhớ {{label}}",
"seed": "Hạt Giống",
"negativePrompt": "Lệnh Tiêu Cực",
"noImageDetails": "Không tìm thấy chí tiết ảnh",
"noImageDetails": "Không tìm thấy chi tiết ảnh",
"strength": "Mức độ mạnh từ ảnh sang ảnh",
"Threshold": "Ngưỡng Nhiễu",
"width": "Chiều Rộng",
@@ -898,7 +934,16 @@
"recallParameters": "Gợi Nhớ Tham Số",
"scheduler": "Scheduler",
"noMetaData": "Không tìm thấy metadata",
"imageDimensions": "Kích Thước Ảnh"
"imageDimensions": "Kích Thước Ảnh",
"clipSkip": "$t(parameters.clipSkip)",
"videoDetails": "Chi Tiết Video",
"noVideoDetails": "Không tìm thấy chi tiết video",
"parsingFailed": "Lỗi Cú Pháp",
"recallParameter": "Gợi Nhớ {{label}}",
"videoModel": "Model",
"videoDuration": "Thời Lượng",
"videoAspectRatio": "Tỉ Lệ",
"videoResolution": "Độ Phân Giải"
},
"accordions": {
"generation": {
@@ -944,8 +989,8 @@
"method": "Cách Thức Sửa Độ Phân Giải Cao"
},
"hrf": "Sửa Độ Phân Giải Cao",
"enableHrf": "Cho Phép Sửa Độ Phân Giải Cao",
"upscaleMethod": "Cách Thức Upscale"
"enableHrf": "Bật Chế Độ Chỉnh Sửa Phân Giải Cao",
"upscaleMethod": "Phương Thức Upscale"
},
"nodes": {
"validateConnectionsHelp": "Ngăn chặn những kết nối không hợp lý được tạo ra, và đồ thị không hợp lệ bị kích hoạt",
@@ -971,9 +1016,7 @@
"float": "Số Thực",
"missingNode": "Thiếu node kích hoạt",
"currentImage": "Hình Ảnh Hiện Tại",
"removeLinearView": "Xoá Khỏi Chế Độ Xem Tuyến Tính",
"unknownErrorValidatingWorkflow": "Lỗi không rõ khi xác thực workflow",
"unableToLoadWorkflow": "Không Thể Tải Workflow",
"workflowSettings": "Cài Đặt Biên Tập Workflow",
"workflowVersion": "Phiên Bản",
"unableToGetWorkflowVersion": "Không thể tìm phiên bản của lược đồ workflow",
@@ -983,7 +1026,6 @@
"ipAdapter": "IP Adapter",
"cannotDuplicateConnection": "Không thể tạo hai kết nối trùng lặp",
"workflowValidation": "Lỗi Xác Thực Workflow",
"mismatchedVersion": "Node không hợp lệ: node {{node}} thuộc loại {{type}} có phiên bản không khớp (thử cập nhật?)",
"sourceNodeFieldDoesNotExist": "Kết nối không phù hợp: nguồn/đầu ra của vùng {{node}}.{{field}} không tồn tại",
"targetNodeFieldDoesNotExist": "Kết nối không phù hợp: đích đến/đầu vào của vùng {{node}}.{{field}} không tồn tại",
"missingTemplate": "Node không hợp lệ: node {{node}} thuộc loại {{type}} bị thiếu mẫu trình bày (chưa tải?)",
@@ -997,7 +1039,6 @@
"edge": "Kết Nối",
"graph": "Đồ Thị",
"workflowAuthor": "Tác Giả",
"addLinearView": "Thêm Vào Chế Độ Xem Tuyến Tính",
"showEdgeLabels": "Hiển Thị Tên Kết Nối",
"unknownField": "Vùng Dữ Liệu Không Rõ",
"executionStateCompleted": "Đã Hoàn Tất",
@@ -1027,7 +1068,6 @@
"node": "Node",
"nodeTemplate": "Mẫu Trình Bày Của Node",
"nodeType": "Loại Node",
"noFieldsLinearview": "Không có vùng được thêm vào Chế Độ Xem Tuyến Tính",
"notes": "Ghi Chú",
"updateApp": "Cập Nhật Ứng Dụng",
"updateAllNodes": "Cập Nhật Các Node",
@@ -1035,7 +1075,6 @@
"imageAccessError": "Không thể tìm thấy ảnh {{image_name}}, chuyển về mặc định",
"unknownNode": "Node Không Rõ",
"unknownNodeType": "Loại Node Không Rõ",
"unknownTemplate": "Mẫu Trình Bày Không Rõ",
"cannotConnectOutputToOutput": "Không thể kết nối đầu ra với đầu ra",
"cannotConnectToSelf": "Không thể kết nối với chính nó",
"workflow": "Workflow",
@@ -1051,7 +1090,6 @@
"fitViewportNodes": "Chế Độ Xem Vừa Khớp",
"fullyContainNodes": "Bao Phủ Node Hoàn Toàn Để Chọn",
"fullyContainNodesHelp": "Node phải được phủ kín hoàn toàn trong hộp lựa chọn để được lựa chọn",
"hideLegendNodes": "Ẩn Vùng Nhập",
"hideMinimapnodes": "Ẩn Bản Đồ Thu Nhỏ",
"inputMayOnlyHaveOneConnection": "Đầu vào chỉ có thể có một kết nối",
"noWorkflows": "Không Có Workflow",
@@ -1062,34 +1100,27 @@
"problemSettingTitle": "Có Vấn Đề Khi Thiết Lập Tiêu Đề",
"resetToDefaultValue": "Đặt lại giá trị mặc định",
"reloadNodeTemplates": "Tải Lại Mẫu Trình Bày Node",
"reorderLinearView": "Sắp Xếp Lại Chế Độ Xem Tuyến Tính",
"viewMode": "Dùng Chế Độ Xem Tuyến Tính",
"newWorkflowDesc": "Tạo workflow mới?",
"string": "Chuỗi Ký Tự",
"version": "Phiên Bản",
"versionUnknown": " Phiên Bản Không Rõ",
"workflowContact": "Thông Tin Liên Lạc",
"workflowName": "Tên",
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
"connectionWouldCreateCycle": "Kết nối này sẽ tạo ra vòng lặp",
"addNode": "Thêm Node",
"unsupportedAnyOfLength": "quá nhiều dữ liệu hợp nhất: {{count}}",
"unknownInput": "Đầu Vào Không Rõ: {{name}}",
"validateConnections": "Xác Thực Kết Nối Và Đồ Thị",
"workflowNotes": "Ghi Chú",
"workflowTags": "Nhãn",
"editMode": "Chỉnh sửa trong Trình Biên Tập Workflow",
"edit": "Chỉnh Sửa",
"executionStateInProgress": "Đang Xử Lý",
"showLegendNodes": "Hiển Thị Vùng Nhập",
"outputFieldTypeParseError": "Không thể phân tích loại dữ liệu đầu ra của {{node}}.{{field}} ({{message}})",
"modelAccessError": "Không thể tìm thấy model {{key}}, chuyển về mặc định",
"internalDesc": "Trình kích hoạt này được dùng bên trong bởi Invoke. Nó có thể phá hỏng thay đổi trong khi cập nhật ứng dụng và có thể bị xoá bất cứ lúc nào.",
"specialDesc": "Trình kích hoạt này có một số xử lý đặc biệt trong ứng dụng. Ví dụ, Node Hàng Loạt được dùng để xếp vào nhiều đồ thị từ một workflow.",
"addItem": "Thêm Mục",
"generateValues": "Cho Ra Giá Trị",
"floatRangeGenerator": "Phạm Vi Tạo Ra Số Thực",
"integerRangeGenerator": "Phạm Vi Tạo Ra Số Nguyên",
"linearDistribution": "Phân Bố Tuyến Tính",
"uniformRandomDistribution": "Phân Bố Ngẫu Nhiên Đồng Nhất",
"parseString": "Phân Tích Chuỗi",
@@ -1098,7 +1129,6 @@
"splitOn": "Tách Ở",
"arithmeticSequence": "Cấp Số Cộng",
"generatorNRandomValues_other": "{{count}} giá trị ngẫu nhiên",
"generatorLoading": "đang tải",
"generatorLoadFromFile": "Tải Từ Tệp",
"dynamicPromptsRandom": "Dynamic Prompts (Ngẫu Nhiên)",
"dynamicPromptsCombinatorial": "Dynamic Prompts (Tổ Hợp)",
@@ -1108,7 +1138,6 @@
"description": "Mô Tả",
"loadWorkflowDesc": "Tải workflow?",
"loadWorkflowDesc2": "Workflow hiện tại của bạn có những điều chỉnh chưa được lưu.",
"loadingTemplates": "Đang Tải {{name}}",
"nodeName": "Tên Node",
"unableToUpdateNode": "Cập nhật node thất bại: node {{node}} thuộc dạng {{type}} (có thể cần xóa và tạo lại)",
"downloadWorkflowError": "Lỗi tải xuống workflow",
@@ -1134,7 +1163,23 @@
"alignmentDL": "Dưới Cùng Bên Trái",
"alignmentUR": "Trên Cùng Bên Phải",
"alignmentDR": "Dưới Cùng Bên Phải"
}
},
"generatorLoading": "đang tải",
"addLinearView": "Thêm Vào Chế Độ Xem Tuyến Tính (Linear View)",
"hideLegendNodes": "Ẩn Vùng Nhập",
"mismatchedVersion": "Node không hợp lệ: node {{node}} thuộc loại {{type}} có phiên bản không khớp (thử cập nhật?)",
"noFieldsLinearview": "Không có vùng được thêm vào Chế Độ Xem Tuyến Tính",
"removeLinearView": "Xoá Khỏi Chế Độ Xem Tuyến Tính",
"reorderLinearView": "Sắp Xếp Lại Chế Độ Xem Tuyến Tính",
"showLegendNodes": "Hiển Thị Vùng Nhập",
"unableToLoadWorkflow": "Không Thể Tải Workflow",
"unknownTemplate": "Mẫu Trình Bày Không Rõ",
"unknownInput": "Đầu Vào Không Rõ: {{name}}",
"loadingTemplates": "Đang Tải {{name}}",
"versionUnknown": " Phiên Bản Không Rõ",
"generateValues": "Giá Trị Tạo Sinh",
"floatRangeGenerator": "Phạm Vi Tạo Sinh Số Thực",
"integerRangeGenerator": "Phạm Vi Tạo Sinh Số Nguyên"
},
"popovers": {
"paramCFGRescaleMultiplier": {
@@ -1582,14 +1627,14 @@
"concepts": "LoRA",
"loading": "đang tải",
"lora": "LoRA",
"noMatchingLoRAs": "Không có LoRA phù hợp",
"noRefinerModelsInstalled": "Chưa có model SDXL Refiner được tải xuống",
"noLoRAsInstalled": "Chưa có LoRA được tải xuống",
"defaultVAE": "VAE Mặc Định",
"noMatchingModels": "Không có Model phù hợp",
"noModelsAvailable": "Không có model",
"selectModel": "Chọn Model",
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích",
"noMatchingLoRAs": "Không có LoRA phù hợp",
"noLoRAsInstalled": "Chưa có LoRA được tải xuống"
},
"parameters": {
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
@@ -1599,9 +1644,7 @@
"processImage": "Xử Lý Hình Ảnh",
"useSize": "Dùng Kích Thước",
"invoke": {
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều rộng hộp giới hạn là {{width}}",
"noModelSelected": "Không có model được lựa chọn",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều dài hộp giới hạn là {{height}}",
"canvasIsFiltering": "Canvas đang bận (đang lọc)",
"canvasIsRasterizing": "Canvas đang bận (đang raster hoá)",
"canvasIsTransforming": "Canvas đang bận (đang biến đổi)",
@@ -1615,8 +1658,6 @@
"systemDisconnected": "Hệ thống mất kết nối",
"invoke": "Kích Hoạt",
"missingNodeTemplate": "Thiếu mẫu trình bày node",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều dài hộp giới hạn là {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"missingInputForField": "thiếu đầu vào",
"missingFieldTemplate": "Thiếu vùng mẫu trình bày",
"collectionTooFewItems": "quá ít mục, tối thiểu là {{minItems}}",
@@ -1631,7 +1672,6 @@
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (giá trị chọn lọc tối thiểu)",
"collectionNumberGTExclusiveMax": "{{value}} >= {{exclusiveMaximum}} (giá trị chọn lọc tối đa)",
"batchNodeCollectionSizeMismatch": "Kích cỡ tài nguyên không phù hợp với Lô {{batchGroupId}}",
"emptyBatches": "lô trống",
"batchNodeNotConnected": "Node Hàng Loạt chưa được kết nối: {{label}}",
"batchNodeEmptyCollection": "Một vài node hàng loạt có tài nguyên rỗng",
"collectionEmpty": "tài nguyên trống",
@@ -1641,9 +1681,16 @@
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với FLUX Kontext thông qua BFL API",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn",
"emptyBatches": "lô trống",
"noStartingFrameImage": "Chưa có khung hình ảnh đầu",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều rộng hộp giới hạn là {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều cao hộp giới hạn là {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều cao hộp giới hạn là {{height}}",
"incompatibleLoRAs": "LoRA không tương thích bị thêm vào",
"videoIsDisabled": "Trình tạo sinh Video không được mở cho tài khoản {{accountType}}."
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1690,7 +1737,6 @@
"useAll": "Dùng Tất Cả",
"useCpuNoise": "Dùng Độ Nhiễu CPU",
"remixImage": "Phối Lại Hình Ảnh",
"showOptionsPanel": "Hiển Thị Bảng Bên Cạnh (O hoặc T)",
"shuffle": "Xáo Trộn",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (lớn quá)",
"cfgRescaleMultiplier": "Hệ Số Nhân Thang CFG",
@@ -1700,14 +1746,24 @@
"lockAspectRatio": "Khoá Tỉ Lệ",
"swapDimensions": "Hoán Đổi Kích Thước",
"copyImage": "Sao Chép Hình Ảnh",
"downloadImage": "Tải Xuống Hình Ảnh",
"imageFit": "Căn Chỉnh Ảnh Ban Đầu Thành Kích Thước Đầu Ra",
"info": "Thông Tin",
"usePrompt": "Dùng Lệnh",
"upscaling": "Upscale",
"tileSize": "Kích Thước Khối",
"disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp."
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp.",
"useClipSkip": "Dùng CLIP Skip",
"duration": "Thời Lượng",
"downloadImage": "Tải Xuống Hình Ảnh",
"images_withCount_other": "Hình Ảnh",
"videos_withCount_other": "Video",
"startingFrameImage": "Khung Hình Bắt Đầu",
"videoActions": "Hành Động Với Video",
"sendToVideo": "Gửi Vào Video",
"showOptionsPanel": "Hiển Thị Bảng Bên Cạnh (O hoặc T)",
"video": "Video",
"resolution": "Độ Phân Giải"
},
"dynamicPrompts": {
"seedBehaviour": {
@@ -1733,9 +1789,7 @@
"antialiasProgressImages": "Xử Lý Khử Răng Cưa Hình Ảnh",
"models": "Models",
"informationalPopoversDisabledDesc": "Hộp thoại hỗ trợ thông tin đã tắt. Bật lại trong Cài đặt.",
"modelDescriptionsDisabled": "Trình Mô Tả Model Bằng Hộp Thả Đã Tắt",
"enableModelDescriptions": "Bật Trình Mô Tả Model Bằng Hộp Thả",
"modelDescriptionsDisabledDesc": "Trình mô tả model bằng hộp thả đã tắt. Bật lại trong Cài đặt.",
"enableNSFWChecker": "Bật Trình Kiểm Tra NSFW",
"clearIntermediatesWithCount_other": "Dọn sạch {{count}} sản phẩm trung gian",
"reloadingIn": "Tải lại trong",
@@ -1758,7 +1812,9 @@
"intermediatesClearedFailed": "Có Vấn Đề Khi Dọn Sạch Sản Phẩm Trung Gian",
"enableInvisibleWatermark": "Bật Chế Độ Ẩn Watermark",
"showDetailedInvocationProgress": "Hiện Dữ Liệu Xử Lý",
"enableHighlightFocusedRegions": "Nhấn Mạnh Khu Vực Chỉ Định"
"enableHighlightFocusedRegions": "Nhấn Mạnh Khu Vực Chỉ Định",
"modelDescriptionsDisabled": "Trình Mô Tả Model Bằng Hộp Thả Đã Tắt",
"modelDescriptionsDisabledDesc": "Trình mô tả model bằng hộp thả đã tắt. Bật lại trong Cài đặt."
},
"sdxl": {
"loading": "Đang Tải...",
@@ -1768,15 +1824,15 @@
"refinermodel": "Model Refiner",
"refinerStart": "Bắt Đầu Refiner",
"denoisingStrength": "Sức Mạnh Khử Nhiễu",
"posStylePrompt": "Điểm Tích Cực Cho Lệnh Phong Cách",
"scheduler": "Scheduler",
"refiner": "Refiner",
"cfgScale": "Thang CFG",
"concatPromptStyle": "Liên Kết Lệnh & Phong Cách",
"freePromptStyle": "Viết Lệnh Thủ Công Cho Phong Cách",
"negStylePrompt": "Điểm Tiêu Cực Cho Lệnh Phong Cách",
"negAestheticScore": "Điểm Khác Tiêu Chuẩn",
"noModelsAvailable": "Không có sẵn model"
"noModelsAvailable": "Không có sẵn model",
"concatPromptStyle": "Liên Kết Lệnh & Phong Cách",
"freePromptStyle": "Viết Thủ Công Lệnh Phong Cách",
"negStylePrompt": "Điểm Tiêu Cực Cho Lệnh Phong Cách",
"posStylePrompt": "Điểm Tích Cực Cho Lệnh Phong Cách"
},
"controlLayers": {
"width": "Chiều Rộng",
@@ -1791,7 +1847,6 @@
"saveLayerToAssets": "Lưu Layer Vào Khu Tài Nguyên",
"canvas": "Canvas",
"savedToGalleryOk": "Đã Lưu Vào Thư Viện Ảnh",
"addGlobalReferenceImage": "Thêm $t(controlLayers.globalReferenceImage)",
"clipToBbox": "Chuyển Nét Thành Hộp Giới Hạn",
"moveToFront": "Chuyển Lên Trước",
"mergeVisible": "Gộp Layer Đang Hiển Thị",
@@ -1836,7 +1891,6 @@
"stylePrecise": "Phong Cách (Chính Xác)",
"stylePreciseDesc": "Áp dụng cách trình bày chính xác, loại bỏ các chủ thể ảnh hưởng."
},
"deletePrompt": "Xoá Lệnh",
"rasterLayer": "Layer Dạng Raster",
"disableAutoNegative": "Tắt Tự Động Đảo Chiều",
"controlLayer": "Layer Điều Khiển Được",
@@ -1847,8 +1901,6 @@
"replaceLayer": "Thay Đổi Layer",
"regionalGuidance": "Chỉ Dẫn Khu Vực",
"newCanvasFromImage": "Canvas Mới Từ Ảnh",
"rasterLayers_withCount_visible": "Layer Dạng Raster ({{count}})",
"regionalGuidance_withCount_visible": "Chỉ Dẫn Khu Vực ({{count}})",
"convertRasterLayerTo": "Chuyển Đổi $t(controlLayers.rasterLayer) Thành",
"convertControlLayerTo": "Chuyển Đổi $t(controlLayers.controlLayer) Thành",
"convertInpaintMaskTo": "Chuyển Đổi $t(controlLayers.inpaintMask) Thành",
@@ -1859,12 +1911,7 @@
"newRasterLayer": "$t(controlLayers.rasterLayer) Mới",
"enableAutoNegative": "Bật Tự Động Đảo Chiều",
"sendToCanvas": "Chuyển Tới Canvas",
"inpaintMasks_withCount_hidden": "Lớp Phủ Inpaint ({{count}} đang ẩn)",
"globalReferenceImages_withCount_visible": "Ảnh Mẫu Toàn Vùng ({{count}})",
"replaceCurrent": "Thay Đổi Cái Hiện Tại",
"controlLayers_withCount_visible": "Layer Điều Khiển Được ({{count}})",
"hidingType": "Ẩn {{type}}",
"newImg2ImgCanvasFromImage": "Chuyển Đổi Ảnh Sang Ảnh Mới Từ Ảnh",
"copyToClipboard": "Sao Chép Vào Clipboard",
"logDebugInfo": "Thông Tin Log Gỡ Lỗi",
"regionalReferenceImage": "Ảnh Mẫu Khu Vực",
@@ -1877,37 +1924,28 @@
"horizontal": "Đường Ngang",
"crosshatch": "Đường Chéo Song Song (Crosshatch)",
"vertical": "Đường Dọc",
"solid": "Chắc Chắn"
"solid": "Chắc Chắn",
"bgFillColor": "Màu Nền",
"fgFillColor": "Màu Nổi"
},
"addControlLayer": "Thêm $t(controlLayers.controlLayer)",
"inpaintMask": "Lớp Phủ Inpaint",
"dynamicGrid": "Lưới Dynamic",
"layer_other": "Layer",
"layer_withCount_other": "Layer ({{count}})",
"pullBboxIntoLayer": "Chuyển Hộp Giới Hạn Vào Layer",
"addInpaintMask": "Thêm $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Thêm $t(controlLayers.regionalGuidance)",
"sendToGallery": "Đã Chuyển Tới Thư Viện Ảnh",
"unlocked": "Mở Khoá",
"addReferenceImage": "Thêm $t(controlLayers.referenceImage)",
"sendingToCanvas": "Chuyển Ảnh Tạo Sinh Vào Canvas",
"sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện Ảnh",
"viewProgressOnCanvas": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Canvas</Btn>.",
"inpaintMask_withCount_other": "Lớp Phủ Inpaint",
"regionalGuidance_withCount_other": "Chỉ Dẫn Khu Vực",
"controlLayers_withCount_hidden": "Layer Điều Khiển Được ({{count}} đang ẩn)",
"globalReferenceImages_withCount_hidden": "Ảnh Mẫu Toàn Vùng ({{count}} đang ẩn)",
"rasterLayer_withCount_other": "Layer Dạng Raster",
"globalReferenceImage_withCount_other": "Ảnh Mẫu Toàn Vùng",
"copyRasterLayerTo": "Sao Chép $t(controlLayers.rasterLayer) Tới",
"copyControlLayerTo": "Sao Chép $t(controlLayers.controlLayer) Tới",
"newRegionalGuidance": "$t(controlLayers.regionalGuidance) Mới",
"newGallerySessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến thư viện ảnh.",
"stagingOnCanvas": "Hiển thị hình ảnh lên",
"pullBboxIntoReferenceImage": "Chuyển Hộp Giới Hạn Vào Ảnh Mẫu",
"maskFill": "Lấp Đầy Lớp Phủ",
"addRasterLayer": "Thêm $t(controlLayers.rasterLayer)",
"rasterLayers_withCount_hidden": "Layer Dạng Raster ({{count}} đang ẩn)",
"referenceImage": "Ảnh Mẫu",
"showProgressOnCanvas": "Hiện Quá Trình Xử Lý Lên Canvas",
"prompt": "Lệnh",
@@ -1922,34 +1960,23 @@
},
"addPositivePrompt": "Thêm $t(controlLayers.prompt)",
"deleteReferenceImage": "Xoá Ảnh Mẫu",
"inpaintMasks_withCount_visible": "Lớp Phủ Inpaint ({{count}})",
"disableTransparencyEffect": "Tắt Hiệu Ứng Trong Suốt",
"newGallerySession": "Phiên Thư Viện Ảnh Mới",
"sendToGalleryDesc": "Bấm 'Kích Hoạt' sẽ tiến hành tạo sinh và lưu ảnh vào thư viện ảnh.",
"opacity": "Độ Mờ Đục",
"rectangle": "Hình Chữ Nhật",
"addNegativePrompt": "Thêm $t(controlLayers.negativePrompt)",
"globalReferenceImage": "Ảnh Mẫu Toàn Vùng",
"sendToCanvasDesc": "Bấm 'Kích Hoạt' sẽ hiển thị công việc đang xử lý của bạn lên canvas.",
"viewProgressInViewer": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Trình Xem Ảnh</Btn>.",
"regionalGuidance_withCount_hidden": "Chỉ Dẫn Khu Vực ({{count}} đang ẩn)",
"controlLayer_withCount_other": "Layer Điều Khiển Được",
"newInpaintMask": "$t(controlLayers.inpaintMask) Mới",
"locked": "Khoá",
"newCanvasSession": "Phiên Canvas Mới",
"transparency": "Độ Trong Suốt",
"showingType": "Hiển Thị {{type}}",
"newCanvasSessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến canvas.",
"selectObject": {
"help2": "Bắt đầu mới một điểm <Bold>Bao Gồm</Bold> trong đối tượng được chọn. Cho thêm điểm để tinh chế phần chọn. Ít điểm hơn thường mang lại kết quả tốt hơn.",
"invertSelection": "Đảo Ngược Phần Chọn",
"include": "Bao Gồm",
"exclude": "Loại Trừ",
"reset": "Làm Mới",
"saveAs": "Lưu Như",
"help1": "Chọn một đối tượng. Thêm điểm <Bold>Bao Gồm</Bold> và <Bold>Loại Trừ</Bold> để chỉ ra phần nào trong layer là đối tượng mong muốn.",
"dragToMove": "Kéo kiểm để di chuyển nó",
"help3": "Đảo ngược phần chọn để chọn mọi thứ trừ đối tượng được chọn.",
"clickToAdd": "Nhấp chuột vào layer để thêm điểm",
"clickToRemove": "Nhấp chuột vào một điểm để xoá",
"selectObject": "Chọn Đối Tượng",
@@ -2183,7 +2210,6 @@
"newSession": "Phiên Làm Việc Mới",
"resetGenerationSettings": "Khởi Động Lại Cài Đặt Tạo Sinh",
"referenceImageRegional": "Ảnh Mẫu (Khu Vực)",
"referenceImageGlobal": "Ảnh Mẫu (Toàn Vùng)",
"warnings": {
"problemsFound": "Phát hiện vấn đề",
"unsupportedModel": "layer không được hỗ trợ cho model cơ sở này",
@@ -2198,7 +2224,8 @@
"rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
"rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
"rgNoRegion": "không có khu vực được vẽ",
"fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill"
"fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill",
"bboxHidden": "Hộp giới hạn đang ẩn (shift+o để bật/tắt)"
},
"pasteTo": "Dán Vào",
"pasteToAssets": "Tài Nguyên",
@@ -2207,7 +2234,6 @@
"pasteToBboxDesc": "Layer Mới (Trong Hộp Giới Hạn)",
"pasteToCanvas": "Canvas",
"pasteToCanvasDesc": "Layer Mới (Trong Canvas)",
"pastedTo": "Dán Vào {{destination}}",
"regionCopiedToClipboard": "Sao Chép {{region}} Vào Clipboard",
"copyRegionError": "Lỗi khi sao chép {{region}}",
"errors": {
@@ -2227,7 +2253,6 @@
"denoiseLimit": "Giới Hạn Khử Nhiễu",
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
@@ -2240,7 +2265,38 @@
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
"invertMask": "Đảo Ngược Lớp Phủ",
"maxRefImages": "Ảnh Mẫu Tối Đa",
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu"
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu",
"deletePrompt": "Xoá Lệnh",
"addGlobalReferenceImage": "Thêm $t(controlLayers.globalReferenceImage)",
"referenceImageGlobal": "Ảnh Mẫu (Toàn Vùng)",
"sendingToCanvas": "Chuyển Ảnh Tạo Sinh Vào Canvas",
"sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện Ảnh",
"sendToGallery": "Chuyển Tới Thư Viện Ảnh",
"sendToGalleryDesc": "Bấm 'Kích Hoạt' sẽ tiến hành tạo sinh và lưu ảnh vào thư viện ảnh.",
"newImg2ImgCanvasFromImage": "Chuyển Đổi Ảnh Sang Ảnh Mới Từ Ảnh",
"sendToCanvasDesc": "Bấm 'Kích Hoạt' sẽ hiển thị công việc đang xử lý của bạn lên canvas.",
"viewProgressInViewer": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Trình Xem Ảnh</Btn>.",
"viewProgressOnCanvas": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Canvas</Btn>.",
"globalReferenceImage_withCount_other": "$t(controlLayers.globalReferenceImage)",
"regionalGuidance_withCount_hidden": "Chỉ Dẫn Khu Vực ({{count}} đang ẩn)",
"controlLayers_withCount_hidden": "Layer Điều Khiển Được ({{count}} đang ẩn)",
"rasterLayers_withCount_hidden": "Layer Dạng Raster ({{count}} đang ẩn)",
"globalReferenceImages_withCount_hidden": "Ảnh Mẫu Toàn Vùng ({{count}} đang ẩn)",
"inpaintMasks_withCount_hidden": "Lớp Phủ Inpaint ({{count}} đang ẩn)",
"regionalGuidance_withCount_visible": "Chỉ Dẫn Khu Vực ({{count}})",
"controlLayers_withCount_visible": "Layer Điều Khiển Được ({{count}})",
"rasterLayers_withCount_visible": "Layer Dạng Raster ({{count}})",
"globalReferenceImages_withCount_visible": "Ảnh Mẫu Toàn Vùng ({{count}})",
"inpaintMasks_withCount_visible": "Lớp Phủ Inpaint ({{count}})",
"layer_withCount_other": "Layer ({{count}})",
"pastedTo": "Dán Vào {{destination}}",
"stagingOnCanvas": "Hiển thị hình ảnh lên",
"newGallerySession": "Phiên Thư Viện Ảnh Mới",
"newGallerySessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến thư viện ảnh.",
"newCanvasSession": "Phiên Canvas Mới",
"newCanvasSessionDesc": "Nó sẽ dọn sạch canvas và các thiết lập trừ model được chọn. Các ảnh được tạo sinh sẽ được chuyển đến canvas.",
"replaceCurrent": "Thay Đổi Cái Hiện Tại",
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>."
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2318,15 +2374,11 @@
"toast": {
"imageUploadFailed": "Tải Lên Ảnh Thất Bại",
"layerCopiedToClipboard": "Sao Chép Layer Vào Clipboard",
"uploadFailedInvalidUploadDesc_withCount_other": "Tối đa là {{count}} ảnh PNG, JPEG hoặc WEBP.",
"imageCopied": "Ảnh Đã Được Sao Chép",
"sentToUpscale": "Chuyển Vào Upscale",
"unableToLoadImage": "Không Thể Tải Hình Ảnh",
"unableToLoadStylePreset": "Không Thể Tải Phong Cách Được Cài Đặt Trước",
"stylePresetLoaded": "Phong Cách Được Cài Đặt Trước Đã Tải",
"imageNotLoadedDesc": "Không thể tìm thấy ảnh",
"imageSaved": "Ảnh Đã Lưu",
"imageSavingFailed": "Lưu Ảnh Thất Bại",
"unableToLoadImageMetadata": "Không Thể Tải Metadata Của Ảnh",
"workflowLoaded": "Workflow Đã Tải",
"uploadFailed": "Tải Lên Thất Bại",
@@ -2338,17 +2390,14 @@
"importFailed": "Nhập Vào Thất Bại",
"importSuccessful": "Nhập Vào Thành Công",
"workflowDeleted": "Workflow Đã Xoá",
"setControlImage": "Đặt làm ảnh điều khiển được",
"connected": "Kết Nối Đến Server",
"imageUploaded": "Ảnh Đã Được Tải Lên",
"invalidUpload": "Dữ Liệu Tải Lên Không Hợp Lệ",
"modelImportCanceled": "Nhập Vào Model Thất Bại",
"parameters": "Tham Số",
"parameterSet": "Gợi Lại Tham Số",
"parameterSetDesc": "Gợi lại {{parameter}}",
"loadedWithWarnings": "Đã Tải Workflow Với Cảnh Báo",
"outOfMemoryErrorDesc": "Thiết lập tạo sinh hiện tại đã vượt mức cho phép của thiết bị. Hãy điều chỉnh thiết lập và thử lại.",
"setNodeField": "Đặt làm vùng node",
"problemRetrievingWorkflow": "Có Vấn Đề Khi Lấy Lại Workflow",
"somethingWentWrong": "Có Vấn Đề Phát Sinh",
"problemDeletingWorkflow": "Có Vấn Đề Khi Xoá Workflow",
@@ -2358,13 +2407,12 @@
"errorCopied": "Lỗi Khi Sao Chép",
"prunedQueue": "Cắt Bớt Hàng Đợi",
"imagesWillBeAddedTo": "Ảnh đã tải lên sẽ được thêm vào tài nguyên của bảng {{boardName}}.",
"baseModelChangedCleared_other": "Dọn sạch hoặc tắt {{count}} model phụ không tương thích",
"baseModelChangedCleared_other": "Cập nhật, dọn sạch hoặc tắt {{count}} model phụ không tương thích",
"canceled": "Quá Trình Xử Lý Đã Huỷ",
"baseModelChanged": "Model Cơ Sở Đã Đổi",
"addedToUncategorized": "Thêm vào tài nguyên của bảng $t(boards.uncategorized)",
"linkCopied": "Đường Liên Kết Đã Được Sao Chép",
"outOfMemoryError": "Lỗi Vượt Quá Bộ Nhớ",
"layerSavedToAssets": "Lưu Layer Vào Khu Tài Nguyên",
"modelAddedSimple": "Đã Thêm Model Vào Hàng Đợi",
"parametersSet": "Tham Số Đã Được Gợi Lại",
"parameterNotSetDesc": "Không thể gợi lại {{parameter}}",
@@ -2385,21 +2433,15 @@
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
"canvasTooLarge": "Canvas Quá Lớn",
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng chủa canvas và thử lại.",
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại.",
@@ -2407,6 +2449,20 @@
"maskInvertFailed": "Thất Bại Khi Đảo Ngược Lớp Phủ",
"noVisibleMasks": "Không Có Lớp Phủ Đang Hiển Thị",
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược",
"imageNotLoadedDesc": "Không thể tìm thấy ảnh",
"imageSaved": "Ảnh Đã Lưu",
"imageSavingFailed": "Lưu Ảnh Thất Bại",
"invalidUpload": "Dữ Liệu Tải Lên Không Hợp Lệ",
"layerSavedToAssets": "Lưu Layer Vào Khu Tài Nguyên",
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
"noActiveRasterLayersDesc": "Bật ít nhất một layer dạng raster để xuất file PSD",
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
"setControlImage": "Đặt làm ảnh điều khiển được",
"setNodeField": "Đặt làm vùng node",
"uploadFailedInvalidUploadDesc_withCount_other": "Cần tối đa {{count}} ảnh PNG, JPEG, hoặc WEBP.",
"noInpaintMaskSelected": "Không Có Lớp Phủ Inpant Được Chọn",
"noInpaintMaskSelectedDesc": "Chọn một lớp phủ inpaint để đảo ngược",
"invalidBbox": "Hộp Giới Hạn Không Hợp Lệ",
@@ -2423,7 +2479,8 @@
"queue": "Queue (Hàng Đợi)",
"workflows": "Workflow (Luồng Làm Việc)",
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)",
"generate": "Tạo Sinh"
"generate": "Tạo Sinh",
"video": "Video"
},
"launchpad": {
"workflowsTitle": "Đi sâu hơn với Workflow.",
@@ -2501,13 +2558,23 @@
"generate": {
"canvasCalloutTitle": "Đang tìm cách để điều khiển, chỉnh sửa, và làm lại ảnh?",
"canvasCalloutLink": "Vào Canvas cho nhiều tính năng hơn."
},
"videoTitle": "Tạo sinh video từ lệnh chữ.",
"video": {
"startingFrameCalloutTitle": "Thêm Khung Hình Bắt Đầu",
"startingFrameCalloutDesc": "Thêm ảnh nhằm điều khiển khung hình đầu của video."
},
"addStartingFrame": {
"title": "Thêm Khung Hình Bắt Đầu",
"description": "Thêm ảnh nhằm điều khiển khung hình đầu của video."
}
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Trình Biên Tập Workflow",
"imageViewer": "Trình Xem Ảnh",
"canvas": "Canvas"
"imageViewer": "Trình Xem",
"canvas": "Canvas",
"video": "Video"
}
},
"workflows": {
@@ -2522,28 +2589,20 @@
"saveWorkflowAs": "Lưu Workflow Như",
"downloadWorkflow": "Lưu Vào Tệp",
"noWorkflows": "Không Có Workflow",
"problemLoading": "Có Vấn Đề Khi Tải Workflow",
"clearWorkflowSearchFilter": "Xoá Workflow Khỏi Bộ Lọc Tìm Kiếm",
"defaultWorkflows": "Workflow Mặc Định",
"userWorkflows": "Workflow Của Người Dùng",
"projectWorkflows": "Dự Án Workflow",
"savingWorkflow": "Đang Lưu Workflow...",
"ascending": "Tăng Dần",
"loading": "Đang Tải Workflow",
"chooseWorkflowFromLibrary": "Chọn Workflow Từ Thư Viện",
"workflows": "Workflow",
"copyShareLinkForWorkflow": "Sao Chép Liên Kết Chia Sẻ Cho Workflow",
"openWorkflow": "Mở Workflow",
"name": "Tên",
"unnamedWorkflow": "Workflow Vô Danh",
"saveWorkflow": "Lưu Workflow",
"problemSavingWorkflow": "Có Vấn Đề Khi Lưu Workflow",
"noDescription": "Không có mô tả",
"updated": "Đã Cập Nhật",
"uploadWorkflow": "Tải Từ Tệp",
"autoLayout": "Bố Trí Tự Động",
"loadWorkflow": "$t(common.load) Workflow",
"searchWorkflows": "Tìm Workflow",
"newWorkflowCreated": "Workflow Mới Được Tạo",
"workflowCleared": "Đã Dọn Dẹp Workflow",
"loadFromGraph": "Tải Workflow Từ Đồ Thị",
@@ -2554,7 +2613,6 @@
"opened": "Đã Mở",
"deleteWorkflow": "Xoá Workflow",
"workflowEditorMenu": "Menu Biên Tập Workflow",
"openLibrary": "Mở Thư Viện",
"builder": {
"resetAllNodeFields": "Tải Lại Các Vùng Node",
"builder": "Trình Tạo Vùng Nhập",
@@ -2570,7 +2628,6 @@
"multiLine": "Nhiều Dòng",
"slider": "Thanh Trượt",
"both": "Cả Hai",
"emptyRootPlaceholderViewMode": "Chọn Chỉnh Sửa để bắt đầu tạo nên một vùng nhập cho workflow này.",
"emptyRootPlaceholderEditMode": "Kéo thành phần vùng nhập hoặc vùng node vào đây để bắt đầu.",
"containerPlaceholder": "Hộp Chứa Trống",
"headingPlaceholder": "Đầu Dòng Trống",
@@ -2579,7 +2636,6 @@
"deleteAllElements": "Xóa Tất Cả Thành Phần",
"nodeField": "Vùng Node",
"nodeFieldTooltip": "Để thêm vùng node, bấm vào dấu cộng nhỏ trên vùng trong Trình Biên Tập Workflow, hoặc kéo vùng theo tên của nó vào vùng nhập.",
"workflowBuilderAlphaWarning": "Trình tạo vùng nhập đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành.",
"container": "Hộp Chứa",
"heading": "Đầu Dòng",
"text": "Văn Bản",
@@ -2622,25 +2678,39 @@
"publishingValidationRunInProgress": "Quá trình kiểm tra tính hợp lệ đang diễn ra.",
"selectingOutputNodeDesc": "Bấm vào node để biến nó thành node đầu ra của workflow.",
"selectingOutputNode": "Chọn node đầu ra",
"errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata"
"errorWorkflowHasUnpublishableNodes": "Workflow có lô node, node sản sinh, hoặc node tách metadata",
"removeFromForm": "Xóa Khỏi Vùng Nhập",
"showShuffle": "Hiện Xáo Trộn",
"shuffle": "Xáo Trộn",
"emptyRootPlaceholderViewMode": "Chọn Chỉnh Sửa để bắt đầu tạo nên một vùng nhập cho workflow này.",
"workflowBuilderAlphaWarning": "Trình tạo vùng nhập đang trong giai đoạn alpha. Nó có thể xuất hiện những thay đổi đột ngột trước khi chính thức được phát hành."
},
"yourWorkflows": "Workflow Của Bạn",
"browseWorkflows": "Khám Phá Workflow",
"workflowThumbnail": "Ảnh Minh Họa Workflow",
"saveChanges": "Lưu Thay Đổi",
"allLoaded": "Đã Tải Tất Cả Workflow",
"shared": "Nhóm",
"searchPlaceholder": "Tìm theo tên, mô tả, hoặc nhãn",
"filterByTags": "Lọc Theo Nhãn",
"recentlyOpened": "Mở Gần Đây",
"private": "Cá Nhân",
"loadMore": "Tải Thêm",
"view": "Xem",
"deselectAll": "Huỷ Chọn Tất Cả",
"noRecentWorkflows": "Không Có Workflows Gần Đây",
"recommended": "Có Thể Bạn Sẽ Cần",
"emptyStringPlaceholder": "<xâu ký tự trống>",
"published": "Đã Đăng"
"published": "Đã Đăng",
"defaultWorkflows": "Workflow Mặc Định",
"userWorkflows": "Workflow Của Người Dùng",
"projectWorkflows": "Dự Án Workflow",
"allLoaded": "Đã Tải Tất Cả Workflow",
"filterByTags": "Lọc Theo Nhãn",
"noRecentWorkflows": "Không Có Workflows Gần Đây",
"openWorkflow": "Mở Workflow",
"problemLoading": "Có Vấn Đề Khi Tải Workflow",
"noDescription": "Không có mô tả",
"searchWorkflows": "Tìm Workflow",
"clearWorkflowSearchFilter": "Xoá Workflow Khỏi Bộ Lọc Tìm Kiếm",
"openLibrary": "Mở Thư Viện"
},
"upscaling": {
"missingUpscaleInitialImage": "Thiếu ảnh dùng để upscale",
@@ -2677,11 +2747,11 @@
"whatsNewInInvoke": "Có Gì Mới Ở Invoke",
"readReleaseNotes": "Đọc Ghi Chú Phát Hành",
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
"Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
]
"Canvas: Chia tách màu nổi và màu nền - bật/tắt với 'x', khởi động lại về dạng đen trắng với 'd'",
"LoRA: Đặt khối lượng mặc định cho LoRA trong Trình Quản Lý Model"
],
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng"
},
"upsell": {
"professional": "Chuyên Nghiệp",
@@ -2709,5 +2779,12 @@
"clearSucceeded": "Cache Model Đã Được Dọn",
"clearFailed": "Có Vấn Đề Khi Dọn Cache Model",
"clear": "Dọn Cache Model"
},
"lora": {
"weight": "Trọng Lượng"
},
"video": {
"noVideoSelected": "Không có video được chọn",
"selectFromGallery": "Chọn một video trong thư viện để xem"
}
}

View File

@@ -25,7 +25,6 @@
"batch": "批次管理器",
"communityLabel": "社区",
"modelManager": "模型管理器",
"imageFailedToLoad": "无法加载图像",
"learnMore": "了解更多",
"advanced": "高级",
"t2iAdapter": "T2I Adapter",
@@ -51,23 +50,19 @@
"somethingWentWrong": "出了点问题",
"copyError": "$t(gallery.copy) 错误",
"input": "输入",
"notInstalled": "非 $t(common.installed)",
"delete": "删除",
"updated": "已上传",
"save": "保存",
"created": "已创建",
"prevPage": "上一页",
"unknownError": "未知错误",
"direction": "指向",
"orderBy": "排序方式:",
"nextPage": "下一页",
"saveAs": "保存为",
"ai": "ai",
"or": "或",
"aboutDesc": "使用 Invoke 工作?来看看:",
"add": "添加",
"copy": "复制",
"localSystem": "本地系统",
"aboutHeading": "掌握你的创造力",
"enabled": "已启用",
"disabled": "已禁用",
@@ -78,7 +73,6 @@
"selected": "选中的",
"green": "绿",
"blue": "蓝",
"goTo": "前往",
"dontShowMeThese": "请勿显示这些内容",
"beta": "测试版",
"toResolve": "解决",
@@ -104,13 +98,11 @@
"galleryImageSize": "预览大小",
"gallerySettings": "预览设置",
"autoSwitchNewImages": "自动切换到新图像",
"noImagesInGallery": "无图像可用于显示",
"deleteImage_other": "删除{{count}}张图片",
"deleteImagePermanent": "删除的图片无法被恢复。",
"autoAssignBoardOnClick": "点击后自动分配面板",
"featuresWillReset": "如果您删除该图像,这些功能会立即被重置。",
"loading": "加载中",
"unableToLoad": "无法加载图库",
"currentlyInUse": "该图像目前在以下功能中使用:",
"copy": "复制",
"download": "下载",
@@ -125,7 +117,6 @@
"starImage": "收藏图像",
"alwaysShowImageSizeBadge": "始终显示图像尺寸",
"selectForCompare": "选择以比较",
"selectAnImageToCompare": "选择一个图像进行比较",
"slider": "滑块",
"sideBySide": "并排",
"bulkDownloadFailed": "下载失败",
@@ -148,7 +139,6 @@
"newestFirst": "最新在前",
"compareHelp4": "按 <Kbd>Z</Kbd>或 <Kbd>Esc</Kbd> 键退出。",
"searchImages": "按元数据搜索",
"jump": "跳过",
"compareHelp2": "按 <Kbd>M</Kbd> 键切换不同的比较模式。",
"displayBoardSearch": "板块搜索",
"displaySearch": "图像搜索",
@@ -161,8 +151,6 @@
"gallery": "画廊",
"move": "移动",
"imagesTab": "您在Invoke中创建和保存的图片。",
"openViewer": "打开查看器",
"closeViewer": "关闭查看器",
"assetsTab": "您已上传用于项目的文件。"
},
"hotkeys": {
@@ -310,10 +298,6 @@
"title": "移动工具",
"desc": "选择移动工具。"
},
"setFillToWhite": {
"title": "将颜色设置为白色",
"desc": "将当前工具的颜色设置为白色。"
},
"cancelTransform": {
"desc": "取消待处理的变换。",
"title": "取消变换"
@@ -577,9 +561,7 @@
"huggingFacePlaceholder": "所有者或模型名称",
"huggingFaceRepoID": "HuggingFace仓库ID",
"loraTriggerPhrases": "LoRA 触发词",
"ipAdapters": "IP适配器",
"spandrelImageToImage": "图生图(Spandrel)",
"starterModelsInModelManager": "您可以在模型管理器中找到初始模型",
"noDefaultSettings": "此模型没有配置默认设置。请访问模型管理器添加默认设置。",
"clipEmbed": "CLIP 嵌入",
"defaultSettingsOutOfSync": "某些设置与模型的默认值不匹配:",
@@ -630,12 +612,10 @@
"scaledHeight": "缩放长度",
"infillMethod": "填充方法",
"tileSize": "方格尺寸",
"downloadImage": "下载图像",
"usePrompt": "使用提示",
"useSeed": "使用种子",
"useAll": "使用所有参数",
"info": "信息",
"showOptionsPanel": "显示侧栏浮窗 (O 或 T)",
"seamlessYAxis": "无缝平铺 Y 轴",
"seamlessXAxis": "无缝平铺 X 轴",
"denoisingStrength": "去噪强度",
@@ -661,15 +641,11 @@
"addingImagesTo": "添加图像到",
"noPrompts": "没有已生成的提示词",
"canvasIsFiltering": "画布正在过滤",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
"noFLUXVAEModelSelected": "未为FLUX生成选择VAE模型",
"canvasIsRasterizing": "画布正在栅格化",
"canvasIsCompositing": "画布正在合成",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框宽度为 {{width}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框宽度为 {{width}}",
"noT5EncoderModelSelected": "未为FLUX生成选择T5编码器模型",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框高度为 {{height}}",
"canvasIsTransforming": "画布正在变换"
},
"patchmatchDownScaleSize": "缩小",
@@ -733,8 +709,6 @@
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用.",
"enableModelDescriptions": "在下拉菜单中启用模型描述",
"confirmOnNewSession": "新会话时确认",
"modelDescriptionsDisabledDesc": "下拉菜单中的模型描述已被禁用。可在设置中启用。",
"modelDescriptionsDisabled": "下拉菜单中的模型描述已禁用",
"showDetailedInvocationProgress": "显示进度详情"
},
"toast": {
@@ -750,14 +724,11 @@
"problemCopyingImage": "无法复制图像",
"modelAddedSimple": "模型已加入队列",
"loadedWithWarnings": "已加载带有警告的工作流",
"setControlImage": "设为控制图像",
"setNodeField": "设为节点字段",
"imageUploaded": "图像已上传",
"addedToBoard": "添加到{{name}}的资产中",
"workflowLoaded": "工作流已加载",
"imageUploadFailed": "图像上传失败",
"baseModelChangedCleared_other": "已清除或禁用{{count}}个不兼容的子模型",
"invalidUpload": "无效的上传",
"problemDeletingWorkflow": "删除工作流时出现问题",
"workflowDeleted": "已删除工作流",
"problemRetrievingWorkflow": "检索工作流时发生问题",
@@ -777,21 +748,16 @@
"modelImportCanceled": "模型导入已取消",
"importFailed": "导入失败",
"importSuccessful": "导入成功",
"layerSavedToAssets": "图层已保存到资产",
"sentToUpscale": "已发送到放大处理",
"addedToUncategorized": "已添加到看板 $t(boards.uncategorized) 的资产中",
"linkCopied": "链接已复制",
"uploadFailedInvalidUploadDesc_withCount_other": "最多只能上传 {{count}} 张 PNG 或 JPEG 图像。",
"problemSavingLayer": "无法保存图层",
"unableToLoadImage": "无法加载图像",
"imageNotLoadedDesc": "无法找到图像",
"unableToLoadStylePreset": "无法加载样式预设",
"stylePresetLoaded": "样式预设已加载",
"problemCopyingLayer": "无法复制图层",
"sentToCanvas": "已发送到画布",
"unableToLoadImageMetadata": "无法加载图像元数据",
"imageSaved": "图像已保存",
"imageSavingFailed": "图像保存失败",
"layerCopiedToClipboard": "图层已复制到剪贴板",
"imagesWillBeAddedTo": "上传的图像将添加到看板 {{boardName}} 的资产中。"
},
@@ -819,11 +785,8 @@
"fitViewportNodes": "自适应视图",
"showMinimapnodes": "显示缩略图",
"hideMinimapnodes": "隐藏缩略图",
"showLegendNodes": "显示字段类型图例",
"hideLegendNodes": "隐藏字段类型图例",
"downloadWorkflow": "下载工作流 JSON",
"workflowDescription": "简述",
"versionUnknown": " 未知版本",
"noNodeSelected": "无选中的节点",
"addNode": "添加节点",
"unableToValidateWorkflow": "无法验证工作流",
@@ -833,9 +796,7 @@
"workflowContact": "联系",
"animatedEdges": "边缘动效",
"nodeTemplate": "节点模板",
"unableToLoadWorkflow": "无法加载工作流",
"snapToGrid": "对齐网格",
"noFieldsLinearview": "线性视图中未添加任何字段",
"nodeSearch": "检索节点",
"version": "版本",
"validateConnections": "验证连接和节点图",
@@ -850,8 +811,6 @@
"fieldTypesMustMatch": "类型必须匹配",
"workflow": "工作流",
"animatedEdgesHelp": "为选中边缘和其连接的选中节点的边缘添加动画",
"unknownTemplate": "未知模板",
"removeLinearView": "从线性视图中移除",
"workflowTags": "标签",
"fullyContainNodesHelp": "节点必须完全位于选择框中才能被选中",
"workflowValidation": "工作流验证错误",
@@ -885,7 +844,6 @@
"node": "节点",
"collection": "合集",
"string": "字符串",
"mismatchedVersion": "无效的节点:类型为 {{type}} 的节点 {{node}} 版本不匹配(是否尝试更新?)",
"cannotDuplicateConnection": "无法创建重复的连接",
"enum": "Enum (枚举)",
"float": "浮点",
@@ -896,7 +854,6 @@
"unableToUpdateNodes_other": "{{count}} 个节点无法完成更新",
"inputFieldTypeParseError": "无法解析 {{node}} 的输入类型 {{field}}。({{message}})",
"unsupportedArrayItemType": "不支持的数组类型 \"{{type}}\"",
"addLinearView": "添加到线性视图",
"targetNodeFieldDoesNotExist": "无效的边缘:{{node}} 的目标/输入区域 {{field}} 不存在",
"unsupportedMismatchedUnion": "合集或标量类型与基类 {{firstType}} 和 {{secondType}} 不匹配",
"allNodesUpdated": "已更新所有节点",
@@ -916,7 +873,6 @@
"collectionOrScalarFieldType": "{{name}} (单一项目或项目集合)",
"nodeVersion": "节点版本",
"deletedInvalidEdge": "已删除无效的边缘 {{source}} -> {{target}}",
"unknownInput": "未知输入:{{name}}",
"prototypeDesc": "此调用是一个原型 (prototype)。它可能会在本项目更新期间发生破坏性更改,并且随时可能被删除。",
"betaDesc": "此调用尚处于测试阶段。在稳定之前,它可能会在项目更新期间发生破坏性更改。本项目计划长期支持这种调用。",
"newWorkflow": "新建工作流",
@@ -928,7 +884,6 @@
"missingNode": "缺少调用节点",
"missingInvocationTemplate": "缺少调用模版",
"noFieldsViewMode": "此工作流程未选择任何要显示的字段.请查看完整工作流程以进行配置.",
"reorderLinearView": "调整线性视图顺序",
"viewMode": "在线性视图中使用",
"showEdgeLabelsHelp": "在边缘上显示标签,指示连接的节点",
"cannotMixAndMatchCollectionItemTypes": "集合项目类型不能混用",
@@ -1002,7 +957,6 @@
"session": "会话",
"enqueueing": "队列中的批次",
"graphFailedToQueue": "节点图加入队列失败",
"batchFieldValues": "批处理值",
"time": "时间",
"openQueue": "打开队列",
"prompts_other": "提示词",
@@ -1021,18 +975,14 @@
"refinerStart": "Refiner 开始作用时机",
"scheduler": "调度器",
"cfgScale": "CFG 等级",
"negStylePrompt": "负向样式提示词",
"noModelsAvailable": "无可用模型",
"negAestheticScore": "负向美学评分",
"denoisingStrength": "去噪强度",
"refinermodel": "Refiner 模型",
"posAestheticScore": "正向美学评分",
"concatPromptStyle": "链接提示词 & 样式",
"loading": "加载中...",
"steps": "步数",
"posStylePrompt": "正向样式提示词",
"refiner": "Refiner",
"freePromptStyle": "手动输入样式提示词",
"refinerSteps": "精炼步数"
},
"metadata": {
@@ -1059,8 +1009,6 @@
"vae": "VAE",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"allPrompts": "所有提示",
"parsingFailed": "解析失败",
"recallParameter": "调用{{label}}",
"imageDimensions": "图像尺寸",
"parameterSet": "已设置参数{{parameter}}",
"guidance": "指导",
@@ -1071,11 +1019,9 @@
"models": {
"noMatchingModels": "无相匹配的模型",
"loading": "加载中",
"noMatchingLoRAs": "无相匹配的 LoRA",
"noModelsAvailable": "无可用模型",
"selectModel": "选择一个模型",
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
"noLoRAsInstalled": "无已安装的 LoRA",
"addLora": "添加 LoRA",
"lora": "LoRA",
"defaultVAE": "默认 VAE",
@@ -1104,10 +1050,8 @@
"deletedBoardsCannotbeRestored": "删除的面板无法恢复。选择“仅删除面板”选项后,相关图片将会被移至未分类区域。",
"movingImagesToBoard_other": "移动 {{count}} 张图像到面板:",
"selectedForAutoAdd": "已选中自动添加",
"hideBoards": "隐藏面板",
"noBoards": "没有{{boardType}}类型的面板",
"unarchiveBoard": "恢复面板",
"viewBoards": "查看面板",
"addPrivateBoard": "创建私密面板",
"addSharedBoard": "创建共享面板",
"boards": "面板",
@@ -1576,8 +1520,6 @@
"useCache": "使用缓存"
},
"hrf": {
"enableHrf": "启用高分辨率修复",
"upscaleMethod": "放大方法",
"metadata": {
"strength": "高分辨率修复强度",
"enabled": "高分辨率修复已启用",
@@ -1590,20 +1532,15 @@
"workflowEditorMenu": "工作流编辑器菜单",
"workflowName": "工作流名称",
"saveWorkflow": "保存工作流",
"openWorkflow": "打开工作流",
"clearWorkflowSearchFilter": "清除工作流检索过滤器",
"workflowLibrary": "工作流库",
"downloadWorkflow": "保存到文件",
"workflowSaved": "已保存工作流",
"unnamedWorkflow": "未命名的工作流",
"savingWorkflow": "保存工作流中...",
"problemLoading": "加载工作流时出现问题",
"loading": "加载工作流中",
"searchWorkflows": "检索工作流",
"problemSavingWorkflow": "保存工作流时出现问题",
"deleteWorkflow": "删除工作流",
"workflows": "工作流",
"noDescription": "无描述",
"uploadWorkflow": "从文件中加载",
"newWorkflowCreated": "已创建新的工作流",
"name": "名称",
@@ -1623,9 +1560,6 @@
"copyShareLinkForWorkflow": "复制工作流程的分享链接",
"delete": "删除",
"download": "下载",
"defaultWorkflows": "默认工作流程",
"userWorkflows": "用户工作流程",
"projectWorkflows": "项目工作流程",
"copyShareLink": "复制分享链接",
"chooseWorkflowFromLibrary": "从库中选择工作流程",
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
@@ -1663,7 +1597,6 @@
"moveToBack": "移动到后面",
"moveToFront": "移动到前面",
"addLayer": "添加层",
"deletePrompt": "删除提示词",
"addPositivePrompt": "添加 $t(controlLayers.prompt)",
"addNegativePrompt": "添加 $t(controlLayers.negativePrompt)",
"rectangle": "矩形",
@@ -1687,7 +1620,6 @@
"maskFill": "遮罩填充",
"newCanvasFromImage": "从图像创建新画布",
"pullBboxIntoReferenceImageOk": "边界框已导入到参考图像",
"globalReferenceImage_withCount_other": "全局参考图像",
"addInpaintMask": "添加 $t(controlLayers.inpaintMask)",
"referenceImage": "参考图像",
"globalReferenceImage": "全局参考图像",
@@ -1696,14 +1628,10 @@
"copyRasterLayerTo": "复制 $t(controlLayers.rasterLayer) 到",
"clearHistory": "清除历史记录",
"inpaintMask": "修复遮罩",
"regionalGuidance_withCount_visible": "区域引导({{count}} 个)",
"inpaintMasks_withCount_hidden": "修复遮罩({{count}} 个已隐藏)",
"enableAutoNegative": "启用自动负面提示",
"disableAutoNegative": "禁用自动负面提示",
"deleteReferenceImage": "删除参考图像",
"sendToCanvas": "发送到画布",
"controlLayers_withCount_visible": "控制图层({{count}} 个)",
"rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
"convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
"newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
"regionIsEmpty": "选定区域为空",
@@ -1715,14 +1643,12 @@
"addRasterLayer": "添加 $t(controlLayers.rasterLayer)",
"newRasterLayerOk": "已创建栅格层",
"newRasterLayerError": "创建栅格层时出现问题",
"inpaintMasks_withCount_visible": "修复遮罩({{count}} 个)",
"convertRasterLayerTo": "将 $t(controlLayers.rasterLayer) 转换为",
"copyControlLayerTo": "复制 $t(controlLayers.controlLayer) 到",
"copyInpaintMaskTo": "复制 $t(controlLayers.inpaintMask) 到",
"copyRegionalGuidanceTo": "复制 $t(controlLayers.regionalGuidance) 到",
"newRasterLayer": "新建 $t(controlLayers.rasterLayer)",
"newControlLayer": "新建 $t(controlLayers.controlLayer)",
"newImg2ImgCanvasFromImage": "从图像创建新的图生图",
"rasterLayer": "栅格层",
"controlLayer": "控制层",
"outputOnlyMaskedRegions": "仅输出生成的区域",
@@ -1735,36 +1661,22 @@
"bboxOverlay": "显示边界框覆盖层",
"clipToBbox": "将Clip限制到边界框",
"width": "宽度",
"addGlobalReferenceImage": "添加 $t(controlLayers.globalReferenceImage)",
"inpaintMask_withCount_other": "修复遮罩",
"regionalGuidance_withCount_other": "区域引导",
"newRegionalReferenceImageError": "创建局部参考图像时出现问题",
"pullBboxIntoLayerError": "将边界框导入图层时出现问题",
"pullBboxIntoLayerOk": "边界框已导入到图层",
"sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
"sendToGallery": "发送到图库",
"sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
"rasterLayer_withCount_other": "栅格图层",
"mergeDown": "向下合并",
"clearCaches": "清除缓存",
"recalculateRects": "重新计算矩形",
"duplicate": "复制",
"regionalGuidance_withCount_hidden": "区域引导({{count}} 个已隐藏)",
"convertControlLayerTo": "将 $t(controlLayers.controlLayer) 转换为",
"convertInpaintMaskTo": "将 $t(controlLayers.inpaintMask) 转换为",
"viewProgressInViewer": "在 <Btn>图像查看器</Btn> 中查看进度和输出结果。",
"viewProgressOnCanvas": "在 <Btn>画布</Btn> 上查看进度和暂存的输出内容。",
"sendingToGallery": "将生成内容发送到图库",
"copyToClipboard": "复制到剪贴板",
"controlLayer_withCount_other": "控制图层",
"sendingToCanvas": "在画布上准备生成",
"addReferenceImage": "添加 $t(controlLayers.referenceImage)",
"addRegionalGuidance": "添加 $t(controlLayers.regionalGuidance)",
"controlLayers_withCount_hidden": "控制图层({{count}} 个已隐藏)",
"rasterLayers_withCount_hidden": "栅格图层({{count}} 个已隐藏)",
"globalReferenceImages_withCount_hidden": "全局参考图像({{count}} 个已隐藏)",
"globalReferenceImages_withCount_visible": "全局参考图像({{count}} 个)",
"layer_withCount_other": "图层({{count}} 个)",
"enableTransparencyEffect": "启用透明效果",
"disableTransparencyEffect": "禁用透明效果",
"hidingType": "隐藏 {{type}}",

View File

@@ -19,7 +19,6 @@
"folder": "資料夾",
"installed": "已安裝",
"accept": "接受",
"goTo": "前往",
"input": "輸入",
"random": "隨機",
"selected": "已選擇",
@@ -29,8 +28,7 @@
"copy": "複製",
"error": "錯誤",
"file": "檔案",
"format": "格式",
"imageFailedToLoad": "無法載入圖片"
"format": "格式"
},
"accessibility": {
"invokeProgressBar": "Invoke 進度條",
@@ -179,8 +177,7 @@
"workflowAuthor": "作者",
"version": "版本",
"executionStateCompleted": "已完成",
"edge": "邊緣",
"versionUnknown": " 版本未知"
"edge": "邊緣"
},
"sdxl": {
"steps": "步數",

View File

@@ -2,12 +2,12 @@ import { useAppSelector } from 'app/store/storeHooks';
import { useIsRegionFocused } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { useLoadWorkflow } from 'features/gallery/hooks/useLoadWorkflow';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallAll } from 'features/gallery/hooks/useRecallAllImageMetadata';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { memo } from 'react';
import { useImageDTO } from 'services/api/endpoints/images';
@@ -15,8 +15,8 @@ import type { ImageDTO } from 'services/api/types';
export const GlobalImageHotkeys = memo(() => {
useAssertSingleton('GlobalImageHotkeys');
const imageName = useAppSelector(selectLastSelectedImage);
const imageDTO = useImageDTO(imageName);
const lastSelectedItem = useAppSelector(selectLastSelectedItem);
const imageDTO = useImageDTO(lastSelectedItem?.type === 'image' ? lastSelectedItem.id : null);
if (!imageDTO) {
return null;

View File

@@ -2,11 +2,14 @@ import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { CropImageModal } from 'features/cropper/components/CropImageModal';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { DeleteVideoModal } from 'features/deleteVideoModal/components/DeleteVideoModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import { ImageContextMenu } from 'features/gallery/components/ContextMenu/ImageContextMenu';
import { VideoContextMenu } from 'features/gallery/components/ContextMenu/VideoContextMenu';
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/ShareWorkflowModal';
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
@@ -31,6 +34,7 @@ export const GlobalModalIsolator = memo(() => {
return (
<>
<DeleteImageModal />
<DeleteVideoModal />
<ChangeBoardModal />
<DynamicPromptsModal />
<StylePresetModal />
@@ -47,6 +51,7 @@ export const GlobalModalIsolator = memo(() => {
<DeleteBoardModal />
<GlobalImageHotkeys />
<ImageContextMenu />
<VideoContextMenu />
<FullscreenDropzone />
<VideosModal />
<SaveWorkflowAsDialog />
@@ -54,6 +59,7 @@ export const GlobalModalIsolator = memo(() => {
<CanvasPasteModal />
</CanvasManagerProviderGate>
<LoadWorkflowFromGraphModal />
<CropImageModal />
</>
);
});

View File

@@ -1,16 +1,14 @@
import 'i18n';
import type { Middleware } from '@reduxjs/toolkit';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import type { InvokeAIUIProps } from 'app/components/types';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { $accountTypeText } from 'app/store/nanostores/accountTypeText';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { $logo } from 'app/store/nanostores/logo';
@@ -20,11 +18,10 @@ import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/proj
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $toastMap } from 'app/store/nanostores/toastMap';
import { $videoUpsellComponent } from 'app/store/nanostores/videoUpsellComponent';
import { $whatsNew } from 'app/store/nanostores/whatsNew';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import {
$workflowLibraryCategoriesOptions,
$workflowLibrarySortOptions,
@@ -33,47 +30,13 @@ import {
DEFAULT_WORKFLOW_LIBRARY_SORT_OPTIONS,
DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
} from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
import { Provider } from 'react-redux';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
import { $socketOptions } from 'services/events/stores';
import type { ManagerOptions, SocketOptions } from 'socket.io-client';
const App = lazy(() => import('./App'));
interface Props extends PropsWithChildren {
apiUrl?: string;
openAPISchemaUrl?: string;
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
accountSettingsLink?: string;
middleware?: Middleware[];
projectId?: string;
projectName?: string;
projectUrl?: string;
queueId?: string;
studioInitAction?: StudioInitAction;
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
toastMap?: Record<string, ToastConfig>;
whatsNew?: ReactNode[];
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
/**
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistDebounce?: number;
}
const InvokeAIUI = ({
apiUrl,
openAPISchemaUrl,
@@ -92,14 +55,16 @@ const InvokeAIUI = ({
isDebugging = false,
logo,
toastMap,
accountTypeText,
videoUpsellComponent,
workflowCategories,
workflowTagCategories,
workflowSortOptions,
loggingOverrides,
onClickGoToModelManager,
whatsNew,
storagePersistDebounce = 2000,
}: Props) => {
storagePersistDebounce = 300,
}: InvokeAIUIProps) => {
const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
const [didRehydrate, setDidRehydrate] = useState(false);
@@ -180,6 +145,26 @@ const InvokeAIUI = ({
};
}, [customStarUi]);
useEffect(() => {
if (accountTypeText) {
$accountTypeText.set(accountTypeText);
}
return () => {
$accountTypeText.set('');
};
}, [accountTypeText]);
useEffect(() => {
if (videoUpsellComponent) {
$videoUpsellComponent.set(videoUpsellComponent);
}
return () => {
$videoUpsellComponent.set(undefined);
};
}, [videoUpsellComponent]);
useEffect(() => {
if (customNavComponent) {
$customNavComponent.set(customNavComponent);

View File

@@ -0,0 +1,43 @@
import type { Middleware } from '@reduxjs/toolkit';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
import type { PartialAppConfig } from 'app/types/invokeai';
import type { SocketOptions } from 'dgram';
import type { WorkflowSortOption, WorkflowTagCategory } from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import type { ManagerOptions } from 'socket.io-client';
export interface InvokeAIUIProps extends PropsWithChildren {
apiUrl?: string;
openAPISchemaUrl?: string;
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
accountSettingsLink?: string;
middleware?: Middleware[];
projectId?: string;
projectName?: string;
projectUrl?: string;
queueId?: string;
studioInitAction?: StudioInitAction;
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
toastMap?: Record<string, ToastConfig>;
accountTypeText?: string;
videoUpsellComponent?: ReactNode;
whatsNew?: ReactNode[];
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
/**
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistDebounce?: number;
}

View File

@@ -4,7 +4,6 @@ import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
import { canvasReset } from 'features/controlLayers/store/actions';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { sentImageToCanvas } from 'features/gallery/store/actions';
@@ -42,6 +41,7 @@ type StudioDestinationAction = _StudioInitAction<
| 'canvas'
| 'workflows'
| 'upscaling'
| 'video'
| 'viewAllWorkflows'
| 'viewAllWorkflowsRecommended'
| 'viewAllStylePresets';
@@ -118,7 +118,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const metadata = getImageMetadataResult.value;
store.dispatch(canvasReset());
// This shows a toast
await MetadataUtils.recallAll(metadata, store);
await MetadataUtils.recallAllImageMetadata(metadata, store);
},
[store, t]
);
@@ -163,7 +163,6 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
case 'generation':
// Go to the generate tab, open the launchpad
await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
store.dispatch(paramsReset());
break;
case 'canvas':
// Go to the canvas tab, open the launchpad
@@ -177,6 +176,10 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
// Go to the upscaling tab
navigationApi.switchToTab('upscaling');
break;
case 'video':
// Go to the video tab
await navigationApi.focusPanel('video', LAUNCHPAD_PANEL_ID);
break;
case 'viewAllWorkflows':
// Go to the workflows tab and open the workflow library modal
navigationApi.switchToTab('workflows');

View File

@@ -26,6 +26,7 @@ export const zLogNamespace = z.enum([
'system',
'queue',
'workflows',
'video',
]);
export type LogNamespace = z.infer<typeof zLogNamespace>;

View File

@@ -1,7 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { selectLastSelectedItem } from 'features/gallery/store/gallerySelectors';
import { itemSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
export const appStarted = createAction('app/appStarted');
@@ -18,11 +18,13 @@ export const addAppStartedListener = (startAppListening: AppStartListening) => {
const firstImageLoad = await take(imagesApi.endpoints.getImageNames.matchFulfilled);
if (firstImageLoad !== null) {
const [{ payload }] = firstImageLoad;
const selectedImage = selectLastSelectedImage(getState());
const selectedImage = selectLastSelectedItem(getState());
if (selectedImage) {
return;
}
dispatch(imageSelected(payload.image_names.at(0) ?? null));
if (payload.image_names[0]) {
dispatch(itemSelected({ type: 'image', id: payload.image_names[0] }));
}
}
},
});

View File

@@ -1,8 +1,14 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/store';
import { selectGetImageNamesQueryArgs, selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import {
selectGalleryView,
selectGetImageNamesQueryArgs,
selectGetVideoIdsQueryArgs,
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, itemSelected } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import { videosApi } from 'services/api/endpoints/videos';
export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => {
startAppListening({
@@ -11,35 +17,65 @@ export const addBoardIdSelectedListener = (startAppListening: AppStartListening)
// Cancel any in-progress instances of this listener, we don't want to select an image from a previous board
cancelActiveListeners();
if (boardIdSelected.match(action) && action.payload.selectedImageName) {
// This action already has a selected image name, we trust it is valid
if (boardIdSelected.match(action) && action.payload.select) {
// This action already has a resource selection - skip the below auto-selection logic
return;
}
const state = getState();
const board_id = selectSelectedBoardId(state);
const view = selectGalleryView(state);
const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
if (view === 'images' || view === 'assets') {
const queryArgs = { ...selectGetImageNamesQueryArgs(state), board_id };
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
5000
);
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).isSuccess,
5000
);
if (!isSuccess) {
dispatch(itemSelected(null));
return;
}
if (!isSuccess) {
dispatch(imageSelected(null));
return;
// the board was just changed - we can select the first image
const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;
const imageToSelect = imageNames && imageNames.length > 0 ? imageNames[0] : null;
if (imageToSelect) {
dispatch(itemSelected({ type: 'image', id: imageToSelect }));
} else {
dispatch(itemSelected(null));
}
} else {
const queryArgs = { ...selectGetVideoIdsQueryArgs(state), board_id };
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() => videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).isSuccess,
5000
);
if (!isSuccess) {
dispatch(itemSelected(null));
return;
}
// the board was just changed - we can select the first image
const videoIds = videosApi.endpoints.getVideoIds.select(queryArgs)(getState()).data?.video_ids;
const videoToSelect = videoIds && videoIds.length > 0 ? videoIds[0] : null;
if (videoToSelect) {
dispatch(itemSelected({ type: 'video', id: videoToSelect }));
} else {
dispatch(itemSelected(null));
}
}
// the board was just changed - we can select the first image
const imageNames = imagesApi.endpoints.getImageNames.select(queryArgs)(getState()).data?.image_names;
const imageToSelect = imageNames?.at(0) ?? null;
dispatch(imageSelected(imageToSelect));
},
});
};

View File

@@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/store';
import { bboxSyncedToOptimalDimension, rgRefImageModelChanged } from 'features/controlLayers/store/canvasSlice';
import { buildSelectIsStaging, selectCanvasSessionId } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { loraIsEnabledChanged } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, syncedToOptimalDimension, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectReferenceImageEntities } from 'features/controlLayers/store/refImagesSlice';
import {
@@ -12,6 +12,7 @@ import {
} from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { modelSelected } from 'features/parameters/store/actions';
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
@@ -22,6 +23,7 @@ import {
isFluxKontextApiModelConfig,
isFluxKontextModelConfig,
isFluxReduxModelConfig,
isGemini2_5ModelConfig,
} from 'services/api/types';
const log = logger('models');
@@ -44,13 +46,13 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
if (didBaseModelChange) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;
let modelsUpdatedDisabledOrCleared = 0;
// handle incompatible loras
state.loras.loras.forEach((lora) => {
if (lora.model.base !== newBase) {
dispatch(loraDeleted({ id: lora.id }));
modelsCleared += 1;
dispatch(loraIsEnabledChanged({ id: lora.id, isEnabled: false }));
modelsUpdatedDisabledOrCleared += 1;
}
});
@@ -58,52 +60,57 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
const { vae } = state.params;
if (vae && vae.base !== newBase) {
dispatch(vaeSelected(null));
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
if (SUPPORTS_REF_IMAGES_BASE_MODELS.includes(newModel.base)) {
// Handle incompatible reference image models - switch to first compatible model, with some smart logic
// to choose the best available model based on the new main model.
const allRefImageModels = selectGlobalRefImageModels(state).filter(({ base }) => base === newBase);
let newGlobalRefImageModel = null;
let newGlobalRefImageModel = null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// Certain models require the ref image model to be the same as the main model - others just need a matching
// base. Helper to grab the first exact match or the first available model if no exact match is found.
const exactMatchOrFirst = <T extends AnyModelConfig>(candidates: T[]): T | null =>
candidates.find(({ key }) => key === newModel.key) ?? candidates[0] ?? null;
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}
// The only way we can differentiate between FLUX and FLUX Kontext is to check for "kontext" in the name
if (newModel.base === 'flux' && newModel.name.toLowerCase().includes('kontext')) {
const fluxKontextDevModels = allRefImageModels.filter(isFluxKontextModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextDevModels);
} else if (newModel.base === 'chatgpt-4o') {
const chatGPT4oModels = allRefImageModels.filter(isChatGPT4oModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(chatGPT4oModels);
} else if (newModel.base === 'gemini-2.5') {
const gemini2_5Models = allRefImageModels.filter(isGemini2_5ModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(gemini2_5Models);
} else if (newModel.base === 'flux-kontext') {
const fluxKontextApiModels = allRefImageModels.filter(isFluxKontextApiModelConfig);
newGlobalRefImageModel = exactMatchOrFirst(fluxKontextApiModels);
} else if (newModel.base === 'flux') {
const fluxReduxModels = allRefImageModels.filter(isFluxReduxModelConfig);
newGlobalRefImageModel = fluxReduxModels[0] ?? null;
} else {
newGlobalRefImageModel = allRefImageModels[0] ?? null;
}
// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);
// All ref image entities are updated to use the same new model
const refImageEntities = selectReferenceImageEntities(state);
for (const entity of refImageEntities) {
const shouldUpdateModel =
(entity.config.model && entity.config.model.base !== newBase) ||
(!entity.config.model && newGlobalRefImageModel);
if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsCleared += 1;
if (shouldUpdateModel) {
dispatch(
refImageModelChanged({
id: entity.id,
modelConfig: newGlobalRefImageModel,
})
);
modelsUpdatedDisabledOrCleared += 1;
}
}
}
@@ -128,17 +135,17 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
modelConfig: newRegionalRefImageModel,
})
);
modelsCleared += 1;
modelsUpdatedDisabledOrCleared += 1;
}
}
}
if (modelsCleared > 0) {
if (modelsUpdatedDisabledOrCleared > 0) {
toast({
id: 'BASE_MODEL_CHANGED',
title: t('toast.baseModelChanged'),
description: t('toast.baseModelChangedCleared', {
count: modelsCleared,
count: modelsUpdatedDisabledOrCleared,
}),
status: 'warning',
});

View File

@@ -12,13 +12,21 @@ import {
} from 'features/controlLayers/store/paramsSlice';
import { refImageModelChanged, selectRefImagesSlice } from 'features/controlLayers/store/refImagesSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier, isFLUXReduxConfig, isIPAdapterConfig } from 'features/controlLayers/store/types';
import {
getEntityIdentifier,
isFLUXReduxConfig,
isIPAdapterConfig,
isRegionalGuidanceFLUXReduxConfig,
isRegionalGuidanceIPAdapterConfig,
} from 'features/controlLayers/store/types';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { modelSelected } from 'features/parameters/store/actions';
import {
postProcessingModelChanged,
tileControlnetModelChanged,
upscaleModelChanged,
} from 'features/parameters/store/upscaleSlice';
import { videoModelChanged } from 'features/parameters/store/videoSlice';
import {
zParameterCLIPEmbedModel,
zParameterSpandrelImageToImageModel,
@@ -41,6 +49,7 @@ import {
isRefinerMainModelModelConfig,
isSpandrelImageToImageModelConfig,
isT5EncoderModelConfig,
isVideoModelConfig,
} from 'services/api/types';
import type { JsonObject } from 'type-fest';
@@ -81,6 +90,7 @@ export const addModelsLoadedListener = (startAppListening: AppStartListening) =>
handleCLIPEmbedModels(models, state, dispatch, log);
handleFLUXVAEModels(models, state, dispatch, log);
handleFLUXReduxModels(models, state, dispatch, log);
handleVideoModels(models, state, dispatch, log);
},
});
};
@@ -193,6 +203,22 @@ const handleLoRAModels: ModelHandler = (models, state, dispatch, log) => {
});
};
const handleVideoModels: ModelHandler = (models, state, dispatch, log) => {
const videoModels = models.filter(isVideoModelConfig);
const selectedVideoModel = state.video.videoModel;
if (selectedVideoModel && videoModels.some((m) => m.key === selectedVideoModel.key)) {
return;
}
const firstModel = videoModels[0] || null;
if (firstModel) {
log.debug({ firstModel }, 'No video model selected, selecting first available video model');
dispatch(videoModelChanged({ videoModel: zModelIdentifierField.parse(firstModel) }));
return;
}
};
const handleControlAdapterModels: ModelHandler = (models, state, dispatch, log) => {
const caModels = models.filter(isControlLayerModelConfig);
selectCanvasSlice(state).controlLayers.entities.forEach((entity) => {
@@ -232,7 +258,7 @@ const handleIPAdapterModels: ModelHandler = (models, state, dispatch, log) => {
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isIPAdapterConfig(config)) {
if (!isRegionalGuidanceIPAdapterConfig(config)) {
return;
}
@@ -275,7 +301,7 @@ const handleFLUXReduxModels: ModelHandler = (models, state, dispatch, log) => {
selectCanvasSlice(state).regionalGuidance.entities.forEach((entity) => {
entity.referenceImages.forEach(({ id: referenceImageId, config }) => {
if (!isFLUXReduxConfig(config)) {
if (!isRegionalGuidanceFLUXReduxConfig(config)) {
return;
}

View File

@@ -0,0 +1,3 @@
import { atom } from 'nanostores';
export const $accountTypeText = atom<string>('');

View File

@@ -1,6 +1,11 @@
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
/**
* The user's auth token.
*/
export const $authToken = atom<string | undefined>();
/**
* The crossOrigin value to use for all image loading. Depends on whether the user is authenticated.
*/
export const $crossOrigin = computed($authToken, (token) => (token ? 'use-credentials' : 'anonymous'));

View File

@@ -0,0 +1,4 @@
import { atom } from 'nanostores';
import type { ReactNode } from 'react';
export const $videoUpsellComponent = atom<ReactNode | undefined>(undefined);

View File

@@ -18,7 +18,8 @@ import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
import { deepClone } from 'common/util/deepClone';
import { keys, mergeWith, omit, pick } from 'es-toolkit/compat';
import { merge } from 'es-toolkit';
import { omit, pick } from 'es-toolkit/compat';
import { changeBoardModalSliceConfig } from 'features/changeBoardModal/store/slice';
import { canvasSettingsSliceConfig } from 'features/controlLayers/store/canvasSettingsSlice';
import { canvasSliceConfig } from 'features/controlLayers/store/canvasSlice';
@@ -33,6 +34,7 @@ import { nodesSliceConfig } from 'features/nodes/store/nodesSlice';
import { workflowLibrarySliceConfig } from 'features/nodes/store/workflowLibrarySlice';
import { workflowSettingsSliceConfig } from 'features/nodes/store/workflowSettingsSlice';
import { upscaleSliceConfig } from 'features/parameters/store/upscaleSlice';
import { videoSliceConfig } from 'features/parameters/store/videoSlice';
import { queueSliceConfig } from 'features/queue/store/queueSlice';
import { stylePresetSliceConfig } from 'features/stylePresets/store/stylePresetSlice';
import { configSliceConfig } from 'features/system/store/configSlice';
@@ -78,6 +80,7 @@ const SLICE_CONFIGS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig,
[videoSliceConfig.slice.reducerPath]: videoSliceConfig,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig,
};
@@ -111,6 +114,7 @@ const ALL_REDUCERS = {
[systemSliceConfig.slice.reducerPath]: systemSliceConfig.slice.reducer,
[uiSliceConfig.slice.reducerPath]: uiSliceConfig.slice.reducer,
[upscaleSliceConfig.slice.reducerPath]: upscaleSliceConfig.slice.reducer,
[videoSliceConfig.slice.reducerPath]: videoSliceConfig.slice.reducer,
[workflowLibrarySliceConfig.slice.reducerPath]: workflowLibrarySliceConfig.slice.reducer,
[workflowSettingsSliceConfig.slice.reducerPath]: workflowSettingsSliceConfig.slice.reducer,
};
@@ -130,16 +134,14 @@ const unserialize: UnserializeFunction = (data, key) => {
const initialState = getInitialState();
const parsed = JSON.parse(data);
// strip out old keys
const stripped = pick(deepClone(parsed), keys(initialState));
/*
* Merge in initial state as default values, covering any missing keys. You might be tempted to use _.defaultsDeep,
* but that merges arrays by index and partial objects by key. Using an identity function as the customizer results
* in behaviour like defaultsDeep, but doesn't overwrite any values that are not undefined in the migrated state.
*/
const unPersistDenylisted = mergeWith(stripped, initialState, (objVal) => objVal);
// run (additive) migrations
const migrated = persistConfig.migrate(unPersistDenylisted);
// We need to inject non-persisted values from initial state into the rehydrated state. These values always are
// required to be in the state, but won't be in the persisted data. Build an object that consists of only these
// values, then merge it with the rehydrated state.
const nonPersistedSubsetOfState = pick(initialState, persistConfig.persistDenylist ?? []);
const stateToMigrate = merge(deepClone(parsed), nonPersistedSubsetOfState);
// Run migrations to bring old state up to date with the current version.
const migrated = persistConfig.migrate(stateToMigrate);
log.debug(
{

View File

@@ -58,6 +58,7 @@ const zNumericalParameterConfig = z.object({
fineStep: z.number().default(8),
coarseStep: z.number().default(64),
});
export type NumericalParameterConfig = z.infer<typeof zNumericalParameterConfig>;
/**
* Configuration options for the InvokeAI UI.
@@ -79,6 +80,7 @@ export const zAppConfig = z.object({
allowClientSideUpload: z.boolean(),
allowPublishWorkflows: z.boolean(),
allowPromptExpansion: z.boolean(),
allowVideo: z.boolean(),
disabledTabs: z.array(zTabName),
disabledFeatures: z.array(zAppFeature),
disabledSDFeatures: z.array(zSDFeature),
@@ -139,8 +141,9 @@ export const getDefaultAppConfig = (): AppConfig => ({
allowClientSideUpload: false,
allowPublishWorkflows: false,
allowPromptExpansion: false,
allowVideo: false, // used to determine if video is enabled vs upsell
shouldShowCredits: false,
disabledTabs: [],
disabledTabs: ['video'], // used to determine if video functionality is visible
disabledFeatures: ['lightbox', 'faceRestore', 'batches'] satisfies AppFeature[],
disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'] satisfies SDFeature[],
sd: {

View File

@@ -37,6 +37,7 @@ const REGION_NAMES = [
'workflows',
'progress',
'settings',
'video',
] as const;
/**
* The names of the focus regions.

View File

@@ -6,13 +6,13 @@ import { toast } from 'features/toast/toast';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const useDownloadImage = () => {
export const useDownloadItem = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const authToken = useStore($authToken);
const downloadImage = useCallback(
async (image_url: string, image_name: string) => {
const downloadItem = useCallback(
async (item_url: string, item_id: string) => {
try {
const requestOpts = authToken
? {
@@ -21,7 +21,7 @@ export const useDownloadImage = () => {
},
}
: {};
const blob = await fetch(image_url, requestOpts).then((resp) => resp.blob());
const blob = await fetch(item_url, requestOpts).then((resp) => resp.blob());
if (!blob) {
throw new Error('Unable to create Blob');
}
@@ -30,7 +30,7 @@ export const useDownloadImage = () => {
const a = document.createElement('a');
a.style.display = 'none';
a.href = url;
a.download = image_name;
a.download = item_id;
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
@@ -47,5 +47,5 @@ export const useDownloadImage = () => {
[t, dispatch, authToken]
);
return { downloadImage };
return { downloadItem };
};

View File

@@ -1,5 +1,6 @@
import { useAppStore } from 'app/store/storeHooks';
import { useDeleteImageModalApi } from 'features/deleteImageModal/store/state';
import { useDeleteVideoModalApi } from 'features/deleteVideoModal/store/state';
import { selectSelection } from 'features/gallery/store/gallerySelectors';
import { useClearQueue } from 'features/queue/hooks/useClearQueue';
import { useDeleteCurrentQueueItem } from 'features/queue/hooks/useDeleteCurrentQueueItem';
@@ -12,6 +13,7 @@ import { getFocusedRegion } from './focus';
export const useGlobalHotkeys = () => {
const { dispatch, getState } = useAppStore();
const isVideoEnabled = useFeatureStatus('video');
const isModelManagerEnabled = useFeatureStatus('modelManager');
const queue = useInvoke();
@@ -92,6 +94,18 @@ export const useGlobalHotkeys = () => {
dependencies: [dispatch],
});
useRegisteredHotkeys({
id: 'selectVideoTab',
category: 'app',
callback: () => {
navigationApi.switchToTab('video');
},
options: {
enabled: isVideoEnabled,
},
dependencies: [dispatch],
});
useRegisteredHotkeys({
id: 'selectWorkflowsTab',
category: 'app',
@@ -123,6 +137,8 @@ export const useGlobalHotkeys = () => {
});
const deleteImageModalApi = useDeleteImageModalApi();
const deleteVideoModalApi = useDeleteVideoModalApi();
useRegisteredHotkeys({
id: 'deleteSelection',
category: 'gallery',
@@ -135,7 +151,13 @@ export const useGlobalHotkeys = () => {
if (!selection.length) {
return;
}
deleteImageModalApi.delete(selection);
if (selection.every(({ type }) => type === 'image')) {
deleteImageModalApi.delete(selection.map((s) => s.id));
} else if (selection.every(({ type }) => type === 'video')) {
deleteVideoModalApi.delete(selection.map((s) => s.id));
} else {
// no-op, we expect selections to always be only images or only video
}
},
dependencies: [getState, deleteImageModalApi],
});

Some files were not shown because too many files have changed in this diff Show More