Compare commits

...

178 Commits

Author SHA1 Message Date
Mary Hipp Rogers
649596cec5 fix 1:1 ratio (#8127)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-06-25 19:39:56 -04:00
psychedelicious
45aa84c01a feat: add user_label to FieldIdentifier (#8126)
Co-authored-by: Mary Hipp Rogers <maryhipp@gmail.com>
2025-06-25 09:48:15 -04:00
Mary Hipp Rogers
064d5787c9 Flux Kontext UI support (#8111)
* add support for flux-kontext models in nodes

* flux kontext in canvas

* add aspect ratio support

* lint

* restore aspect ratio logic

* more linting

* typegen

* fix typegen

---------

Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-06-25 09:46:58 -04:00
psychedelicious
d81b23adff fix(nodes): ensure each invocation overrides _original_model_fields with own field data 2025-06-19 09:57:11 -04:00
psychedelicious
c72480fd1b fix: opencv dependency conflict (#8095)
* build: prevent `opencv-python` from being installed

Fixes this error: `AttributeError: module 'cv2.ximgproc' has no attribute 'thinning'`

`opencv-contrib-python` supersedes `opencv-python`, providing the same API + additional features. The two packages should not be installed at the same time to avoid conflicts and/or errors.

The `invisible-watermark` package requires `opencv-python`, but we require the contrib variant.

This change updates `pyproject.toml` to prevent `opencv-python` from ever being installed, using a `uv` feature called dependency overrides.
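As a quick sanity check, here is a minimal sketch of the contrib-only call involved (illustrative, not part of this PR's diff):

```py
# Sketch: cv2.ximgproc.thinning is the contrib-only API that raises the
# AttributeError above when plain opencv-python shadows opencv-contrib-python.
import cv2
import numpy as np

edges = np.zeros((64, 64), dtype=np.uint8)
edges[30:34, 10:54] = 255  # a thick bar standing in for a canny edge map
thinned = cv2.ximgproc.thinning(edges, thinningType=cv2.ximgproc.THINNING_GUOHALL)
```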

* feat(ui): data viewer supports disabling wrap

* feat(api): list _all_ pkgs in app deps endpoint

* chore(ui): typegen

* feat(ui): update about modal to display new full deps list

* chore: uv lock
2025-06-10 08:34:00 -04:00
psychedelicious
3704573ef8 chore: bump version to v5.14.0 2025-06-06 22:36:32 +10:00
Hiroto N
01fbf2ce4d translationBot(ui): update translation (Japanese)
Currently translated at 76.5% (1467 of 1917 strings)

Co-authored-by: Hiroto N <hironow365@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ja/
Translation: InvokeAI/Web UI
2025-06-06 20:56:13 +10:00
Riccardo Giovanetti
96e7003449 translationBot(ui): update translation (Italian)
Currently translated at 98.9% (1896 of 1917 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-06-06 20:56:13 +10:00
RyoKoba
80197b8856 translationBot(ui): update translation (Japanese)
Currently translated at 76.1% (1460 of 1917 strings)

Co-authored-by: RyoKoba <kobayashi_ryo@cyberagent.co.jp>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ja/
Translation: InvokeAI/Web UI
2025-06-06 20:52:36 +10:00
Hosted Weblate
0187bc671e translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-06-06 20:52:36 +10:00
psychedelicious
31584daabe feat(ui): display canvas spinner during compositing operations 2025-06-06 20:50:02 +10:00
psychedelicious
a6cb522fed feat(ui): add bboxUpdated callback to transformer, use it to fit layer to stage when creating new canvas from an image
When a layer is initialized, we do not yet know its bbox, so we cannot fit the stage view to the layer. We have to wait for the bbox calculation to finish. Previously, we had no way to wait until that bbox calculation was complete before taking an action.

For example, this means we could not fit the layers to the stage immediately after creating a new layer, because we don't know the dimensions of the layer yet.

This callback lets us do that. When creating a new canvas from an image, we now...
- Register a bbox update callback to fit the layers to stage
- Layer is created
- Canvas initializes the layer's entity adapter module (layer's width and height are set to zero at this point)
- Canvas calculates the bbox
- Bbox is updated (width and height are now correct)
- Callback is run, fitting the layer to the stage
2025-06-06 20:50:02 +10:00
psychedelicious
f70be1e415 feat(ui): animate stage fit operations (e.g. fit layers to stage) 2025-06-06 20:50:02 +10:00
psychedelicious
a2901f2b46 feat(ui): add method to stage to fit to union of bbox and layers
This ensures that _both_ bbox and layers are visible
2025-06-06 20:50:02 +10:00
psychedelicious
b61c66c3a9 feat(ui): add spinner indicator to canvas during rasterizing operations and while pending rect calculations 2025-06-06 20:50:02 +10:00
psychedelicious
c77f9ec202 feat(ui): add hook to get all entity adapters in array 2025-06-06 20:50:02 +10:00
psychedelicious
2c5c35647f fix(ui): new canvas from image places image in bbox correctly 2025-06-06 20:50:02 +10:00
dunkeroni
bf0fdbd10e Fix: inpaint model mask using wrong tensor name 2025-06-05 11:31:35 -04:00
psychedelicious
731d317a42 chore(ui): update whatsnew 2025-06-04 22:29:37 +10:00
psychedelicious
e81579f752 fix(mm): handle invoke syntax for HF repo ids when fetching HF model metadata
Closes #8074
2025-06-04 22:27:15 +10:00
Linos
9a10e98c0b translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1918 of 1918 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-06-04 17:03:06 +10:00
Riccardo Giovanetti
27fdc139b7 translationBot(ui): update translation (Italian)
Currently translated at 98.9% (1897 of 1918 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-06-04 17:03:06 +10:00
psychedelicious
0a00805afc chore: bump version to v5.13.0 2025-06-04 05:55:34 +10:00
psychedelicious
7b38143fbd chore: bump version to v5.13.0rc3 2025-05-30 21:44:21 +10:00
mickr777
4c5ad1b7d7 Ruff Fix 2025-05-30 19:03:43 +10:00
mickr777
d80cc962ad Delay Imports that require torch 2025-05-30 19:03:43 +10:00
RyoKoba
7ccabfa200 translationBot(ui): update translation (Japanese)
Currently translated at 68.0% (1304 of 1915 strings)

Co-authored-by: RyoKoba <kobayashi_ryo@cyberagent.co.jp>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ja/
Translation: InvokeAI/Web UI
2025-05-30 14:48:41 +10:00
Riccardo Giovanetti
936d59cc52 translationBot(ui): update translation (Italian)
Currently translated at 98.9% (1894 of 1915 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-05-30 14:48:41 +10:00
psychedelicious
fc16fb6099 chore: bump version to v5.13.0rc2 2025-05-30 14:16:33 +10:00
psychedelicious
c848cbc2e3 feat(app): move output annotation checking to run_app
Also change import order to ensure CLI args are handled correctly. Had to do this because importing `InvocationRegistry` before parsing args resulted in the `--root` CLI arg being ignored.
2025-05-30 14:10:13 +10:00
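A hedged sketch of the ordering constraint described above; the helper and method names are hypothetical, not InvokeAI's actual API:

```py
# Hypothetical sketch: parse CLI args before importing modules that read the
# app config at import time, otherwise flags like --root are silently ignored.
def run_app() -> None:
    args = parse_cli_args()      # hypothetical helper; handles --root
    apply_args_to_config(args)   # hypothetical helper
    # Import only after args are applied, so the registry sees the right config.
    from invokeai.app.invocations.baseinvocation import InvocationRegistry

    InvocationRegistry.validate_outputs()  # hypothetical method name
```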
psychedelicious
66fd0f0d8a feat(ui): warn on unregistered invocation output 2025-05-30 14:10:13 +10:00
psychedelicious
c266f39f06 chore(ui): typegen 2025-05-30 13:36:04 +10:00
psychedelicious
98a44fa4d7 fix(ui): conditional display of message 2025-05-30 13:36:04 +10:00
Mary Hipp
c1d230f961 add support to delete all uncategorized images 2025-05-30 13:36:04 +10:00
Kevin Turner
68108435ae feat(LoRA): allow LoRA layer patcher to continue past unknown layers 2025-05-30 13:29:02 +10:00
psychedelicious
e121bf1f62 feat(ui): persist sizes of all 4 prompt boxes 2025-05-30 12:36:06 +10:00
psychedelicious
4835c344b3 feat(ui): implement generalized textarea size tracking system 2025-05-30 12:36:06 +10:00
Mary Hipp
a589dec122 store positive prompt textarea height in redux so it persists across refresh 2025-05-30 12:36:06 +10:00
dunkeroni
bc67d5c841 add invert logic to grayscale mask composite 2025-05-30 11:19:37 +10:00
Mary Hipp
f3d5691c04 use onClickGoToModelManager for empty model picker 2025-05-29 11:13:55 -04:00
psychedelicious
b98abc2457 chore(ui): typegen 2025-05-29 13:49:07 +10:00
psychedelicious
7e527ccfb7 feat(api): add validation for max resize_to on upload endpoint 2025-05-29 13:49:07 +10:00
psychedelicious
0f0c911845 chore: uv lock 2025-05-29 13:49:07 +10:00
psychedelicious
e4818b967b tidy(api): remove benchmark logging 2025-05-29 13:49:07 +10:00
psychedelicious
ce3eede26f feat(nodes): revised heuristic_resize
better handling for smaller image sizes
2025-05-29 13:49:07 +10:00
psychedelicious
d98725c5e9 feat(nodes): use guo-hall thinning 2025-05-29 13:49:07 +10:00
psychedelicious
31a96d2945 feat(ui): use resize on upload functionality when creating new canvas from image 2025-05-29 13:49:07 +10:00
psychedelicious
845a321a43 feat(ui): support resize_to when uploading images 2025-05-29 13:49:07 +10:00
psychedelicious
87a44a28ef chore(ui): typegen 2025-05-29 13:49:07 +10:00
psychedelicious
d5b9c3ee5a feat(api): support resizing image on upload 2025-05-29 13:49:07 +10:00
psychedelicious
91db136cd1 feat(nodes): much faster heuristic resize utility
Add `heuristic_resize_fast`, which does the same thing as `heuristic_resize`, except it's about 20x faster.

This is achieved by using opencv for the binary edge handling instead of python, and by checking only 100k pixels to determine what kind of image we are working with.

Besides being much faster, it produces cleaner lines for resized binary canny edge maps and fewer misidentified segmentation maps.

Tested against normal images, binary canny edge maps, grayscale HED edge maps, and segmentation maps.

Tested resizing up and down for each.

Besides the new utility function, I needed to swap the `opencv-python` dep for `opencv-contrib-python`, which includes `cv2.ximgproc.thinning`. This function accounts for a good chunk of the perf improvement.
2025-05-29 13:49:07 +10:00
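A minimal sketch of the sampling idea (an illustration, not the actual `heuristic_resize_fast` implementation): classify the image from a bounded pixel sample, then let OpenCV resize with an interpolation suited to that class.

```py
import cv2
import numpy as np

def classify_and_resize(np_image: np.ndarray, size: tuple[int, int]) -> np.ndarray:
    """Resize, picking an interpolation from a cheap sample of the pixels."""
    h, w = np_image.shape[:2]
    flat = np_image.reshape(-1)
    # Inspect at most ~100k values rather than scanning the full image.
    step = max(1, flat.size // 100_000)
    distinct = np.unique(flat[::step]).size  # distinct values as a cheap proxy
    if distinct <= 2:
        interp = cv2.INTER_NEAREST   # binary map (e.g. canny): keep lines crisp
    elif distinct < 64:
        interp = cv2.INTER_NEAREST   # few values: likely a segmentation map
    elif size[0] * size[1] > w * h:
        interp = cv2.INTER_LANCZOS4  # continuous-tone upscale
    else:
        interp = cv2.INTER_AREA      # continuous-tone downscale
    return cv2.resize(np_image, size, interpolation=interp)
```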
Jonathan
f351ad4b66 Update communityNodes.md
Added some of JPPhoto's nodes.
2025-05-28 07:26:44 +10:00
psychedelicious
fb6fb9abbd gh: update CODEOWNERS
Added myself to everything so we do not get into situations where we need to rely on vic or lincoln to approve
2025-05-27 22:37:44 +10:00
psychedelicious
675c990486 docs: add comments to classifiers stuff 2025-05-27 22:02:48 +10:00
psychedelicious
6ee5cde4bb ci: do not install project when checking classifiers 2025-05-27 22:02:48 +10:00
psychedelicious
c8077f9430 ci: check classifiers in python-checks workflow 2025-05-27 22:02:48 +10:00
psychedelicious
6aabe9959e chore: fix license classifier 2025-05-27 22:02:48 +10:00
psychedelicious
0b58d172d2 build: update build script to check classifiers 2025-05-27 22:02:48 +10:00
psychedelicious
d7c6e293d7 scripts: add script to check pypi classifiers 2025-05-27 22:02:48 +10:00
psychedelicious
c600bc867d chore: bump version to v5.13.0rc1 2025-05-27 13:30:34 +10:00
Riccardo Giovanetti
f4140dd772 translationBot(ui): update translation (Italian)
Currently translated at 98.9% (1890 of 1911 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.9% (1890 of 1911 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-05-27 13:18:06 +10:00
psychedelicious
a2d8261d40 feat(ui): canvas scroll scale snap 2025-05-27 13:10:57 +10:00
psychedelicious
bce88a8873 perf(ui): lazy mount scale slider popover 2025-05-27 13:10:57 +10:00
psychedelicious
b37e1a3ad6 feat(ui): do not round scale
Makes it a lot smoother, don't think it breaks anything...
2025-05-27 13:10:57 +10:00
psychedelicious
35a088e0a6 perf(ui): optimize <CanvasToolbarScale /> 2025-05-27 13:10:57 +10:00
psychedelicious
b936cab039 feat(ui): add computed for stage scale 2025-05-27 13:10:57 +10:00
psychedelicious
34e4093408 fix(ui): revert snapping logic, doesn't work w/ certain input devices 2025-05-27 13:10:57 +10:00
Kent Keirsey
d7f93c3cc0 uv update 2025-05-26 22:54:15 -04:00
Kent Keirsey
d4c4926caa Update Compel to 2.1.1 and apply Sentences Split logic 2025-05-26 22:54:15 -04:00
psychedelicious
558c7db055 chore(ui): knipignore InpaintMaskAddButtons 2025-05-27 07:28:47 +10:00
psychedelicious
2ece59b51b feat(ui): remove unnecessary type casts 2025-05-27 07:28:47 +10:00
psychedelicious
7dbe39957c feat(ui): bbox rect is always defined, no need for fallback logic 2025-05-27 07:28:47 +10:00
psychedelicious
6fa46d35a5 feat(ui): inpaint mask settings layout 2025-05-27 07:28:47 +10:00
psychedelicious
b2a2b38ea8 feat(ui): split inpaint mask setting selectors to avoid manual memoization 2025-05-27 07:28:47 +10:00
dunkeroni
12934da390 Use Optional instead of Nullable for mask settings 2025-05-27 07:28:47 +10:00
dunkeroni
231bc18188 remove buttons, change denoise limit format 2025-05-27 07:28:47 +10:00
dunkeroni
530cd180c5 chore: ruff 2025-05-27 07:28:47 +10:00
dunkeroni
2a92e7b920 Flux/CogView/SD3 compatible with gradient masks 2025-05-27 07:28:47 +10:00
dunkeroni
019e057e29 chore: typegen 2025-05-27 07:28:47 +10:00
dunkeroni
9aa26f883e chore: ruff 2025-05-27 07:28:47 +10:00
dunkeroni
3f727e24b1 change default noise level to 0.15 2025-05-27 07:28:47 +10:00
dunkeroni
9e90bf1b20 fix gradient mask broken with flux gen 2025-05-27 07:28:47 +10:00
dunkeroni
db3964797f clean up comments 2025-05-27 07:28:47 +10:00
dunkeroni
881efbda1b fix: inpaint breaks when scaled processing 2025-05-27 07:28:47 +10:00
dunkeroni
e9ce2ed5f2 inpaint mask sliders compatible with outpainting 2025-05-27 07:28:47 +10:00
dunkeroni
53ac9eafbf reuse inpaint image noise seed for caching 2025-05-27 07:28:47 +10:00
dunkeroni
9e095006a5 remove some AI detritus 2025-05-27 07:28:47 +10:00
dunkeroni
21b24c3ba6 change denoise limit default to 1.0 2025-05-27 07:28:47 +10:00
dunkeroni
139ecc10ce ruff 2025-05-27 07:28:47 +10:00
dunkeroni
78ea143b46 composite masks based on denoise level 2025-05-27 07:28:47 +10:00
dunkeroni
174249ec15 gradient mask node works on greyscale now 2025-05-27 07:28:47 +10:00
dunkeroni
2510ad7431 consolidate code 2025-05-27 07:28:47 +10:00
dunkeroni
ba5e855a60 Correctly composite grey values on white for masks 2025-05-27 07:28:47 +10:00
dunkeroni
23627cf18d compositing in frontend 2025-05-27 07:28:47 +10:00
dunkeroni
5e20c9a1ca mask noise slider option 2025-05-27 07:28:47 +10:00
Kent Keirsey
933cf5f276 update prettier 2025-05-25 23:53:16 -04:00
Kent Keirsey
41316de659 Update order 2025-05-25 23:53:16 -04:00
Kent Keirsey
041ccfd68e Enable 'pull into bounding box' from empty Control Layer 2025-05-25 23:53:16 -04:00
dunkeroni
ad24c203a4 preserve SDXL training values for bounding box 2025-05-25 08:15:37 -04:00
Kent Keirsey
3fd28ce600 Update scaling math to land on 100% consistently. 2025-05-25 07:59:27 -04:00
Mary Hipp
32df3bdf6e typegen 2025-05-22 14:09:10 -04:00
Mary Hipp
ba69e89e8c typegen 2025-05-22 14:09:10 -04:00
Mary Hipp
a8e0c48ddc add new method types to metadata 2025-05-22 14:09:10 -04:00
Jonathan
66f6571086 Update manual installation for v5.12.0 2025-05-22 09:00:58 -04:00
psychedelicious
8a3848e7b6 chore(ui): update whats new copy 2025-05-22 14:25:02 +10:00
psychedelicious
3f8486b480 chore: bump version to v5.12.0 2025-05-22 14:25:02 +10:00
Hosted Weblate
b80be4f639 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-05-22 14:11:52 +10:00
Linos
adb3a849b9 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1910 of 1910 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-05-22 14:11:52 +10:00
Riccardo Giovanetti
798499fda6 translationBot(ui): update translation (Italian)
Currently translated at 98.9% (1889 of 1910 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.9% (1889 of 1910 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-05-22 14:11:52 +10:00
psychedelicious
02fc5a165c chore(ui): typegen 2025-05-22 13:50:15 +10:00
psychedelicious
b1b8edecfb fix(ui): minor ts issue 2025-05-22 13:50:15 +10:00
Mary Hipp
3cd8d48809 lint 2025-05-22 13:50:15 +10:00
Mary Hipp
f4672ad8c1 more cleanup 2025-05-22 13:50:15 +10:00
Mary Hipp
5a86490845 cleanup and refactor into hooks 2025-05-22 13:50:15 +10:00
Mary Hipp
27dc843046 Imagen4 working in UI 2025-05-22 13:50:15 +10:00
Mary Hipp
2f35d74902 backend updates 2025-05-22 13:50:15 +10:00
Kevin Turner
8bd52ed744 fix: improve gguf performance with torch.compile
torch.compile in pytorch 2.7 does not support `set.__contains__`, so make this a list instead.

See https://github.com/pytorch/pytorch/issues/145761
2025-05-22 13:42:09 +10:00
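A hedged illustration of the pattern (names hypothetical; the actual change is in the GGUF tensor code):

```py
import torch

# Per the commit: Dynamo in torch 2.7 cannot compile membership tests against
# a set, so keep the lookup collection as a list (or tuple) instead.
HANDLED_FUNCTIONS = [torch.add, torch.mul]  # was: {torch.add, torch.mul}

def is_handled(func) -> bool:
    return func in HANDLED_FUNCTIONS  # list __contains__ traces fine
```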
psychedelicious
f3e2a3c384 gh: update CODEOWNERS
- Remove brandon
- Consolidate two entries for `invokeai/backend`
2025-05-22 13:37:24 +10:00
psychedelicious
ecc6e8a532 fix(nodes): transformers bug with SAM
Upstream bug in `transformers` breaks use of the `AutoModelForMaskGeneration` class to load SAM models.

Simple fix: directly load the model with the `SamModel` class instead.

See upstream issue https://github.com/huggingface/transformers/issues/38228
2025-05-22 11:32:37 +10:00
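The workaround in sketch form (the hub id below is an assumption for illustration):

```py
# Load SAM weights directly with the concrete class instead of going through
# AutoModelForMaskGeneration, which is broken by the upstream bug.
from transformers import SamModel

model = SamModel.from_pretrained("facebook/sam-vit-base")  # example hub id
```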
Mary Hipp
9170576a38 make logic more straightforward 2025-05-21 10:52:04 -04:00
Mary Hipp
f26baa0341 use hook instead 2025-05-21 10:52:04 -04:00
psychedelicious
99dad953a4 chore: bump version to v5.12.0rc2 2025-05-20 14:50:03 +10:00
jazzhaiku
c39bcdffd3 Re-enable classification API as fallback (#8007)
## Summary

- Fallback to new classification API if legacy probe fails
- Method to read model metadata
- Created `StrippedModelOnDisk` class for testing
- Test to verify only a single config `matches` with a model

2025-05-20 11:25:38 +10:00
Billy
32f2223237 Warning comment 2025-05-20 11:19:59 +10:00
Billy
6176941853 Warning comment 2025-05-20 11:19:59 +10:00
Billy
af41dc83f7 Make ruff happy 2025-05-20 11:19:59 +10:00
Billy
a17e771eba Re-enable classification API as fallback 2025-05-20 11:19:59 +10:00
psychedelicious
19ecdb196e chore: ruff 2025-05-20 10:47:02 +10:00
psychedelicious
15880e6ea7 fix(ui): invocation parsing for optional enum fields
For example:
```py
my_field: Literal["foo", "bar"] | None = InputField(default=None)
```

Previously, this would cause a field parsing error and prevent the app from loading.

Two fixes:
- This type annotation and resultant schema are now parsed correctly
- Error handling added to template building logic to prevent the hang at startup when an error does occur
2025-05-20 10:47:02 +10:00
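For context, a hedged sketch of how such a field renders in the JSON schema (exact output varies by pydantic version): roughly an `anyOf` of an enum and `null`, which the field parser now handles.

```py
from typing import Literal, Optional

from pydantic import BaseModel

class Demo(BaseModel):
    my_field: Optional[Literal["foo", "bar"]] = None

# Roughly: {'anyOf': [{'enum': ['foo', 'bar'], 'type': 'string'},
#                     {'type': 'null'}], 'default': None, 'title': 'My Field'}
print(Demo.model_json_schema()["properties"]["my_field"])
```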
psychedelicious
53ffa98662 chore(ui): typegen 2025-05-20 10:47:02 +10:00
psychedelicious
021a334240 fix(nodes): fix spots where default of None was provided for non-optional fields 2025-05-20 10:47:02 +10:00
psychedelicious
cfed293d48 fix(nodes): do not make invocation field defaults None when they are not provided 2025-05-20 10:47:02 +10:00
Mary Hipp
d36bc185c8 only use client-side uploads when there is more than one image, to retain metadata for single uploads 2025-05-20 08:03:00 +10:00
psychedelicious
7878203b03 chore(ui): update whats new copy 2025-05-19 23:28:40 +10:00
psychedelicious
3352220d39 chore: bump version to v5.12.0rc1 2025-05-19 23:28:40 +10:00
Riccardo Giovanetti
bcfb1e7e52 translationBot(ui): update translation (Italian)
Currently translated at 98.7% (1887 of 1910 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-05-19 23:23:07 +10:00
psychedelicious
e84b3c142c chore(ui): typegen 2025-05-19 13:50:04 +10:00
Kent Keirsey
22f637b647 ruff ruff 2025-05-19 13:50:04 +10:00
Kent Keirsey
5d192ab6e5 Fix SD precise in patcher. 2025-05-19 13:50:04 +10:00
Kent Keirsey
9273d1629e UX Copy Clean-up 2025-05-19 13:50:04 +10:00
Kent Keirsey
27a12f080b missing translation values 2025-05-19 13:50:04 +10:00
Kent Keirsey
3bfb497764 ruff fixes 2025-05-19 13:50:04 +10:00
Kent Keirsey
b849c7d382 ruff fix 2025-05-19 13:50:04 +10:00
Kent Keirsey
8d4120583d update schema pt 2 2025-05-19 13:50:04 +10:00
Kent Keirsey
402cdc7eda update schema 2025-05-19 13:50:04 +10:00
Kent Keirsey
b02ea1a898 Expanded styles & updated UI 2025-05-19 13:50:04 +10:00
Kent Keirsey
d709040f4b Matt3o base changes 2025-05-19 13:50:04 +10:00
psychedelicious
8a7a498da3 chore: update uv lock 2025-05-19 12:29:51 +10:00
psychedelicious
699736486b chore: bump torch to 2.7.0
- Update `pyproject.toml`
- Update `pins.json` so launcher installs latest CUDA 12.8 & ROCm 6.3
2025-05-19 12:29:51 +10:00
psychedelicious
37e790ae19 fix(app): address pydantic deprecation warning for accessing BaseModel.model_fields 2025-05-19 12:22:59 +10:00
David Burnett
6c0bd7d150 fix import ordering, remove code I reverted that the resync added back 2025-05-19 11:16:23 +10:00
David Burnett
99e154d773 fix picky ruff issue 2025-05-19 11:16:23 +10:00
David Burnett
e4e43ae126 fix missing bracket 2025-05-19 11:16:23 +10:00
David Burnett
a07fac6180 raise expected exception when attempting to change dtype 2025-05-19 11:16:23 +10:00
David Burnett
93d4b00082 Add `to` overload for GGMLTensor, so calling `to` on the model moves the quantized data as well 2025-05-19 11:16:23 +10:00
David Burnett
8abcc99ced add check for state_dict, required to load TI's 2025-05-19 11:16:23 +10:00
David Burnett
73ab4b8895 fix offload device 2025-05-19 11:16:23 +10:00
David Burnett
86719f2065 revert to overload due to failing tests, use Torch futures instead 2025-05-19 11:16:23 +10:00
David Burnett
5271fc1cac fix picky ruff issue 2025-05-19 11:16:23 +10:00
David Burnett
96ff7d9093 fix missing bracket 2025-05-19 11:16:23 +10:00
David Burnett
6f73d9e9c6 raise expected exception when attempting to change dtype 2025-05-19 11:16:23 +10:00
David Burnett
29b406a84b Add `to` overload for GGMLTensor, so calling `to` on the model moves the quantized data as well 2025-05-19 11:16:23 +10:00
psychedelicious
2b1e4b88d3 tests: add new service to mocks 2025-05-19 10:29:07 +10:00
psychedelicious
0f0085a776 chore(ui): typegen 2025-05-19 10:29:07 +10:00
psychedelicious
ea28ed8261 chore: ruff 2025-05-19 10:29:07 +10:00
Lucian Hardy
c0e6327d3a chore(ui): Refactor RelatedModels.tsx
Major cleanup of RelatedModels.tsx for improved readability, structure, and maintainability.
- Dried out repetitive logic
- Consolidated model type sorting into reusable helpers
- Added disallowed model type relationships to prevent broken connections (e.g. VAE ↔ LoRA)
  - Aware this introduces a new constraint; open to feedback (see PR comment)
- Some naming and types may still need refinement; happy to revisit
2025-05-19 10:29:07 +10:00
Lucian Hardy
459491e402 chore(backend): Removed unused model_relationship methods
- Removed unused AnyModelConfig-related methods
- Removed unused get_related_model_key_count method
2025-05-19 10:29:07 +10:00
Lucian Hardy
a4cddfa47d feat(ui): model relationship management
Adds full support for managing model-to-model relationships in the UI and backend.

Introduces RelatedModels subpanel for linking and unlinking models in model management.
 - Adds REST API routes for adding, removing, and retrieving model relationships.
 - New database migration: creates model_relationships table for bidirectional links.
 - New service layer (model_relationships) for relationship management.
 - Updated frontend: Related models float to top of LoRA/Main grouped model comboboxes for quick access.
     - Added 'Show Only Related' toggle badge to MainModelPicker filter bar

**Amended commit to remove changes to ParamMainModelSelect.tsx and MainModelPicker.tsx to avoid conflict with upstream deletion/rewrite**
2025-05-19 10:29:07 +10:00
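A hedged usage sketch against the new routes (paths match the router added in this PR; host and port are assumptions):

```py
import requests

BASE = "http://localhost:9090/api/v1/model_relationships"  # assumed host/port

# Link two models (bidirectional); the keys are illustrative placeholders.
requests.post(f"{BASE}/", json={"model_key_1": "key-a", "model_key_2": "key-b"})

# Fetch everything related to one model -> e.g. ["key-b"]
related = requests.get(f"{BASE}/i/key-a").json()
```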
jazzhaiku
9a822bcfe8 Jazzhaiku/stats (#8006)
## Summary

- Modify stats reset to be on a per session basis, rather than a "full
reset", to allow for parallel session execution
- Add "aider" to gitignore

2025-05-16 07:51:23 +10:00
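A hypothetical sketch of the per-session reset (names illustrative, not the service's actual API):

```py
# Keep stats keyed by session so resetting one session cannot clobber the
# numbers of other sessions still executing in parallel.
class InvocationStatsService:
    def __init__(self) -> None:
        self._stats: dict[str, dict] = {}

    def reset_stats(self, session_id: str) -> None:
        self._stats.pop(session_id, None)  # drop only this session's stats
```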
psychedelicious
5f12b9185f feat(mm): add cache_snapshot to model cache clear callback 2025-05-15 16:06:47 +10:00
psychedelicious
d958d2e5a0 feat(mm): iterate on cache callbacks API 2025-05-15 14:37:22 +10:00
psychedelicious
823ca214e6 feat(mm): iterate on cache callbacks API 2025-05-15 13:28:51 +10:00
psychedelicious
a33da450fd feat(mm): support cache callbacks 2025-05-15 11:23:58 +10:00
Billy
8b5f4d190c Restore Schema 2025-05-15 10:38:01 +10:00
Billy
f1f3b7965a Schema 2025-05-15 10:26:45 +10:00
Billy
987be3507c Merge branch 'main' into jazzhaiku/stats 2025-05-15 10:22:56 +10:00
Billy
1f4090fe0e Reset invocation stats on per session basis 2025-05-15 10:19:05 +10:00
Billy
029e2d2c46 Add aider to gitignore 2025-05-15 10:18:42 +10:00
161 changed files with 4745 additions and 765 deletions

.github/CODEOWNERS vendored
View File

@@ -1,32 +1,31 @@
# continuous integration
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious
# documentation
/docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @psychedelicious
# nodes
/invokeai/app/ @blessedcoolant @psychedelicious @brandonrising @hipsterusername @jazzhaiku
/invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku
# installation and configuration
/pyproject.toml @lstein @blessedcoolant @hipsterusername
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
/scripts/ @ebr @lstein @hipsterusername
/installer/ @lstein @ebr @hipsterusername
/invokeai/assets @lstein @ebr @hipsterusername
/invokeai/configs @lstein @hipsterusername
/invokeai/version @lstein @blessedcoolant @hipsterusername
/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
/scripts/ @ebr @lstein @psychedelicious @hipsterusername
/installer/ @lstein @ebr @psychedelicious @hipsterusername
/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
/invokeai/configs @lstein @psychedelicious @hipsterusername
/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
# generation, model management, postprocessing
/invokeai/backend @lstein @blessedcoolant @brandonrising @hipsterusername @jazzhaiku
/invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp
# front ends
/invokeai/frontend/CLI @lstein @hipsterusername
/invokeai/frontend/install @lstein @ebr @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername

View File

@@ -67,6 +67,10 @@ jobs:
version: '0.6.10'
enable-cache: true
- name: check pypi classifiers
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
- name: ruff check
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
run: uv tool run ruff@0.11.2 check --output-format=github .

.gitignore vendored
View File

@@ -188,3 +188,4 @@ installer/install.sh
installer/update.bat
installer/update.sh
installer/InvokeAI-Installer/
.aider*

View File

@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
=== "Invoke v5.10.0 and later"
=== "Invoke v5.12 and later"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- **In all other cases, do not use an index.**
=== "Invoke v5.10.0 to v5.11.0"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.

View File

@@ -13,6 +13,7 @@ If you'd prefer, you can also just download the whole node folder from the linke
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
- Community Nodes
+ [Anamorphic Tools](#anamorphic-tools)
+ [Adapters-Linked](#adapters-linked-nodes)
+ [Autostereogram](#autostereogram-nodes)
+ [Average Images](#average-images)
@@ -20,9 +21,12 @@ To use a community workflow, download the `.json` node graph file and load it in
+ [Close Color Mask](#close-color-mask)
+ [Clothing Mask](#clothing-mask)
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
+ [Curves](#curves)
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
+ [Enhance Detail](#enhance-detail)
+ [Film Grain](#film-grain)
+ [Flip Pose](#flip-pose)
+ [Flux Ideal Size](#flux-ideal-size)
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
+ [Grid to Gif](#grid-to-gif)
@@ -61,6 +65,13 @@ To use a community workflow, download the `.json` node graph file and load it in
- [Help](#help)
--------------------------------
### Anamorphic Tools
**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.
**Node Link:** https://github.com/JPPhoto/anamorphic-tools
--------------------------------
### Adapters Linked Nodes
@@ -132,6 +143,13 @@ Node Link: https://github.com/VeyDlin/clahe-node
View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
--------------------------------
### Curves
**Description:** Adjust an image's curve based on a user-defined string.
**Node Link:** https://github.com/JPPhoto/curves-node
--------------------------------
### Depth Map from Wavefront OBJ
@@ -162,6 +180,20 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
**Node Link:** https://github.com/JPPhoto/film-grain-node
--------------------------------
### Flip Pose
**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.
**Node Link:** https://github.com/JPPhoto/flip-pose-node
--------------------------------
### Flux Ideal Size
**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.
**Node Link:** https://github.com/JPPhoto/flux-ideal-size
--------------------------------
### Generative Grammar-Based Prompt Nodes

View File

@@ -23,6 +23,10 @@ from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_images.model_images_default import ModelImageFileStorageDisk
from invokeai.app.services.model_manager.model_manager_default import ModelManagerService
from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
from invokeai.app.services.model_relationship_records.model_relationship_records_sqlite import (
SqliteModelRelationshipRecordStorage,
)
from invokeai.app.services.model_relationships.model_relationships_default import ModelRelationshipsService
from invokeai.app.services.names.names_default import SimpleNameService
from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache
@@ -136,6 +140,8 @@ class ApiDependencies:
download_queue=download_queue_service,
events=events,
)
model_relationships = ModelRelationshipsService()
model_relationship_records = SqliteModelRelationshipRecordStorage(db=db)
names = SimpleNameService()
performance_statistics = InvocationStatsService()
session_processor = DefaultSessionProcessor(session_runner=DefaultSessionRunner())
@@ -161,6 +167,8 @@ class ApiDependencies:
logger=logger,
model_images=model_images_service,
model_manager=model_manager,
model_relationships=model_relationships,
model_relationship_records=model_relationship_records,
download_queue=download_queue_service,
names=names,
performance_statistics=performance_statistics,

View File

@@ -1,8 +1,7 @@
import typing
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from importlib.metadata import distributions
from pathlib import Path
from platform import python_version
from typing import Optional
import torch
@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
class AppDependencyVersions(BaseModel):
"""App depencency Versions Response"""
accelerate: str = Field(description="accelerate version")
compel: str = Field(description="compel version")
cuda: Optional[str] = Field(description="CUDA version")
diffusers: str = Field(description="diffusers version")
numpy: str = Field(description="Numpy version")
opencv: str = Field(description="OpenCV version")
onnx: str = Field(description="ONNX version")
pillow: str = Field(description="Pillow (PIL) version")
python: str = Field(description="Python version")
torch: str = Field(description="PyTorch version")
torchvision: str = Field(description="PyTorch Vision version")
transformers: str = Field(description="transformers version")
xformers: Optional[str] = Field(description="xformers version")
class AppConfig(BaseModel):
"""App Config Response"""
@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
return AppVersion(version=__version__)
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
async def get_app_deps() -> AppDependencyVersions:
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
async def get_app_deps() -> dict[str, str]:
deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
try:
xformers = version("xformers")
except PackageNotFoundError:
xformers = None
return AppDependencyVersions(
accelerate=version("accelerate"),
compel=version("compel"),
cuda=torch.version.cuda,
diffusers=version("diffusers"),
numpy=version("numpy"),
opencv=version("opencv-python"),
onnx=version("onnx"),
pillow=version("pillow"),
python=python_version(),
torch=torch.version.__version__,
torchvision=version("torchvision"),
transformers=version("transformers"),
xformers=xformers,
)
cuda = torch.version.cuda or "N/A"
except Exception:
cuda = "N/A"
deps["CUDA"] = cuda
sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
return sorted_deps
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)

View File

@@ -146,7 +146,7 @@ async def list_boards(
response_model=list[str],
)
async def list_all_board_image_names(
board_id: str = Path(description="The id of the board"),
board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
) -> list[str]:

View File

@@ -1,12 +1,13 @@
import io
import json
import traceback
from typing import Optional
from typing import ClassVar, Optional
from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, model_validator
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
@@ -19,6 +20,8 @@ from invokeai.app.services.image_records.image_records_common import (
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
images_router = APIRouter(prefix="/v1/images", tags=["images"])
@@ -27,6 +30,19 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
IMAGE_MAX_AGE = 31536000
class ResizeToDimensions(BaseModel):
width: int = Field(..., gt=0)
height: int = Field(..., gt=0)
MAX_SIZE: ClassVar[int] = 4096 * 4096
@model_validator(mode="after")
def validate_total_output_size(self):
if self.width * self.height > self.MAX_SIZE:
raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
return self
@images_router.post(
"/upload",
operation_id="upload_image",
@@ -46,6 +62,11 @@ async def upload_image(
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
resize_to: Optional[str] = Body(
default=None,
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
example='"[1024,1024]"',
),
metadata: Optional[str] = Body(
default=None,
description="The metadata to associate with the image, must be a stringified JSON dict",
@@ -59,13 +80,31 @@ async def upload_image(
contents = await file.read()
try:
pil_image = Image.open(io.BytesIO(contents))
if crop_visible:
bbox = pil_image.getbbox()
pil_image = pil_image.crop(bbox)
except Exception:
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
raise HTTPException(status_code=415, detail="Failed to read image")
if crop_visible:
try:
bbox = pil_image.getbbox()
pil_image = pil_image.crop(bbox)
except Exception:
raise HTTPException(status_code=500, detail="Failed to crop image")
if resize_to:
try:
dims = json.loads(resize_to)
resize_dims = ResizeToDimensions(**dims)
except Exception:
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
try:
np_image = pil_to_np(pil_image)
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
pil_image = np_to_pil(np_image)
except Exception:
raise HTTPException(status_code=500, detail="Failed to resize image")
extracted_metadata = extract_metadata_from_image(
pil_image=pil_image,
invokeai_metadata_override=metadata,
@@ -356,6 +395,29 @@ async def delete_images_from_list(
raise HTTPException(status_code=500, detail="Failed to delete images")
@images_router.delete(
"/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
)
async def delete_uncategorized_images() -> DeleteImagesFromListResult:
"""Deletes all images that are uncategorized"""
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
board_id="none", categories=None, is_intermediate=None
)
try:
deleted_images: list[str] = []
for image_name in image_names:
try:
ApiDependencies.invoker.services.images.delete(image_name)
deleted_images.append(image_name)
except Exception:
pass
return DeleteImagesFromListResult(deleted_images=deleted_images)
except Exception:
raise HTTPException(status_code=500, detail="Failed to delete images")
class ImagesUpdatedFromListResult(BaseModel):
updated_image_names: list[str] = Field(description="The image names that were updated")

View File

@@ -0,0 +1,215 @@
"""FastAPI route for model relationship records."""
from typing import List
from fastapi import APIRouter, Body, HTTPException, Path, status
from pydantic import BaseModel, Field
from invokeai.app.api.dependencies import ApiDependencies
model_relationships_router = APIRouter(prefix="/v1/model_relationships", tags=["model_relationships"])
# === Schemas ===
class ModelRelationshipCreateRequest(BaseModel):
model_key_1: str = Field(
...,
description="The key of the first model in the relationship",
examples=[
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
"ac32b914-10ab-496e-a24a-3068724b9c35",
"d944abfd-c7c3-42e2-a4ff-da640b29b8b4",
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
"12345678-90ab-cdef-1234-567890abcdef",
"fedcba98-7654-3210-fedc-ba9876543210",
],
)
model_key_2: str = Field(
...,
description="The key of the second model in the relationship",
examples=[
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
"f0c3da4e-d9ff-42b5-a45c-23be75c887c9",
"38170dd8-f1e5-431e-866c-2c81f1277fcc",
"c57fea2d-7646-424c-b9ad-c0ba60fc68be",
"10f7807b-ab54-46a9-ab03-600e88c630a1",
"f6c1d267-cf87-4ee0-bee0-37e791eacab7",
],
)
class ModelRelationshipBatchRequest(BaseModel):
model_keys: List[str] = Field(
...,
description="List of model keys to fetch related models for",
examples=[
[
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
"ac32b914-10ab-496e-a24a-3068724b9c35",
],
[
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
"12345678-90ab-cdef-1234-567890abcdef",
"fedcba98-7654-3210-fedc-ba9876543210",
],
[
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
],
],
)
# === Routes ===
@model_relationships_router.get(
"/i/{model_key}",
operation_id="get_related_models",
response_model=list[str],
responses={
200: {
"description": "A list of related model keys was retrieved successfully",
"content": {
"application/json": {
"example": [
"15e9eb28-8cfe-47c9-b610-37907a79fc3c",
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
]
}
},
},
404: {"description": "The specified model could not be found"},
422: {"description": "Validation error"},
},
)
async def get_related_models(
model_key: str = Path(..., description="The key of the model to get relationships for"),
) -> list[str]:
"""
Get a list of model keys related to a given model.
"""
try:
return ApiDependencies.invoker.services.model_relationships.get_related_model_keys(model_key)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@model_relationships_router.post(
"/",
status_code=status.HTTP_204_NO_CONTENT,
responses={
204: {"description": "The relationship was successfully created"},
400: {"description": "Invalid model keys or self-referential relationship"},
409: {"description": "The relationship already exists"},
422: {"description": "Validation error"},
500: {"description": "Internal server error"},
},
summary="Add Model Relationship",
description="Creates a **bidirectional** relationship between two models, allowing each to reference the other as related.",
)
async def add_model_relationship(
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to relate"),
) -> None:
"""
Add a relationship between two models.
Relationships are bidirectional and will be accessible from both models.
- Raises 400 if keys are invalid or identical.
- Raises 409 if the relationship already exists.
"""
try:
if req.model_key_1 == req.model_key_2:
raise HTTPException(status_code=400, detail="Cannot relate a model to itself.")
ApiDependencies.invoker.services.model_relationships.add_model_relationship(
req.model_key_1,
req.model_key_2,
)
except ValueError as e:
raise HTTPException(status_code=409, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@model_relationships_router.delete(
"/",
status_code=status.HTTP_204_NO_CONTENT,
responses={
204: {"description": "The relationship was successfully removed"},
400: {"description": "Invalid model keys or self-referential relationship"},
404: {"description": "The relationship does not exist"},
422: {"description": "Validation error"},
500: {"description": "Internal server error"},
},
summary="Remove Model Relationship",
description="Removes a **bidirectional** relationship between two models. The relationship must already exist.",
)
async def remove_model_relationship(
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to disconnect"),
) -> None:
"""
Removes a bidirectional relationship between two model keys.
- Raises 400 if attempting to unlink a model from itself.
- Raises 404 if the relationship was not found.
"""
try:
if req.model_key_1 == req.model_key_2:
raise HTTPException(status_code=400, detail="Cannot unlink a model from itself.")
ApiDependencies.invoker.services.model_relationships.remove_model_relationship(
req.model_key_1,
req.model_key_2,
)
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@model_relationships_router.post(
"/batch",
operation_id="get_related_models_batch",
response_model=List[str],
responses={
200: {
"description": "Related model keys retrieved successfully",
"content": {
"application/json": {
"example": [
"ca562b14-995e-4a42-90c1-9528f1a5921d",
"cc0c2b8a-c62e-41d6-878e-cc74dde5ca8f",
"18ca7649-6a9e-47d5-bc17-41ab1e8cec81",
"7c12d1b2-0ef9-4bec-ba55-797b2d8f2ee1",
"c382eaa3-0e28-4ab0-9446-408667699aeb",
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
]
}
},
},
422: {"description": "Validation error"},
500: {"description": "Internal server error"},
},
summary="Get Related Model Keys (Batch)",
description="Retrieves all **unique related model keys** for a list of given models. This is useful for contextual suggestions or filtering.",
)
async def get_related_models_batch(
req: ModelRelationshipBatchRequest = Body(..., description="Model keys to check for related connections"),
) -> list[str]:
"""
Accepts multiple model keys and returns a flat list of all unique related keys.
Useful when working with multiple selections in the UI or cross-model comparisons.
"""
try:
all_related: set[str] = set()
for key in req.model_keys:
related = ApiDependencies.invoker.services.model_relationships.get_related_model_keys(key)
all_related.update(related)
return list(all_related)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))

View File

@@ -22,6 +22,7 @@ from invokeai.app.api.routers import (
download_queue,
images,
model_manager,
model_relationships,
session_queue,
style_presets,
utilities,
@@ -125,6 +126,7 @@ app.include_router(download_queue.download_queue_router, prefix="/api")
app.include_router(images.images_router, prefix="/api")
app.include_router(boards.boards_router, prefix="/api")
app.include_router(board_images.board_images_router, prefix="/api")
app.include_router(model_relationships.model_relationships_router, prefix="/api")
app.include_router(app_info.app_router, prefix="/api")
app.include_router(session_queue.session_queue_router, prefix="/api")
app.include_router(workflows.workflows_router, prefix="/api")

View File

@@ -177,7 +177,7 @@ class BaseInvocation(ABC, BaseModel):
return cls.model_fields["type"].default
@classmethod
def get_output_annotation(cls) -> BaseInvocationOutput:
def get_output_annotation(cls) -> Type[BaseInvocationOutput]:
"""Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
return signature(cls.invoke).return_annotation
@@ -209,7 +209,7 @@ class BaseInvocation(ABC, BaseModel):
Internal invoke method, calls `invoke()` after some prep.
Handles optional fields that are required to call `invoke()` and invocation cache.
"""
for field_name, field in self.model_fields.items():
for field_name, field in type(self).model_fields.items():
if not field.json_schema_extra or callable(field.json_schema_extra):
# something has gone terribly awry, we should always have this and it should be a dict
continue
@@ -224,9 +224,9 @@ class BaseInvocation(ABC, BaseModel):
setattr(self, field_name, orig_default)
if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None:
if input_ == Input.Connection:
raise RequiredConnectionException(self.model_fields["type"].default, field_name)
raise RequiredConnectionException(type(self).model_fields["type"].default, field_name)
elif input_ == Input.Any:
raise MissingInputException(self.model_fields["type"].default, field_name)
raise MissingInputException(type(self).model_fields["type"].default, field_name)
# skip node cache codepath if it's disabled
if services.configuration.node_cache_size == 0:
@@ -582,6 +582,8 @@ def invocation(
fields: dict[str, tuple[Any, FieldInfo]] = {}
original_model_fields: dict[str, OriginalModelField] = {}
for field_name, field_info in cls.model_fields.items():
annotation = field_info.annotation
assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
@@ -589,7 +591,7 @@ def invocation(
f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
)
cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)
@@ -643,6 +645,16 @@ def invocation(
fields["type"] = (invocation_type_annotation, invocation_type_field_info)
# Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
# output is registered _after_ this invocation is registered. It depends on module import ordering.
#
# We can only confirm the output for an invocation is registered after all modules are imported. There's
# only really one good time to do that - during application startup, in `run_app.py`, after loading all
# custom nodes.
#
# We can still do some basic validation here - ensure the invoke method is defined and returns an instance
# of BaseInvocationOutput.
# Validate the `invoke()` method is implemented
if "invoke" in cls.__abstractmethods__:
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
@@ -666,6 +678,7 @@ def invocation(
docstring = cls.__doc__
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) # type: ignore
new_class.__doc__ = docstring
new_class._original_model_fields = original_model_fields
InvocationRegistry.register_invocation(new_class)

View File

@@ -1,7 +1,7 @@
from typing import Iterator, List, Optional, Tuple, Union, cast
import torch
from compel import Compel, ReturnedEmbeddingsType
from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
@@ -104,6 +104,7 @@ class CompelInvocation(BaseInvocation):
dtype_for_device_getter=TorchDevice.choose_torch_dtype,
truncate_long_prompts=False,
device=TorchDevice.choose_torch_device(),
split_long_text_mode=SplitLongTextMode.SENTENCES,
)
conjunction = Compel.parse_prompt_string(self.prompt)
@@ -205,6 +206,7 @@ class SDXLPromptInvocationBase:
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, # TODO: clip skip
requires_pooled=get_pooled,
device=TorchDevice.choose_torch_device(),
split_long_text_mode=SplitLongTextMode.SENTENCES,
)
conjunction = Compel.parse_prompt_string(prompt)

View File

@@ -274,12 +274,12 @@ class InvokeAdjustImageHuePlusInvocation(BaseInvocation, WithMetadata, WithBoard
title="Enhance Image",
tags=["enhance", "image"],
category="image",
version="1.2.0",
version="1.2.1",
)
class InvokeImageEnhanceInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Applies processing from PIL's ImageEnhance module. Originally created by @dwringer"""
image: ImageField = InputField(default=None, description="The image for which to apply processing")
image: ImageField = InputField(description="The image for which to apply processing")
invert: bool = InputField(default=False, description="Whether to invert the image colors")
color: float = InputField(ge=0, default=1.0, description="Color enhancement factor")
contrast: float = InputField(ge=0, default=1.0, description="Contrast enhancement factor")

View File

@@ -22,7 +22,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
from invokeai.app.util.controlnet_utils import (
CONTROLNET_MODE_VALUES,
CONTROLNET_RESIZE_VALUES,
heuristic_resize_fast,
)
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
@@ -109,7 +113,7 @@ class ControlNetInvocation(BaseInvocation):
title="Heuristic Resize",
tags=["image, controlnet"],
category="image",
version="1.0.1",
version="1.1.1",
classification=Classification.Prototype,
)
class HeuristicResizeInvocation(BaseInvocation):
@@ -122,7 +126,7 @@ class HeuristicResizeInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGB")
np_img = pil_to_np(image)
np_resized = heuristic_resize(np_img, (self.width, self.height))
np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
resized = np_to_pil(np_resized)
image_dto = context.images.save(image=resized)
return ImageOutput.build(image_dto)

View File

@@ -1,12 +1,14 @@
from typing import Literal, Optional
import cv2
import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image, ImageFilter
from PIL import Image
from torchvision.transforms.functional import resize as tv_resize
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
DenoiseMaskField,
FieldDescriptions,
@@ -42,15 +44,13 @@ class GradientMaskOutput(BaseInvocationOutput):
title="Create Gradient Mask",
tags=["mask", "denoise"],
category="latents",
version="1.2.0",
version="1.3.0",
)
class CreateGradientMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""
"""Creates mask for denoising."""
mask: ImageField = InputField(default=None, description="Image which will be masked", ui_order=1)
edge_radius: int = InputField(
default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
)
mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
edge_radius: int = InputField(default=16, ge=0, description="How far to expand the edges of the mask", ui_order=2)
coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
minimum_denoise: float = InputField(
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
@@ -81,45 +81,110 @@ class CreateGradientMaskInvocation(BaseInvocation):
@torch.no_grad()
def invoke(self, context: InvocationContext) -> GradientMaskOutput:
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
# Resize the mask_image. Makes the filter 64x faster and doesn't hurt quality, since the mask is used at latent scale anyway
mask_image = mask_image.resize(
(
mask_image.width // LATENT_SCALE_FACTOR,
mask_image.height // LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.BILINEAR,
)
mask_np_orig = np.array(mask_image, dtype=np.float32)
self.edge_radius = self.edge_radius // LATENT_SCALE_FACTOR # scale the edge radius to match the mask size
if self.edge_radius > 0:
mask_np = 255 - mask_np_orig # invert so 0 is unmasked (higher values = higher denoise strength)
dilated_mask = mask_np.copy()
# Create kernel based on coherence mode
if self.coherence_mode == "Box Blur":
blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
else: # Gaussian Blur OR Staged
# Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
# Create a circular distance kernel that fades from center outward
kernel_size = self.edge_radius * 2 + 1
center = self.edge_radius
kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
for i in range(kernel_size):
for j in range(kernel_size):
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
if dist <= self.edge_radius:
kernel[i, j] = 1.0 - (dist / self.edge_radius)
else: # Gaussian Blur or Staged
# Create a Gaussian kernel
kernel_size = self.edge_radius * 2 + 1
kernel = cv2.getGaussianKernel(
kernel_size, self.edge_radius / 2.5
) # 2.5 is a magic number: sigma = radius / 2.5, so the kernel spans roughly ±2.5 standard deviations
kernel = kernel * kernel.T # Make 2D gaussian kernel
kernel = kernel / np.max(kernel) # Normalize center to 1.0
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
# Ensure values outside radius are 0
center = self.edge_radius
for i in range(kernel_size):
for j in range(kernel_size):
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
if dist > self.edge_radius:
kernel[i, j] = 0
# redistribute blur so that the original edges are 0 and blur outwards to 1
blur_tensor = (blur_tensor - 0.5) * 2
blur_tensor[blur_tensor < 0] = 0.0
# 2D max filter
mask_tensor = torch.tensor(mask_np)
kernel_tensor = torch.tensor(kernel)
dilated_mask = 255 - self.max_filter2D_torch(mask_tensor, kernel_tensor).cpu()
dilated_mask = dilated_mask.numpy()
threshold = 1 - self.minimum_denoise
threshold = (1 - self.minimum_denoise) * 255
if self.coherence_mode == "Staged":
# wherever the blur_tensor is less than fully masked, convert it to threshold
blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
else:
# wherever the blur_tensor is above threshold but less than 1, drop it to threshold
blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
# wherever the expanded mask is darker than the original mask but the original was above the threshold, set it to the threshold
# makes any expansion areas drop to the threshold. Raising the minimum across the whole image happens outside of this if
threshold_mask = (dilated_mask < mask_np_orig) & (mask_np_orig > threshold)
dilated_mask = np.where(threshold_mask, threshold, mask_np_orig)
# wherever expanded mask is less than 255 but greater than threshold, drop it to threshold (minimum denoise)
threshold_mask = (dilated_mask > threshold) & (dilated_mask < 255)
dilated_mask = np.where(threshold_mask, threshold, dilated_mask)
else:
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
dilated_mask = mask_np_orig.copy()
mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
# convert to tensor
dilated_mask = np.clip(dilated_mask, 0, 255).astype(np.uint8)
mask_tensor = torch.tensor(dilated_mask, device=torch.device("cpu"))
# compute a [0, 1] mask from the blur_tensor
expanded_mask = torch.where((blur_tensor < 1), 0, 1)
expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
# binary mask for compositing
expanded_mask = np.where((dilated_mask < 255), 0, 255)
expanded_mask_image = Image.fromarray(expanded_mask.astype(np.uint8), mode="L")
expanded_mask_image = expanded_mask_image.resize(
(
mask_image.width * LATENT_SCALE_FACTOR,
mask_image.height * LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.NEAREST,
)
expanded_image_dto = context.images.save(expanded_mask_image)
# restore the original mask size
dilated_mask = Image.fromarray(dilated_mask.astype(np.uint8))
dilated_mask = dilated_mask.resize(
(
mask_image.width * LATENT_SCALE_FACTOR,
mask_image.height * LATENT_SCALE_FACTOR,
),
resample=Image.Resampling.NEAREST,
)
# stack the mask as a tensor, repeating 4 times on dimension 1
dilated_mask_tensor = image_resized_to_grid_as_tensor(dilated_mask, normalize=False)
mask_name = context.tensors.save(tensor=dilated_mask_tensor.unsqueeze(0))
masked_latents_name = None
if self.unet is not None and self.vae is not None and self.image is not None:
# all three fields must be present at the same time
main_model_config = context.models.get_config(self.unet.unet.key)
assert isinstance(main_model_config, MainConfigBase)
if main_model_config.variant is ModelVariantType.Inpaint:
mask = blur_tensor
mask = dilated_mask_tensor
vae_info: LoadedModel = context.models.load(self.vae.vae)
image = context.images.get_pil(self.image.image_name)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
@@ -137,3 +202,29 @@ class CreateGradientMaskInvocation(BaseInvocation):
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
)
def max_filter2D_torch(self, image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
"""
This morphological operation (a weighted max filter / grayscale dilation) is much faster in torch than in numpy or opencv.
For reasonable kernel sizes, the overhead of copying the data to the GPU is not worth it, so the computation stays on the CPU.
"""
h, w = kernel.shape
pad_h, pad_w = h // 2, w // 2
padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
result = torch.zeros_like(image)
# This looks inside out (iterating over kernel offsets rather than image pixels), but it computes the same result far more efficiently
for i in range(h):
for j in range(w):
weight = kernel[i, j]
if weight <= 0:
continue
# Extract the region from padded tensor
region = padded[i : i + image.shape[0], j : j + image.shape[1]]
# Apply weight and update max
result = torch.maximum(result, region * weight)
return result
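For intuition, here is a naive per-pixel reference for the same operation; an illustrative sketch, not part of the diff. Assuming non-negative inputs (the masks here are 0-255 floats), it produces the same output as the offset-major torch loop above.

```python
import numpy as np

def max_filter2d_naive(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    # For each output pixel, take the max of (neighborhood * kernel weight).
    h, w = kernel.shape
    pad_h, pad_w = h // 2, w // 2
    padded = np.pad(image, ((pad_h, pad_h), (pad_w, pad_w)), constant_values=0)
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            out[y, x] = (padded[y : y + h, x : x + w] * kernel).max()
    return out
```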

View File

@@ -608,6 +608,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
end_step_percent=single_ip_adapter.end_step_percent,
ip_adapter_conditioning=IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds),
mask=mask,
method=single_ip_adapter.method,
)
)

View File

@@ -62,7 +62,9 @@ class UIType(str, Enum, metaclass=MetaEnum):
FluxReduxModel = "FluxReduxModelField"
LlavaOnevisionModel = "LLaVAModelField"
Imagen3Model = "Imagen3ModelField"
Imagen4Model = "Imagen4ModelField"
ChatGPT4oModel = "ChatGPT4oModelField"
FluxKontextModel = "FluxKontextModelField"
# endregion
# region Misc Field Types
@@ -400,8 +402,8 @@ class InputFieldJSONSchemaExtra(BaseModel):
"""
input: Input
orig_required: bool
field_kind: FieldKind
orig_required: bool = True
default: Optional[Any] = None
orig_default: Optional[Any] = None
ui_hidden: bool = False
@@ -498,7 +500,7 @@ def InputField(
input: Input = Input.Any,
ui_type: Optional[UIType] = None,
ui_component: Optional[UIComponent] = None,
ui_hidden: bool = False,
ui_hidden: Optional[bool] = None,
ui_order: Optional[int] = None,
ui_choice_labels: Optional[dict[str, str]] = None,
) -> Any:
@@ -534,15 +536,20 @@ def InputField(
json_schema_extra_ = InputFieldJSONSchemaExtra(
input=input,
ui_type=ui_type,
ui_component=ui_component,
ui_hidden=ui_hidden,
ui_order=ui_order,
ui_choice_labels=ui_choice_labels,
field_kind=FieldKind.Input,
orig_required=True,
)
if ui_type is not None:
json_schema_extra_.ui_type = ui_type
if ui_component is not None:
json_schema_extra_.ui_component = ui_component
if ui_hidden is not None:
json_schema_extra_.ui_hidden = ui_hidden
if ui_order is not None:
json_schema_extra_.ui_order = ui_order
if ui_choice_labels is not None:
json_schema_extra_.ui_choice_labels = ui_choice_labels
"""
There is a conflict between the typing of invocation definitions and the typing of an invocation's
`invoke()` function.
@@ -614,7 +621,7 @@ def InputField(
return Field(
**provided_args,
json_schema_extra=json_schema_extra_.model_dump(exclude_none=True),
json_schema_extra=json_schema_extra_.model_dump(exclude_unset=True),
)
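A minimal sketch (plain Pydantic, not InvokeAI code) of what the switch to `None` defaults plus `exclude_unset` buys: UI fields the node author never touched disappear from the serialized schema, while an explicitly passed `False` still survives.

```python
from typing import Optional
from pydantic import BaseModel

class ExtraOld(BaseModel):
    ui_hidden: bool = False  # concrete default: always serialized

class ExtraNew(BaseModel):
    ui_hidden: Optional[bool] = None  # only meaningful when explicitly set

print(ExtraOld().model_dump(exclude_none=True))  # {'ui_hidden': False} - noise in the schema
print(ExtraNew().model_dump(exclude_unset=True))  # {} - omitted entirely
print(ExtraNew(ui_hidden=False).model_dump(exclude_unset=True))  # {'ui_hidden': False}
```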

View File

@@ -21,14 +21,14 @@ class IdealSizeOutput(BaseInvocationOutput):
"ideal_size",
title="Ideal Size - SD1.5, SDXL",
tags=["latents", "math", "ideal_size"],
version="1.0.5",
version="1.0.6",
)
class IdealSizeInvocation(BaseInvocation):
"""Calculates the ideal size for generation to avoid duplication"""
width: int = InputField(default=1024, description="Final image width")
height: int = InputField(default=576, description="Final image height")
unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
unet: UNetField = InputField(description=FieldDescriptions.unet)
multiplier: float = InputField(
default=1.0,
description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in "

View File

@@ -975,13 +975,13 @@ class SaveImageInvocation(BaseInvocation, WithMetadata, WithBoard):
title="Canvas Paste Back",
tags=["image", "combine"],
category="image",
version="1.0.0",
version="1.0.1",
)
class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Combines two images by using the mask provided. Intended for use on the Unified Canvas."""
source_image: ImageField = InputField(description="The source image")
target_image: ImageField = InputField(default=None, description="The target image")
target_image: ImageField = InputField(description="The target image")
mask: ImageField = InputField(
description="The mask to use when pasting",
)
@@ -1218,12 +1218,15 @@ class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
title="Add Image Noise",
tags=["image", "noise"],
category="image",
version="1.0.1",
version="1.1.0",
)
class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Add noise to an image"""
image: ImageField = InputField(description="The image to add noise to")
mask: Optional[ImageField] = InputField(
default=None, description="Optional mask determining where to apply noise (black=noise, white=no noise)"
)
seed: int = InputField(
default=0,
ge=0,
@@ -1267,12 +1270,27 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
(image.width, image.height), Image.Resampling.NEAREST
)
# Create a noisy version of the input image
noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")
# Paste back the alpha channel
noisy_image.putalpha(alpha)
# Apply mask if provided
if self.mask is not None:
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
image_dto = context.images.save(image=noisy_image)
if mask_image.size != image.size:
mask_image = mask_image.resize(image.size, Image.Resampling.LANCZOS)
result_image = image.copy()
mask_image = ImageOps.invert(mask_image)
result_image.paste(noisy_image, (0, 0), mask=mask_image)
else:
result_image = noisy_image
# Paste back the alpha channel from the original image
result_image.putalpha(alpha)
image_dto = context.images.save(image=result_image)
return ImageOutput.build(image_dto)
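A self-contained sketch (illustrative values, not InvokeAI code) of the mask semantics above: PIL's `paste` treats the mask as the alpha of the pasted image, so the user-facing convention of black = noise, white = no noise requires inverting the mask first.

```python
from PIL import Image, ImageOps

base = Image.new("RGB", (2, 1), (0, 0, 0))         # original pixels
noisy = Image.new("RGB", (2, 1), (255, 255, 255))  # stand-in for the noised image
mask = Image.new("L", (2, 1))
mask.putpixel((0, 0), 0)    # black = apply noise here
mask.putpixel((1, 0), 255)  # white = keep the original

out = base.copy()
out.paste(noisy, (0, 0), mask=ImageOps.invert(mask))
print(list(out.getdata()))  # [(255, 255, 255), (0, 0, 0)]
```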

View File

@@ -31,6 +31,7 @@ class IPAdapterField(BaseModel):
image_encoder_model: ModelIdentifierField = Field(description="The name of the CLIP image encoder model.")
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the IP-Adapter.")
target_blocks: List[str] = Field(default=[], description="The IP Adapter blocks to apply")
method: str = Field(default="full", description="Weight apply method")
begin_step_percent: float = Field(
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
)
@@ -94,7 +95,7 @@ class IPAdapterInvocation(BaseInvocation):
weight: Union[float, List[float]] = InputField(
default=1, description="The weight given to the IP-Adapter", title="Weight"
)
method: Literal["full", "style", "composition"] = InputField(
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = InputField(
default="full", description="The method to apply the IP-Adapter"
)
begin_step_percent: float = InputField(
@@ -147,6 +148,38 @@ class IPAdapterInvocation(BaseInvocation):
target_blocks = ["down_blocks.2.attentions.1"]
else:
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
elif self.method == "style_precise":
if ip_adapter_info.base == "sd-1":
target_blocks = ["up_blocks.1", "down_blocks.2", "mid_block"]
elif ip_adapter_info.base == "sdxl":
target_blocks = ["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"]
else:
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
elif self.method == "style_strong":
if ip_adapter_info.base == "sd-1":
target_blocks = ["up_blocks.0", "up_blocks.1", "up_blocks.2", "down_blocks.0", "down_blocks.1"]
elif ip_adapter_info.base == "sdxl":
target_blocks = [
"up_blocks.0.attentions.1",
"up_blocks.1.attentions.1",
"up_blocks.2.attentions.1",
"up_blocks.0.attentions.2",
"up_blocks.1.attentions.2",
"up_blocks.2.attentions.2",
"up_blocks.0.attentions.0",
"up_blocks.1.attentions.0",
"up_blocks.2.attentions.0",
"down_blocks.0.attentions.0",
"down_blocks.0.attentions.1",
"down_blocks.0.attentions.2",
"down_blocks.1.attentions.0",
"down_blocks.1.attentions.1",
"down_blocks.1.attentions.2",
"down_blocks.2.attentions.0",
"down_blocks.2.attentions.2",
]
else:
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
elif self.method == "full":
target_blocks = ["block"]
else:
@@ -162,6 +195,7 @@ class IPAdapterInvocation(BaseInvocation):
begin_step_percent=self.begin_step_percent,
end_step_percent=self.end_step_percent,
mask=self.mask,
method=self.method,
),
)
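For context, the target block names are matched as substrings of the UNet attention-processor module names (see the `UNetAttentionPatcher` change further down), so each entry enables every processor nested under that block. An illustrative sketch with hypothetical module names:

```python
attn_names = [
    "down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor",
    "up_blocks.0.attentions.1.transformer_blocks.0.attn2.processor",
    "mid_block.attentions.0.transformer_blocks.0.attn2.processor",
]
target_blocks = ["up_blocks.0.attentions.1", "mid_block"]
enabled = [n for n in attn_names if any(block in n for block in target_blocks)]
print(enabled)  # the up_blocks.0.attentions.1 and mid_block processors
```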

View File

@@ -42,7 +42,9 @@ class IPAdapterMetadataField(BaseModel):
image: ImageField = Field(description="The IP-Adapter image prompt.")
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = Field(
description="Method to apply IP Weights with"
)
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")

View File

@@ -6,7 +6,7 @@ import numpy as np
import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import AutoModelForMaskGeneration, AutoProcessor
from transformers import AutoProcessor
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor
@@ -104,14 +104,13 @@ class SegmentAnythingInvocation(BaseInvocation):
@staticmethod
def _load_sam_model(model_path: Path):
sam_model = AutoModelForMaskGeneration.from_pretrained(
sam_model = SamModel.from_pretrained(
model_path,
local_files_only=True,
# TODO(ryand): Setting the torch_dtype here doesn't work. Investigate whether fp16 is supported by the
# model, and figure out how to make it work in the pipeline.
# torch_dtype=TorchDevice.choose_torch_dtype(),
)
assert isinstance(sam_model, SamModel)
sam_processor = AutoProcessor.from_pretrained(model_path, local_files_only=True)
assert isinstance(sam_processor, SamProcessor)

View File

@@ -1,12 +1,3 @@
import uvicorn
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
def get_app():
"""Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
@@ -18,9 +9,18 @@ def get_app():
def run_app() -> None:
"""The main entrypoint for the app."""
# Parse the CLI arguments.
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
# Parse the CLI arguments before doing anything else, which ensures CLI args correctly override settings from other
# sources like `invokeai.yaml` or env vars.
InvokeAIArgs.parse_args()
import uvicorn
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
from invokeai.backend.util.logging import InvokeAILogger
# Load config.
app_config = get_config()
@@ -32,6 +32,8 @@ def run_app() -> None:
configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)
# This import must happen after configure_torch_cuda_allocator() is called, because the module imports torch.
from invokeai.app.invocations.baseinvocation import InvocationRegistry
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
from invokeai.backend.util.devices import TorchDevice
torch_device_name = TorchDevice.get_torch_device_name()
@@ -66,6 +68,15 @@ def run_app() -> None:
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)
# Check all invocations and ensure their outputs are registered.
for invocation in InvocationRegistry.get_invocation_classes():
invocation_type = invocation.get_type()
output_annotation = invocation.get_output_annotation()
if output_annotation not in InvocationRegistry.get_output_classes():
logger.warning(
f'Invocation "{invocation_type}" has unregistered output class "{output_annotation.__name__}"'
)
if app_config.dev_reload:
# load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
# imported.

View File

@@ -98,9 +98,18 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
# Handle board_id filter
if board_id == "none":
stmt += """--sql
AND board_images.board_id IS NULL
"""
else:
stmt += """--sql
AND board_images.board_id = ?
"""
params.append(board_id)
params.append(board_id)
# Add the category filter
if categories is not None:

View File

@@ -27,6 +27,10 @@ if TYPE_CHECKING:
from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase
from invokeai.app.services.model_images.model_images_base import ModelImageFileStorageBase
from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
ModelRelationshipRecordStorageBase,
)
from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC
from invokeai.app.services.names.names_base import NameServiceBase
from invokeai.app.services.session_processor.session_processor_base import SessionProcessorBase
from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
@@ -54,6 +58,8 @@ class InvocationServices:
logger: "Logger",
model_images: "ModelImageFileStorageBase",
model_manager: "ModelManagerServiceBase",
model_relationships: "ModelRelationshipsServiceABC",
model_relationship_records: "ModelRelationshipRecordStorageBase",
download_queue: "DownloadQueueServiceBase",
performance_statistics: "InvocationStatsServiceBase",
session_queue: "SessionQueueBase",
@@ -81,6 +87,8 @@ class InvocationServices:
self.logger = logger
self.model_images = model_images
self.model_manager = model_manager
self.model_relationships = model_relationships
self.model_relationship_records = model_relationship_records
self.download_queue = download_queue
self.performance_statistics = performance_statistics
self.session_queue = session_queue

View File

@@ -60,7 +60,7 @@ class InvocationStatsServiceBase(ABC):
pass
@abstractmethod
def reset_stats(self):
def reset_stats(self, graph_execution_state_id: str) -> None:
"""Reset all stored statistics."""
pass

View File

@@ -73,9 +73,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
)
self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)
def reset_stats(self):
self._stats = {}
self._cache_stats = {}
def reset_stats(self, graph_execution_state_id: str) -> None:
self._stats.pop(graph_execution_state_id, None)
self._cache_stats.pop(graph_execution_state_id, None)
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
graph_stats_summary = self._get_graph_summary(graph_execution_state_id)

View File

@@ -38,6 +38,7 @@ from invokeai.backend.model_manager.config import (
AnyModelConfig,
CheckpointConfigBase,
InvalidModelConfigException,
ModelConfigBase,
)
from invokeai.backend.model_manager.legacy_probe import ModelProbe
from invokeai.backend.model_manager.metadata import (
@@ -646,14 +647,18 @@ class ModelInstallService(ModelInstallServiceBase):
hash_algo = self._app_config.hashing_algorithm
fields = config.model_dump()
return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo)
# New model probe API is disabled pending resolution of issue caused by a change of the ordering of checks.
# See commit message for details.
# try:
# return ModelConfigBase.classify(model_path=model_path, hash_algo=hash_algo, **fields)
# except InvalidModelConfigException:
# return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo) # type: ignore
# WARNING!
# The legacy probe relies on the implicit order of tests to determine model classification.
# This can lead to regressions between the legacy and new probes.
# Do NOT change the order of `probe` and `classify` without implementing one of the following fixes:
# Short-term fix: `classify` tests `matches` in the same order as the legacy probe.
# Long-term fix: Improve `matches` to be more specific so that only one config matches
# any given model - eliminating ambiguity and removing reliance on order.
# After implementing either of these fixes, remove @pytest.mark.xfail from `test_regression_against_model_probe`
try:
return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo) # type: ignore
except InvalidModelConfigException:
return ModelConfigBase.classify(model_path, hash_algo, **fields)
def _register(
self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None

View File

@@ -0,0 +1,25 @@
from abc import ABC, abstractmethod
class ModelRelationshipRecordStorageBase(ABC):
"""Abstract base class for model-to-model relationship record storage."""
@abstractmethod
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
"""Creates a relationship between two models by keys."""
pass
@abstractmethod
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
"""Removes a relationship between two models by keys."""
pass
@abstractmethod
def get_related_model_keys(self, model_key: str) -> list[str]:
"""Gets all models keys related to a given model key."""
pass
@abstractmethod
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
"""Get related model keys for multiple models given a list of keys."""
pass

View File

@@ -0,0 +1,66 @@
import sqlite3
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
ModelRelationshipRecordStorageBase,
)
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteModelRelationshipRecordStorage(ModelRelationshipRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
if model_key_1 == model_key_2:
raise ValueError("Cannot relate a model to itself.")
a, b = sorted([model_key_1, model_key_2])
try:
cursor = self._conn.cursor()
cursor.execute(
"INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
(a, b),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
a, b = sorted([model_key_1, model_key_2])
try:
cursor = self._conn.cursor()
cursor.execute(
"DELETE FROM model_relationships WHERE model_key_1 = ? AND model_key_2 = ?",
(a, b),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def get_related_model_keys(self, model_key: str) -> list[str]:
cursor = self._conn.cursor()
cursor.execute(
"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
""",
(model_key, model_key),
)
return [row[0] for row in cursor.fetchall()]
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
cursor = self._conn.cursor()
key_list = ",".join("?" for _ in model_keys)
cursor.execute(
f"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
""",
model_keys + model_keys,
)
return [row[0] for row in cursor.fetchall()]
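A standalone sketch (in-memory database, illustrative keys) of the canonical-ordering trick used above: because each pair is stored with its keys sorted, `(a, b)` and `(b, a)` map to the same row, and the `UNION` query returns each neighbour exactly once.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE model_relationships ("
    " model_key_1 TEXT NOT NULL, model_key_2 TEXT NOT NULL,"
    " PRIMARY KEY (model_key_1, model_key_2))"
)
for k1, k2 in [("lora-a", "main-1"), ("main-1", "lora-a")]:  # both insert orders
    a, b = sorted([k1, k2])
    conn.execute("INSERT OR IGNORE INTO model_relationships VALUES (?, ?)", (a, b))
print(conn.execute("SELECT * FROM model_relationships").fetchall())
# [('lora-a', 'main-1')] - a single canonical row
```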

View File

@@ -0,0 +1,25 @@
from abc import ABC, abstractmethod
class ModelRelationshipsServiceABC(ABC):
"""High-level service for managing model-to-model relationships."""
@abstractmethod
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
"""Creates a relationship between two models keys."""
pass
@abstractmethod
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
"""Removes a relationship between two models keys."""
pass
@abstractmethod
def get_related_model_keys(self, model_key: str) -> list[str]:
"""Gets all models keys related to a given model key."""
pass
@abstractmethod
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
"""Get related model keys for multiple models."""
pass

View File

@@ -0,0 +1,9 @@
from datetime import datetime
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
class ModelRelationship(BaseModelExcludeNull):
model_key_1: str
model_key_2: str
created_at: datetime

View File

@@ -0,0 +1,31 @@
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC
from invokeai.backend.model_manager.config import AnyModelConfig
class ModelRelationshipsService(ModelRelationshipsServiceABC):
__invoker: Invoker
def start(self, invoker: Invoker) -> None:
self.__invoker = invoker
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
self.__invoker.services.model_relationship_records.add_model_relationship(model_key_1, model_key_2)
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
self.__invoker.services.model_relationship_records.remove_model_relationship(model_key_1, model_key_2)
def get_related_model_keys(self, model_key: str) -> list[str]:
return self.__invoker.services.model_relationship_records.get_related_model_keys(model_key)
def add_relationship_from_models(self, model_1: AnyModelConfig, model_2: AnyModelConfig) -> None:
self.add_model_relationship(model_1.key, model_2.key)
def remove_relationship_from_models(self, model_1: AnyModelConfig, model_2: AnyModelConfig) -> None:
self.remove_model_relationship(model_1.key, model_2.key)
def get_related_keys_from_model(self, model: AnyModelConfig) -> list[str]:
return self.get_related_model_keys(model.key)
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
return self.__invoker.services.model_relationship_records.get_related_model_keys_batch(model_keys)

View File

@@ -210,7 +210,7 @@ class DefaultSessionRunner(SessionRunnerBase):
# we don't care about that - suppress the error.
with suppress(GESStatsNotFoundError):
self._services.performance_statistics.log_stats(queue_item.session.id)
self._services.performance_statistics.reset_stats()
self._services.performance_statistics.reset_stats(queue_item.session.id)
for callback in self._on_after_run_session_callbacks:
callback(queue_item=queue_item)

View File

@@ -148,7 +148,7 @@ class Batch(BaseModel):
node = cast(BaseInvocation, graph.get_node(batch_data.node_path))
except NodeNotFoundError:
raise NodeNotFoundError(f"Node {batch_data.node_path} not found in graph")
if batch_data.field_name not in node.model_fields:
if batch_data.field_name not in type(node).model_fields:
raise NodeNotFoundError(f"Field {batch_data.field_name} not found in node {batch_data.node_path}")
return values
@@ -205,6 +205,7 @@ class FieldIdentifier(BaseModel):
kind: Literal["input", "output"] = Field(description="The kind of field")
node_id: str = Field(description="The ID of the node")
field_name: str = Field(description="The name of the field")
user_label: str | None = Field(description="The user label of the field, if any")
class SessionQueueItemWithoutGraph(BaseModel):

View File

@@ -424,7 +424,7 @@ class Graph(BaseModel):
)
# input fields are on the node
if edge.destination.field not in destination_node.model_fields:
if edge.destination.field not in type(destination_node).model_fields:
raise NodeFieldNotFoundError(
f"Edge destination field {edge.destination.field} does not exist in node {edge.destination.node_id}"
)
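Both this fix and the `Batch` one above address the same issue: newer Pydantic releases deprecate reading `model_fields` from an instance, since it is class-level metadata. A minimal sketch:

```python
from pydantic import BaseModel

class Node(BaseModel):
    width: int = 512

node = Node()
# Preferred: class-level access; instance access emits a deprecation warning
# on recent Pydantic versions.
print("width" in type(node).model_fields)  # True
```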

View File

@@ -22,6 +22,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import build_migration_17
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -61,6 +62,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_17())
migrator.register_migration(build_migration_18())
migrator.register_migration(build_migration_19(app_config=config))
migrator.register_migration(build_migration_20())
migrator.run_migrations()
return db

View File

@@ -0,0 +1,37 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration20Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
cursor.execute(
"""
-- many-to-many relationship table for models
CREATE TABLE IF NOT EXISTS model_relationships (
-- model_key_1 and model_key_2 are the same as the key (primary key) in the models table
model_key_1 TEXT NOT NULL,
model_key_2 TEXT NOT NULL,
created_at TEXT DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
PRIMARY KEY (model_key_1, model_key_2),
-- model_key_1 < model_key_2, to ensure uniqueness and prevent duplicates
FOREIGN KEY (model_key_1) REFERENCES models(id) ON DELETE CASCADE,
FOREIGN KEY (model_key_2) REFERENCES models(id) ON DELETE CASCADE
);
"""
)
cursor.execute(
"""
-- Create an index so that lookups by model_key_2 perform as well as lookups by model_key_1 (already covered by the primary key)
CREATE INDEX IF NOT EXISTS keyx_model_relationships_model_key_2
ON model_relationships(model_key_2)
"""
)
def build_migration_20() -> Migration:
return Migration(
from_version=19,
to_version=20,
callback=Migration20Callback(),
)

View File

@@ -230,6 +230,86 @@ def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.
return resized
# precompute common kernels
_KERNEL3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# directional masks for NMS
_DIRS = [
np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], np.uint8),
np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], np.uint8),
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.uint8),
np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], np.uint8),
]
def heuristic_resize_fast(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
h, w = np_img.shape[:2]
# early exit
if (w, h) == size:
return np_img
# separate alpha channel
img = np_img
alpha = None
if img.ndim == 3 and img.shape[2] == 4:
alpha, img = img[:, :, 3], img[:, :, :3]
# build a small sample for unique-color & binary detection
flat = img.reshape(-1, img.shape[-1])
N = flat.shape[0]
# include four corners to avoid missing extreme values
corners = np.vstack([img[0, 0], img[0, w - 1], img[h - 1, 0], img[h - 1, w - 1]])
cnt = min(N, 100_000)
samp = np.vstack([corners, flat[np.random.choice(N, cnt, replace=False)]])
uc = np.unique(samp, axis=0).shape[0]
vmin, vmax = samp.min(), samp.max()
# detect binary edge map & one-pixel-edge case
is_binary = uc == 2 and vmin < 16 and vmax > 240
one_pixel_edge = False
if is_binary:
# single gray conversion
gray0 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grad = cv2.morphologyEx(gray0, cv2.MORPH_GRADIENT, _KERNEL3)
cnt_edge = cv2.countNonZero(grad)
cnt_all = cv2.countNonZero((gray0 > 127).astype(np.uint8))
one_pixel_edge = (2 * cnt_edge) > cnt_all
# choose interp for color/seg/grayscale
area_new, area_old = size[0] * size[1], w * h
if 2 < uc < 200: # segmentation map
interp = cv2.INTER_NEAREST
elif area_new < area_old:
interp = cv2.INTER_AREA
else:
interp = cv2.INTER_CUBIC
# single resize pass on RGB
resized = cv2.resize(img, size, interpolation=interp)
if is_binary:
# convert to gray & apply NMS via C++ dilate
gray_r = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
nms = np.zeros_like(gray_r)
for K in _DIRS:
d = cv2.dilate(gray_r, K)
mask = d == gray_r
nms[mask] = gray_r[mask]
# threshold + thinning if needed
_, bw = cv2.threshold(nms, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
out_bin = cv2.ximgproc.thinning(bw) if one_pixel_edge else bw
# restore 3 channels
resized = np.stack([out_bin] * 3, axis=2)
# restore alpha with same interp as RGB for consistency
if alpha is not None:
am = cv2.resize(alpha, size, interpolation=interp)
am = (am > 127).astype(np.uint8) * 255
resized = np.dstack((resized, am))
return resized
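A quick smoke-test sketch for the function above (assumes `opencv-contrib-python` is installed, since the one-pixel-edge branch calls `cv2.ximgproc.thinning`): a two-colour edge map takes the binary NMS/thinning path, while a many-colour image takes the plain resize path.

```python
import numpy as np

# A synthetic 1px-wide edge map: two colours with extreme values -> binary branch
edge_map = np.zeros((64, 64, 3), np.uint8)
edge_map[32, :] = 255
out = heuristic_resize_fast(edge_map, (128, 128))
assert out.shape == (128, 128, 3)
```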
###########################################################################
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
# modified for InvokeAI
@@ -244,7 +324,7 @@ def np_img_resize(
np_img = normalize_image_channel_count(np_img)
if resize_mode == "just_resize": # RESIZE
np_img = heuristic_resize(np_img, (w, h))
np_img = heuristic_resize_fast(np_img, (w, h))
np_img = clone_contiguous(np_img)
return np_img_to_torch(np_img, device), np_img
@@ -265,7 +345,7 @@ def np_img_resize(
# Inpaint hijack
high_quality_border_color[3] = 255
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
new_h, new_w, _ = np_img.shape
pad_h = max(0, (h - new_h) // 2)
pad_w = max(0, (w - new_w) // 2)
@@ -275,7 +355,7 @@ def np_img_resize(
return np_img_to_torch(np_img, device), np_img
else: # resize_mode == "crop_resize" (INNER_FIT)
k = max(k0, k1)
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
new_h, new_w, _ = np_img.shape
pad_h = max(0, (new_h - h) // 2)
pad_w = max(0, (new_w - w) // 2)

View File

@@ -12,6 +12,9 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:

View File

@@ -146,33 +146,35 @@ class ModelConfigBase(ABC, BaseModel):
)
usage_info: Optional[str] = Field(default=None, description="Usage information for this model")
_USING_LEGACY_PROBE: ClassVar[set] = set()
_USING_CLASSIFY_API: ClassVar[set] = set()
USING_LEGACY_PROBE: ClassVar[set] = set()
USING_CLASSIFY_API: ClassVar[set] = set()
_MATCH_SPEED: ClassVar[MatchSpeed] = MatchSpeed.MED
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
if issubclass(cls, LegacyProbeMixin):
ModelConfigBase._USING_LEGACY_PROBE.add(cls)
ModelConfigBase.USING_LEGACY_PROBE.add(cls)
else:
ModelConfigBase._USING_CLASSIFY_API.add(cls)
ModelConfigBase.USING_CLASSIFY_API.add(cls)
@staticmethod
def all_config_classes():
subclasses = ModelConfigBase._USING_LEGACY_PROBE | ModelConfigBase._USING_CLASSIFY_API
subclasses = ModelConfigBase.USING_LEGACY_PROBE | ModelConfigBase.USING_CLASSIFY_API
concrete = {cls for cls in subclasses if not isabstract(cls)}
return concrete
@staticmethod
def classify(model_path: Path, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides):
def classify(mod: str | Path | ModelOnDisk, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides):
"""
Returns the best matching ModelConfig instance from a model's file/folder path.
Raises InvalidModelConfigException if no valid configuration is found.
Created to deprecate ModelProbe.probe
"""
candidates = ModelConfigBase._USING_CLASSIFY_API
if isinstance(mod, Path | str):
mod = ModelOnDisk(mod, hash_algo)
candidates = ModelConfigBase.USING_CLASSIFY_API
sorted_by_match_speed = sorted(candidates, key=lambda cls: (cls._MATCH_SPEED, cls.__name__))
mod = ModelOnDisk(model_path, hash_algo)
for config_cls in sorted_by_match_speed:
try:

View File

@@ -2,6 +2,8 @@ from typing import Any
import torch
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
class CachedModelOnlyFullLoad:
"""A wrapper around a PyTorch model to handle full loads and unloads between the CPU and the compute device.
@@ -76,7 +78,15 @@ class CachedModelOnlyFullLoad:
for k, v in self._cpu_state_dict.items():
new_state_dict[k] = v.to(self._compute_device, copy=True)
self._model.load_state_dict(new_state_dict, assign=True)
self._model.to(self._compute_device)
check_for_gguf = hasattr(self._model, "state_dict") and self._model.state_dict().get("img_in.weight")
if isinstance(check_for_gguf, GGMLTensor):
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
torch.__future__.set_overwrite_module_params_on_conversion(True)
self._model.to(self._compute_device)
torch.__future__.set_overwrite_module_params_on_conversion(old_value)
else:
self._model.to(self._compute_device)
self._is_in_vram = True
return self._total_bytes
@@ -92,7 +102,15 @@ class CachedModelOnlyFullLoad:
if self._cpu_state_dict is not None:
self._model.load_state_dict(self._cpu_state_dict, assign=True)
self._model.to(self._offload_device)
check_for_gguf = hasattr(self._model, "state_dict") and self._model.state_dict().get("img_in.weight")
if isinstance(check_for_gguf, GGMLTensor):
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
torch.__future__.set_overwrite_module_params_on_conversion(True)
self._model.to(self._offload_device)
torch.__future__.set_overwrite_module_params_on_conversion(old_value)
else:
self._model.to(self._offload_device)
self._is_in_vram = False
return self._total_bytes
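A minimal sketch of the torch flag being toggled above (illustrative, not InvokeAI code): with `overwrite_module_params_on_conversion` enabled, `Module.to()` replaces parameter objects outright instead of mutating them in place, which is what allows the GGML-quantized tensors to survive the device move.

```python
import torch

old = torch.__future__.get_overwrite_module_params_on_conversion()
torch.__future__.set_overwrite_module_params_on_conversion(True)
try:
    model = torch.nn.Linear(4, 4)
    model.to(torch.float16)  # parameters are swapped out, not mutated in place
finally:
    torch.__future__.set_overwrite_module_params_on_conversion(old)
```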

View File

@@ -2,9 +2,10 @@ import gc
import logging
import threading
import time
from dataclasses import dataclass
from functools import wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional
from typing import Any, Callable, Dict, List, Optional, Protocol
import psutil
import torch
@@ -54,6 +55,39 @@ def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
return wrapper
@dataclass
class CacheEntrySnapshot:
cache_key: str
total_bytes: int
current_vram_bytes: int
class CacheMissCallback(Protocol):
def __call__(
self,
model_key: str,
cache_snapshot: dict[str, CacheEntrySnapshot],
) -> None: ...
class CacheHitCallback(Protocol):
def __call__(
self,
model_key: str,
cache_snapshot: dict[str, CacheEntrySnapshot],
) -> None: ...
class CacheModelsClearedCallback(Protocol):
def __call__(
self,
models_cleared: int,
bytes_requested: int,
bytes_freed: int,
cache_snapshot: dict[str, CacheEntrySnapshot],
) -> None: ...
class ModelCache:
"""A cache for managing models in memory.
@@ -144,6 +178,34 @@ class ModelCache:
# - Requests to empty the cache from a separate thread
self._lock = threading.RLock()
self._on_cache_hit_callbacks: set[CacheHitCallback] = set()
self._on_cache_miss_callbacks: set[CacheMissCallback] = set()
self._on_cache_models_cleared_callbacks: set[CacheModelsClearedCallback] = set()
def on_cache_hit(self, cb: CacheHitCallback) -> Callable[[], None]:
self._on_cache_hit_callbacks.add(cb)
def unsubscribe() -> None:
self._on_cache_hit_callbacks.discard(cb)
return unsubscribe
def on_cache_miss(self, cb: CacheMissCallback) -> Callable[[], None]:
self._on_cache_miss_callbacks.add(cb)
def unsubscribe() -> None:
self._on_cache_miss_callbacks.discard(cb)
return unsubscribe
def on_cache_models_cleared(self, cb: CacheModelsClearedCallback) -> Callable[[], None]:
self._on_cache_models_cleared_callbacks.add(cb)
def unsubscribe() -> None:
self._on_cache_models_cleared_callbacks.discard(cb)
return unsubscribe
@property
@synchronized
def stats(self) -> Optional[CacheStats]:
@@ -195,6 +257,20 @@ class ModelCache:
f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
)
@synchronized
def _get_cache_snapshot(self) -> dict[str, CacheEntrySnapshot]:
overview: dict[str, CacheEntrySnapshot] = {}
for cache_key, cache_entry in self._cached_models.items():
total_bytes = cache_entry.cached_model.total_bytes()
current_vram_bytes = cache_entry.cached_model.cur_vram_bytes()
overview[cache_key] = CacheEntrySnapshot(
cache_key=cache_key,
total_bytes=total_bytes,
current_vram_bytes=current_vram_bytes,
)
return overview
@synchronized
def get(self, key: str, stats_name: Optional[str] = None) -> CacheRecord:
"""Retrieve a model from the cache.
@@ -208,6 +284,8 @@ class ModelCache:
if self.stats:
self.stats.hits += 1
else:
for cb in self._on_cache_miss_callbacks:
cb(model_key=key, cache_snapshot=self._get_cache_snapshot())
if self.stats:
self.stats.misses += 1
self._logger.debug(f"Cache miss: {key}")
@@ -229,6 +307,8 @@ class ModelCache:
self._cache_stack.append(key)
self._logger.debug(f"Cache hit: {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")
for cb in self._on_cache_hit_callbacks:
cb(model_key=key, cache_snapshot=self._get_cache_snapshot())
return cache_entry
@synchronized
@@ -649,6 +729,13 @@ class ModelCache:
# immediately when their reference count hits 0.
if self.stats:
self.stats.cleared = models_cleared
for cb in self._on_cache_models_cleared_callbacks:
cb(
models_cleared=models_cleared,
bytes_requested=bytes_needed,
bytes_freed=ram_bytes_freed,
cache_snapshot=self._get_cache_snapshot(),
)
gc.collect()
TorchDevice.empty_cache()
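A hypothetical subscriber for the new callbacks, assuming a `ModelCache` instance named `cache`: the callbacks are invoked with keyword arguments matching the `Protocol` signatures above, and the returned function unsubscribes.

```python
def log_hit(model_key: str, cache_snapshot) -> None:
    # cache_snapshot is a dict[str, CacheEntrySnapshot]
    total = sum(s.total_bytes for s in cache_snapshot.values())
    print(f"cache hit: {model_key} ({len(cache_snapshot)} entries, {total / 2**20:.1f} MB cached)")

unsubscribe = cache.on_cache_hit(log_hit)
# ... later, when the subscriber is no longer needed:
unsubscribe()
```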

View File

@@ -62,11 +62,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
# If this too fails, raise exception.
model_info = None
# Handling for our special syntax - we only want the base HF `org/repo` here.
repo_id = id.split("::")[0] or id
while not model_info:
try:
model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant)
model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
except RepositoryNotFoundError as excp:
raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
except RevisionNotFoundError:
if variant is None:
raise
@@ -75,14 +78,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
files: list[RemoteModelFile] = []
_, name = id.split("/")
_, name = repo_id.split("/")
for s in model_info.siblings or []:
assert s.rfilename is not None
assert s.size is not None
files.append(
RemoteModelFile(
url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
path=Path(name, s.rfilename),
size=s.size,
sha256=s.lfs.get("sha256") if s.lfs else None,
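Illustratively, the `"::"` handling strips InvokeAI's extended Hugging Face syntax down to the bare `org/repo` that `HfApi` accepts (the suffix after `::` is hypothetical here):

```python
id = "some-org/some-repo::fp16"  # hypothetical extended-syntax model id
repo_id = id.split("::")[0] or id
print(repo_id)  # some-org/some-repo
```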

View File

@@ -4,6 +4,7 @@ from typing import Any, Optional, TypeAlias
import safetensors.torch
import torch
from picklescan.scanner import scan_file_path
from safetensors import safe_open
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
@@ -35,12 +36,21 @@ class ModelOnDisk:
return self.path.stat().st_size
return sum(file.stat().st_size for file in self.path.rglob("*"))
def component_paths(self) -> set[Path]:
def weight_files(self) -> set[Path]:
if self.path.is_file():
return {self.path}
extensions = {".safetensors", ".pt", ".pth", ".ckpt", ".bin", ".gguf"}
return {f for f in self.path.rglob("*") if f.suffix in extensions}
def metadata(self, path: Optional[Path] = None) -> dict[str, str]:
try:
with safe_open(self.path, framework="pt", device="cpu") as f:
metadata = f.metadata()
assert isinstance(metadata, dict)
return metadata
except Exception:
return {}
def repo_variant(self) -> Optional[ModelRepoVariant]:
if self.path.is_file():
return None
@@ -64,18 +74,7 @@ class ModelOnDisk:
if path in sd_cache:
return sd_cache[path]
if not path:
components = list(self.component_paths())
match components:
case []:
raise ValueError("No weight files found for this model")
case [p]:
path = p
case ps if len(ps) >= 2:
raise ValueError(
f"Multiple weight files found for this model: {ps}. "
f"Please specify the intended file using the 'path' argument"
)
path = self.resolve_weight_file(path)
with SilenceWarnings():
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
@@ -94,3 +93,18 @@ class ModelOnDisk:
state_dict = checkpoint.get("state_dict", checkpoint)
sd_cache[path] = state_dict
return state_dict
def resolve_weight_file(self, path: Optional[Path] = None) -> Path:
if not path:
weight_files = list(self.weight_files())
match weight_files:
case []:
raise ValueError("No weight files found for this model")
case [p]:
return p
case ps if len(ps) >= 2:
raise ValueError(
f"Multiple weight files found for this model: {ps}. "
f"Please specify the intended file using the 'path' argument"
)
return path

View File

@@ -27,7 +27,9 @@ class BaseModelType(str, Enum):
Flux = "flux"
CogView4 = "cogview4"
Imagen3 = "imagen3"
Imagen4 = "imagen4"
ChatGPT4o = "chatgpt-4o"
FluxKontext = "flux-kontext"
class ModelType(str, Enum):

View File

@@ -1,3 +1,4 @@
import re
from contextlib import contextmanager
from typing import Dict, Iterable, Optional, Tuple
@@ -7,6 +8,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
@@ -23,6 +25,7 @@ class LayerPatcher:
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
force_direct_patching: bool = False,
force_sidecar_patching: bool = False,
suppress_warning_layers: Optional[re.Pattern] = None,
):
"""Apply 'smart' model patching that chooses whether to use direct patching or a sidecar wrapper for each
module.
@@ -44,6 +47,7 @@ class LayerPatcher:
dtype=dtype,
force_direct_patching=force_direct_patching,
force_sidecar_patching=force_sidecar_patching,
suppress_warning_layers=suppress_warning_layers,
)
yield
@@ -70,6 +74,7 @@ class LayerPatcher:
dtype: torch.dtype,
force_direct_patching: bool,
force_sidecar_patching: bool,
suppress_warning_layers: Optional[re.Pattern] = None,
):
"""Apply a single LoRA patch to a model using the 'smart' patching strategy that chooses whether to use direct
patching or a sidecar wrapper for each module.
@@ -89,9 +94,17 @@ class LayerPatcher:
if not layer_key.startswith(prefix):
continue
module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
try:
module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
except AttributeError:
if suppress_warning_layers and suppress_warning_layers.search(layer_key):
pass
else:
logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
logger.warning("Failed to find module for LoRA layer key: %s", layer_key)
continue
# Decide whether to use direct patching or a sidecar patch.
# Direct patching is preferred, because it results in better runtime speed.

View File

@@ -5,7 +5,8 @@ from typing import Callable, Optional, Union
import gguf
import torch
TORCH_COMPATIBLE_QTYPES = {None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}
# should not be a Set until this is resolved: https://github.com/pytorch/pytorch/issues/145761
TORCH_COMPATIBLE_QTYPES = [None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]
# K Quants #
QK_K = 256

View File

@@ -30,18 +30,13 @@ class RectifiedFlowInpaintExtension:
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
# threshold worked well.
# 1.0. This helps to produce more coherent seams around the inpainted region.
# We use a small epsilon to avoid any potential issues with floating point precision.
eps = 1e-4
mask_gradient_t_cutoff = 0.5
if t_prev > mask_gradient_t_cutoff:
# Early in the denoising process, use the inpaint mask as-is.
return self._inpaint_mask
else:
# After the cut-off, promote all non-zero mask values to 1.0.
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
mask = torch.where(self._inpaint_mask >= t_prev + eps, 1.0, 0.0).to(
dtype=self._inpaint_mask.dtype, device=self._inpaint_mask.device
)
return mask
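Worked numbers for the adjustment above (illustrative values only): gradient mask entries at or above `t_prev + eps` are promoted to 1.0 and everything else drops to 0.0, so the soft edge hardens progressively as denoising advances.

```python
import torch

inpaint_mask = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])
t_prev, eps = 0.5, 1e-4
mask = torch.where(inpaint_mask >= t_prev + eps, 1.0, 0.0)
print(mask)  # tensor([0., 0., 0., 1., 1.])
```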

View File

@@ -371,7 +371,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if use_ip_adapter or use_regional_prompting:
ip_adapters: Optional[List[UNetIPAdapterData]] = (
[{"ip_adapter": ipa.ip_adapter_model, "target_blocks": ipa.target_blocks} for ipa in ip_adapter_data]
[
{"ip_adapter": ipa.ip_adapter_model, "target_blocks": ipa.target_blocks, "method": ipa.method}
for ipa in ip_adapter_data
]
if use_ip_adapter
else None
)

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import math
from dataclasses import dataclass
from dataclasses import dataclass, field
from enum import Enum
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
@@ -104,15 +104,29 @@ class IPAdapterConditioningInfo:
@dataclass
class IPAdapterData:
"""Data class for IP-Adapter configuration.
Attributes:
ip_adapter_model: The IP-Adapter model to use.
ip_adapter_conditioning: The IP-Adapter conditioning data.
mask: The mask to apply to the IP-Adapter conditioning.
target_blocks: List of target attention block names to apply IP-Adapter to.
negative_blocks: List of target attention block names that should use negative attention.
weight: The weight to apply to the IP-Adapter conditioning.
begin_step_percent: The percentage of steps at which to start applying the IP-Adapter.
end_step_percent: The percentage of steps at which to stop applying the IP-Adapter.
method: The method to use for applying the IP-Adapter ('full', 'style', 'composition').
"""
ip_adapter_model: IPAdapter
ip_adapter_conditioning: IPAdapterConditioningInfo
mask: torch.Tensor
target_blocks: List[str]
# Either a single weight applied to all steps, or a list of weights for each step.
negative_blocks: List[str] = field(default_factory=list)
weight: Union[float, List[float]] = 1.0
begin_step_percent: float = 0.0
end_step_percent: float = 1.0
method: str = "full"
def scale_for_step(self, step_index: int, total_steps: int) -> float:
first_adapter_step = math.floor(self.begin_step_percent * total_steps)

View File

@@ -14,6 +14,7 @@ from invokeai.backend.stable_diffusion.diffusion.regional_prompt_data import Reg
class IPAdapterAttentionWeights:
ip_adapter_weights: IPAttentionProcessorWeights
skip: bool
negative: bool
class CustomAttnProcessor2_0(AttnProcessor2_0):
@@ -162,6 +163,10 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):
# Expected ip_hidden_state shape: (batch_size, num_ip_images, ip_seq_len, ip_image_embedding)
if not self._ip_adapter_attention_weights[ipa_index].skip:
# apply the IP-Adapter weights to the negative embeds
if self._ip_adapter_attention_weights[ipa_index].negative:
ip_hidden_states = torch.cat([ip_hidden_states[1], ip_hidden_states[0] * 0], dim=0)
ip_key = ipa_weights.to_k_ip(ip_hidden_states)
ip_value = ipa_weights.to_v_ip(ip_hidden_states)

View File

@@ -12,7 +12,8 @@ from invokeai.backend.stable_diffusion.diffusion.custom_atttention import (
class UNetIPAdapterData(TypedDict):
ip_adapter: IPAdapter
target_blocks: List[str]
target_blocks: List[str] # Blocks where IP-Adapter should be applied
method: str # Style or other method type
class UNetAttentionPatcher:
@@ -39,12 +40,18 @@ class UNetAttentionPatcher:
for ip_adapter in self._ip_adapters:
ip_adapter_weights = ip_adapter["ip_adapter"].attn_weights.get_attention_processor_weights(idx)
skip = True
negative = False
for block in ip_adapter["target_blocks"]:
if block in name:
skip = False
negative = ip_adapter["method"] == "style_precise" and (
block == "down_blocks.2.attentions.1"
or block == "down_blocks.2"
or block == "mid_block"
)
break
ip_adapter_attention_weights: IPAdapterAttentionWeights = IPAdapterAttentionWeights(
ip_adapter_weights=ip_adapter_weights, skip=skip
ip_adapter_weights=ip_adapter_weights, skip=skip, negative=negative
)
ip_adapter_attention_weights_collection.append(ip_adapter_attention_weights)

View File

@@ -14,6 +14,8 @@ const config: KnipConfig = {
'src/features/controlLayers/konva/util.ts',
// TODO(psyche): restore HRF functionality?
'src/features/hrf/**',
// This feature is (temporarily?) disabled
'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
],
ignoreBinaries: ['only-allow'],
paths: {

View File

@@ -24,15 +24,18 @@
"autoAddBoard": "Auto-Add Board",
"boards": "Boards",
"selectedForAutoAdd": "Selected for Auto-Add",
"bottomMessage": "Deleting this board and its images will reset any features currently using them.",
"bottomMessage": "Deleting images will reset any features currently using them.",
"cancel": "Cancel",
"changeBoard": "Change Board",
"clearSearch": "Clear Search",
"deleteBoard": "Delete Board",
"deleteBoardAndImages": "Delete Board and Images",
"deleteBoardOnly": "Delete Board Only",
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
"deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
"deletedBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
"deletedPrivateBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
"uncategorizedImages": "Uncategorized Images",
"deleteAllUncategorizedImages": "Delete All Uncategorized Images",
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
"hideBoards": "Hide Boards",
"loading": "Loading...",
"menuItemAutoAdd": "Auto-add to this Board",
@@ -46,7 +49,7 @@
"searchBoard": "Search Boards...",
"selectBoard": "Select a Board",
"shared": "Shared Boards",
"topMessage": "This board contains images used in the following features:",
"topMessage": "This selection contains images used in the following features:",
"unarchiveBoard": "Unarchive Board",
"uncategorized": "Uncategorized",
"viewBoards": "View Boards",
@@ -846,6 +849,8 @@
"predictionType": "Prediction Type",
"prune": "Prune",
"pruneTooltip": "Prune finished imports from queue",
"relatedModels": "Related Models",
"showOnlyRelatedModels": "Related",
"repo_id": "Repo ID",
"repoVariant": "Repo Variant",
"scanFolder": "Scan Folder",
@@ -1142,6 +1147,7 @@
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1330,8 +1336,9 @@
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
"unableToCopyDesc_theseSteps": "these steps",
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
"imagen3IncompatibleGenerationMode": "Google Imagen3 supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models Inpainting and Outpainting tasks.",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
"problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
"workflowUnpublished": "Workflow Unpublished"
@@ -1905,11 +1912,13 @@
"addPositivePrompt": "Add $t(controlLayers.prompt)",
"addNegativePrompt": "Add $t(controlLayers.negativePrompt)",
"addReferenceImage": "Add $t(controlLayers.referenceImage)",
"addImageNoise": "Add $t(controlLayers.imageNoise)",
"addRasterLayer": "Add $t(controlLayers.rasterLayer)",
"addControlLayer": "Add $t(controlLayers.controlLayer)",
"addInpaintMask": "Add $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
"addGlobalReferenceImage": "Add $t(controlLayers.globalReferenceImage)",
"addDenoiseLimit": "Add $t(controlLayers.denoiseLimit)",
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
@@ -2007,8 +2016,10 @@
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",
"warnings": {
"problemsFound": "Problems found",
"unsupportedModel": "layer not supported for selected base model",
@@ -2040,10 +2051,14 @@
"ipAdapterMethod": "Mode",
"full": "Style and Composition",
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
"style": "Style Only",
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
"style": "Style (Simple)",
"styleDesc": "Applies visual style (colors, textures) without considering its layout. Previously called Style Only.",
"composition": "Composition Only",
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
"compositionDesc": "Replicates layout & structure while ignoring the reference's style.",
"styleStrong": "Style (Strong)",
"styleStrongDesc": "Applies a strong visual style, with a slightly reduced composition influence.",
"stylePrecise": "Style (Precise)",
"stylePreciseDesc": "Applies a precise visual style, eliminating subject influence."
},
"fluxReduxImageInfluence": {
"imageInfluence": "Image Influence",
@@ -2413,8 +2428,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"CogView4: Support for CogView4 models in Canvas and Workflows.",
"Updated Dependencies: Invoke now runs on the latest version of its dependencies, including Python 3.12 and Pytorch 2.6.0."
"Inpainting: Per-mask noise levels and denoise limits.",
"Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -654,7 +654,9 @@
"modelPickerFallbackNoModelsInstalled": "Nessun modello installato.",
"modelPickerFallbackNoModelsInstalled2": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare i modelli.",
"manageModels": "Gestione modelli",
"hfTokenReset": "Ripristino del gettone HF"
"hfTokenReset": "Ripristino del gettone HF",
"relatedModels": "Modelli correlati",
"showOnlyRelatedModels": "Correlati"
},
"parameters": {
"images": "Immagini",
@@ -881,8 +883,8 @@
"problemUnpublishingWorkflow": "Problema durante l'annullamento della pubblicazione del flusso di lavoro",
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
"workflowUnpublished": "Flusso di lavoro non pubblicato",
"imagen3IncompatibleGenerationMode": "Google Imagen3 supporta solo la conversione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting."
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
},
"accessibility": {
"invokeProgressBar": "Barra di avanzamento generazione",
@@ -1084,11 +1086,11 @@
"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
"cancel": "Annulla",
"addBoard": "Aggiungi Bacheca",
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
"bottomMessage": "L'eliminazione delle immagini reimposterà tutte le funzionalità che le stanno utilizzando.",
"changeBoard": "Cambia Bacheca",
"loading": "Caricamento in corso ...",
"clearSearch": "Cancella Ricerca",
"topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:",
"topMessage": "Questa selezione contiene immagini utilizzate nelle seguenti funzionalità:",
"move": "Sposta",
"myBoard": "Bacheca",
"searchBoard": "Cerca bacheche ...",
@@ -1099,7 +1101,7 @@
"deleteBoardOnly": "solo la Bacheca",
"deleteBoard": "Elimina Bacheca",
"deleteBoardAndImages": "Bacheca e Immagini",
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate nella bacheca \"Non categorizzato\".",
"deletedBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate in uno stato non categorizzato.",
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
"movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:",
@@ -1121,8 +1123,11 @@
"noBoards": "Nessuna bacheca {{boardType}}",
"hideBoards": "Nascondi bacheche",
"viewBoards": "Visualizza bacheche",
"deletedPrivateBoardsCannotbeRestored": "Le bacheche cancellate non possono essere ripristinate. Selezionando 'Cancella solo bacheca', le immagini verranno spostate nella bacheca \"Non categorizzato\" privata dell'autore dell'immagine.",
"updateBoardError": "Errore durante l'aggiornamento della bacheca"
"deletedPrivateBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\", le immagini verranno spostate in uno stato privato e non categorizzato per l'autore dell'immagine.",
"updateBoardError": "Errore durante l'aggiornamento della bacheca",
"uncategorizedImages": "Immagini non categorizzate",
"deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -2003,12 +2008,16 @@
"stagingOnCanvas": "Genera immagini nella",
"ipAdapterMethod": {
"full": "Stile e Composizione",
"style": "Solo Stile",
"style": "Stile (semplice)",
"composition": "Solo Composizione",
"ipAdapterMethod": "Modalità",
"fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento."
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione. Precedentemente chiamato \"Solo stile\".",
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento.",
"styleStrong": "Stile (forte)",
"styleStrongDesc": "Applica uno stile visivo forte, con un'influenza sulla composizione leggermente ridotta.",
"stylePrecise": "Stile (preciso)",
"stylePreciseDesc": "Applica uno stile visivo preciso, eliminando l'influenza del soggetto."
},
"showingType": "Mostra {{type}}",
"dynamicGrid": "Griglia dinamica",
@@ -2290,7 +2299,7 @@
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
@@ -2339,7 +2348,11 @@
"lowest": "Il più basso",
"medium": "Medio",
"highest": "La più alta"
}
},
"denoiseLimit": "Limite di riduzione del rumore",
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
"imageNoise": "Rumore dell'immagine"
},
"ui": {
"tabs": {
@@ -2439,8 +2452,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"CogView4: supporto per i modelli CogView4 in Tela e Flussi di lavoro.",
"Dipendenze aggiornate: Invoke ora funziona con l'ultima versione delle sue dipendenze, tra cui Python 3.12 e Pytorch 2.6.0."
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
]
},
"system": {

View File

@@ -392,7 +392,7 @@
"title": "全選択"
},
"addNode": {
"desc": "ノード追加メニューを開く",
"desc": "ノード追加メニューを開く",
"title": "ノードを追加"
},
"pasteSelectionWithEdges": {
@@ -652,7 +652,9 @@
"filterModels": "フィルターモデル",
"modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
"manageModels": "モデル管理",
"hfTokenReset": "ハギングフェイストークンリセット"
"hfTokenReset": "ハギングフェイストークンリセット",
"relatedModels": "関連のあるモデル",
"showOnlyRelatedModels": "関連している"
},
"parameters": {
"images": "画像",
@@ -872,8 +874,8 @@
"problemDeletingWorkflow": "ワークフローが削除された問題",
"imageNotLoadedDesc": "画像を見つけられません",
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
"imagen3IncompatibleGenerationMode": "Google Imagen3 はテキストから画像への生成のみをサポートしています.画像から画像,インペインティング,アウトペインティングタスクには他のモデルをご利用ください.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください."
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングタスクには他のモデルを使用してください."
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -1154,11 +1156,11 @@
"unknownField": "不明なフィールド",
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
"loadingTemplates": "読み込み中 {{name}}",
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします.",
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
"validateConnections": "接続とグラフを確認する",
"saveToGallery": "ギャラリーに保存",
"newWorkflowDesc": "新しいワークフローを作りますか?",
"unknownFieldType": "$t(nodes.unknownField)型:{type}}",
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
"unableToLoadWorkflow": "ワークフローが読み込めません",
"unableToValidateWorkflow": "ワークフローを確認できません",
@@ -1201,13 +1203,13 @@
"downloadBoard": "ボードをダウンロード",
"changeBoard": "ボードを変更",
"loading": "ロード中...",
"topMessage": "このボードには、以下の機能で使用されている画像が含まれています",
"bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。",
"topMessage": "この選択には、の機能で使用される画像が含まれています:",
"bottomMessage": "この画像を削除すると、現在利用している機能はリセットされます。",
"clearSearch": "検索をクリア",
"deleteBoard": "ボードの削除",
"deleteBoardAndImages": "ボードと画像の削除",
"deleteBoardOnly": "ボードのみ削除",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
"deletedBoardsCannotbeRestored": "削除たボードと画像は復元できません。ボードのみ削除を選択すると画像は未分類の状態になります。",
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
"hideBoards": "ボードを隠す",
"assetsWithCount_other": "{{count}} のアセット",
@@ -1222,9 +1224,12 @@
"imagesWithCount_other": "{{count}} の画像",
"updateBoardError": "ボード更新エラー",
"selectedForAutoAdd": "自動追加に選択済み",
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。ボードのみ削除を選択すると画像は作成者に対して非公開の未分類状態になります。",
"noBoards": "{{boardType}} ボードがありません",
"viewBoards": "ボードを表示"
"viewBoards": "ボードを表示",
"uncategorizedImages": "分類されていない画像",
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
},
"invocationCache": {
"invocationCache": "呼び出しキャッシュ",
@@ -1247,7 +1252,8 @@
"paramRatio": {
"heading": "縦横比",
"paragraphs": [
"生成された画像の縦横比。"
"生成された画像の縦横比。",
"SD1.5 モデルの場合は 512x512 に相当する画像サイズ (ピクセル数) が推奨され, SDXL モデルの場合は 1024x1024 に相当するサイズが推奨されます."
]
},
"regionalGuidanceAndReferenceImage": {
@@ -1289,25 +1295,49 @@
]
},
"paramUpscaleMethod": {
"heading": "アップスケール手法"
"heading": "アップスケール手法",
"paragraphs": [
"高解像度修正のために画像を拡大するために使用される方法。"
]
},
"upscaleModel": {
"heading": "アップスケールモデル"
"heading": "アップスケールモデル",
"paragraphs": [
"アップスケールモデルは、ディテールを追加する前に画像を出力サイズに合わせて拡大縮小します。サポートされているアップスケールモデルであればどれでも使用できますが、写真や線画など、特定の種類の画像に特化したモデルもあります。"
]
},
"paramAspect": {
"heading": "縦横比"
"heading": "縦横比",
"paragraphs": [
"生成される画像のアスペクト比。比率を変更すると、幅と高さもそれに応じて更新されます。",
"「最適化」は、選択したモデルの幅と高さを最適な寸法に設定します。"
]
},
"refinerSteps": {
"heading": "ステップ"
"heading": "ステップ",
"paragraphs": [
"生成プロセスのリファイナー部分で実行されるステップの数。",
"生成ステップと似ています。"
]
},
"paramVAE": {
"heading": "VAE"
"heading": "VAE",
"paragraphs": [
"AI 出力を最終画像に変換するために使用されるモデル。"
]
},
"scale": {
"heading": "スケール"
"heading": "スケール",
"paragraphs": [
"スケールは出力画像のサイズを制御し、入力画像の解像度の倍数に基づいて決定されます。例えば、1024x1024の画像を2倍に拡大すると、2048x2048の出力が生成されます。"
]
},
"refinerScheduler": {
"heading": "スケジューラー"
"heading": "スケジューラー",
"paragraphs": [
"生成プロセスのリファイナー部分で使用されるスケジューラ。",
"生成スケジューラに似ています。"
]
},
"compositingCoherenceMode": {
"heading": "モード",
@@ -1316,13 +1346,23 @@
]
},
"paramModel": {
"heading": "モデル"
"heading": "モデル",
"paragraphs": [
"生成に使用されるモデル。異なるモデルは、異なる美的結果とコンテンツを生成するように特化するようにトレーニングされています。"
]
},
"paramHeight": {
"heading": "高さ"
"heading": "高さ",
"paragraphs": [
"生成される画像の高さ。8の倍数にする必要があります。"
]
},
"paramSteps": {
"heading": "ステップ"
"heading": "ステップ",
"paragraphs": [
"各生成で実行されるステップの数.",
"通常, ステップ数が多いほど, より高品質な画像が作成されますが生成時間も長くなります."
]
},
"ipAdapterMethod": {
"heading": "モード",
@@ -1331,10 +1371,18 @@
]
},
"paramSeed": {
"heading": "シード"
"heading": "シード",
"paragraphs": [
"生成に使用する始動ノイズを制御します.",
"同じ生成設定で同一の結果を生成するには, 「ランダム」オプションを無効にします."
]
},
"paramIterations": {
"heading": "生成回数"
"heading": "生成回数",
"paragraphs": [
"生成する画像の数。",
"動的プロンプトが有効になっている場合、各プロンプトはこの回数生成されます。"
]
},
"controlNet": {
"heading": "ControlNet",
@@ -1343,16 +1391,29 @@
]
},
"paramWidth": {
"heading": "幅"
"heading": "幅",
"paragraphs": [
"生成される画像の幅。8の倍数にする必要があります。"
]
},
"lora": {
"heading": "LoRA"
"heading": "LoRA",
"paragraphs": [
"ベースモデルと組み合わせて使用する軽量モデル."
]
},
"loraWeight": {
"heading": "重み"
"heading": "重み",
"paragraphs": [
"LoRA の重み. 重みを大きくすると, 最終的な画像への影響が大きくなります."
]
},
"patchmatchDownScaleSize": {
"heading": "Downscale"
"heading": "Downscale",
"paragraphs": [
"埋め込む前にどの程度のダウンスケーリングが行われるか。",
"ダウンスケーリングを大きくするとパフォーマンスは向上しますが、品質は低下します。"
]
},
"controlNetWeight": {
"heading": "重み",
@@ -1438,7 +1499,8 @@
"heading": "ダイナミックプロンプト",
"paragraphs": [
"ダイナミック プロンプトは,単一のプロンプトを複数のプロンプトに解析します.",
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます."
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます.",
"1 つのプロンプト内で構文を何度でも使用できますが, 生成されるプロンプトの数を Max Prompts 設定で制限するようにしてください."
]
},
"controlNetResizeMode": {
@@ -1458,6 +1520,159 @@
"paragraphs": [
"プロンプトまたは コントロールネットのいずれかを重視します."
]
},
"noiseUseCPU": {
"paragraphs": [
"CPU または GPU でノイズを生成するかどうかを制御します.",
"CPU ノイズを有効にすると, 特定のシードによってどのマシンでも同じ画像が生成されます.",
"CPU ノイズを有効にしてもパフォーマンスに影響はありません."
],
"heading": "CPUイズを使用する"
},
"dynamicPromptsMaxPrompts": {
"heading": "最大プロンプト",
"paragraphs": [
"ダイナミック プロンプトによって生成できるプロンプトの数を制限します."
]
},
"dynamicPromptsSeedBehaviour": {
"paragraphs": [
"プロンプトを生成するときにシードがどのように使用されるかを制御します.",
"反復ごとに固有のシードを使用します. 単一のシードでプロンプトのバリエーションを試す場合に使用します.",
"たとえば, プロンプトが 5 つある場合, 各画像は同じシードを使用します.",
"「画像ごと」では, 画像ごとに固有のシード値が使用されます. これにより、より多くのバリエーションが得られます."
],
"heading": "シード行動"
},
"imageFit": {
"paragraphs": [
"初期画像の幅と高さを出力画像に合わせてサイズ変更します. 有効にすることをお勧めします."
],
"heading": "初期画像を出力サイズに合わせる"
},
"infillMethod": {
"heading": "充填方法",
"paragraphs": [
"アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
]
},
"paramGuidance": {
"paragraphs": [
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
"ガイダンス値が高すぎると過飽和状態になる可能性があり、ガイダンス値が高すぎるか低すぎると生成結果に歪みが生じる可能性があります。ガイダンスはFLUX DEVモデルにのみ適用されます。"
],
"heading": "ガイダンス"
},
"paramDenoisingStrength": {
"paragraphs": [
"生成されたイメージがラスター レイヤーとどの程度異なるかを制御します。",
"強度が低いほど、結合された表示ラスターレイヤーに近くなります。強度が高いほど、グローバルプロンプトに大きく依存します。",
"表示されるコンテンツを持つラスター レイヤーがない場合、この設定は無視されます。"
],
"heading": "ディノイジングストレングス"
},
"refinerStart": {
"heading": "リファイナースタート",
"paragraphs": [
"生成プロセスのどの時点でリファイナーが使用され始めるか。",
"0 はリファイナーが生成プロセス全体で使用されることを意味し、0.8 は、リファイナーが生成プロセスの最後の 20% で使用されることを意味します。"
]
},
"optimizedDenoising": {
"heading": "イメージtoイメージの最適化",
"paragraphs": [
"「イメージtoイメージを最適化」を有効にすると、Fluxモデルを用いた画像間変換およびインペインティング変換において、より段階的なイズ除去強度スケールが適用されます。この設定により、画像に適用される変化量を制御する能力が向上しますが、標準のイズ除去強度スケールを使用したい場合はオフにすることができます。この設定は現在調整中で、ベータ版です。"
]
},
"refinerPositiveAestheticScore": {
"heading": "ポジティブ美的スコア",
"paragraphs": [
"トレーニング データに基づいて、美的スコアの高い画像に類似するように生成を重み付けします。"
]
},
"paramCFGScale": {
"paragraphs": [
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
"CFG スケールの値が高すぎると、飽和しすぎて生成結果が歪む可能性があります。 "
],
"heading": "CFGスケール"
},
"paramVAEPrecision": {
"paragraphs": [
"VAE エンコードおよびデコード時に使用される精度。",
"Fp16/Half 精度は、画像のわずかな変化を犠牲にして、より効率的です。"
],
"heading": "VAE精度"
},
"refinerModel": {
"heading": "リファイナーモデル",
"paragraphs": [
"生成プロセスの精製部分で使用されるモデル。",
"世代モデルに似ています。"
]
},
"refinerCfgScale": {
"heading": "CFGスケール",
"paragraphs": [
"プロンプトが生成プロセスに与える影響を制御する。",
"生成CFG スケールに似ています。"
]
},
"seamlessTilingYAxis": {
"heading": "シームレスタイリングY軸",
"paragraphs": [
"画像を垂直軸に沿ってシームレスに並べます。"
]
},
"scaleBeforeProcessing": {
"heading": "プロセス前のスケール値",
"paragraphs": [
"「自動」は、画像生成プロセスの前に、選択した領域をモデルに最適なサイズに拡大縮小します。",
"「手動」では、画像生成プロセスの前に、選択した領域を拡大縮小する幅と高さを選択できます。"
]
},
"creativity": {
"heading": "クリエイティビティ",
"paragraphs": [
"クリエイティビティは、ディテールを追加する際のモデルに与えられる自由度を制御します。クリエイティビティが低いと元のイメージに近いままになり、クリエイティビティが高いとより多くの変化を加えることができます。プロンプトを使用する場合、クリエイティビティが高いとプロンプトの影響が増します。"
]
},
"paramHrf": {
"heading": "高解像度修正を有効にする",
"paragraphs": [
"モデルに最適な解像度よりも高い解像度で、高品質な画像を生成します。通常、生成された画像内の重複を防ぐために使用されます。"
]
},
"seamlessTilingXAxis": {
"heading": "シームレスタイリングX軸",
"paragraphs": [
"画像を水平軸に沿ってシームレスに並べます。"
]
},
"paramCFGRescaleMultiplier": {
"paragraphs": [
"ゼロ端末 SNR (ztsnr) を使用してトレーニングされたモデルに使用される、CFG ガイダンスのリスケールマルチプライヤー。",
"これらのモデルの場合、推奨値は 0.7 です。"
],
"heading": "CFG リスケールマルチプライヤー"
},
"structure": {
"heading": "ストラクチャ",
"paragraphs": [
"ストラクチャは、出力画像が元のレイアウトにどれだけ忠実に従うかを制御します。低いストラクチャでは大幅な変更が可能ですが、高いストラクチャでは元の構成とレイアウトが厳密に維持されます。"
]
},
"refinerNegativeAestheticScore": {
"paragraphs": [
"トレーニング データに基づいて、美観スコアが低い画像に類似するように生成に重み付けします。"
],
"heading": "ネガティブ美的スコア"
},
"fluxDevLicense": {
"heading": "非商用ライセンス",
"paragraphs": [
"FLUX.1 [dev]モデルは、FLUX [dev]非商用ライセンスに基づいてライセンスされています。Invokeでこのモデルタイプを商用目的で使用する場合は、当社のウェブサイトをご覧ください。"
]
}
},
"accordions": {
@@ -1630,7 +1845,106 @@
"workflows": "ワークフロー",
"ascending": "昇順",
"name": "名前",
"descending": "降順"
"descending": "降順",
"searchPlaceholder": "名前、説明、タグで検索",
"projectWorkflows": "プロジェクトワークフロー",
"searchWorkflows": "ワークフローを検索",
"updated": "アップデート",
"published": "公表",
"builder": {
"label": "ラベル",
"containerPlaceholder": "空のコンテナ",
"showDescription": "説明を表示",
"emptyRootPlaceholderEditMode": "開始するには、フォーム要素またはノード フィールドをここにドラッグします。",
"divider": "仕切り",
"deleteAllElements": "すべてのフォーム要素を削除",
"heading": "見出し",
"nodeField": "ノードフィールド",
"zoomToNode": "ノードにズーム",
"dropdown": "ドロップダウン",
"resetOptions": "オプションをリセット",
"both": "両方",
"builder": "フォームビルダー",
"text": "テキスト",
"row": "行",
"multiLine": "マルチライン",
"resetAllNodeFields": "すべてのノードフィールドをリセット",
"slider": "スライダー",
"layout": "レイアウト",
"addToForm": "フォームに追加",
"headingPlaceholder": "空の見出し",
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
"component": "コンポーネント",
"textPlaceholder": "空のテキスト",
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
"addOption": "オプションを追加",
"singleLine": "単線",
"numberInput": "数値入力",
"column": "列",
"container": "コンテナ",
"containerRowLayout": "コンテナ(行レイアウト)",
"containerColumnLayout": "コンテナ(列レイアウト)",
"maximum": "最大",
"published": "公開済み",
"publishedWorkflowOutputs": "アウトプット",
"minimum": "最小",
"publish": "公開",
"unpublish": "非公開",
"publishedWorkflowInputs": "インプット"
},
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
"unnamedWorkflow": "名前のないワークフロー",
"download": "ダウンロード",
"savingWorkflow": "ワークフローを保存しています...",
"problemSavingWorkflow": "ワークフローの保存に関する問題",
"convertGraph": "グラフを変換",
"downloadWorkflow": "ファイルに保存",
"saveWorkflow": "ワークフローを保存",
"userWorkflows": "ユーザーワークフロー",
"yourWorkflows": "あなたのワークフロー",
"edit": "編集",
"workflowLibrary": "ワークフローライブラリ",
"workflowSaved": "ワークフローが保存されました",
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
"workflowCleared": "ワークフローが作成されました",
"autoLayout": "オートレイアウト",
"view": "ビュー",
"saveChanges": "変更を保存",
"noDescription": "説明なし",
"recommended": "あなたへのおすすめ",
"noRecentWorkflows": "最近のワークフローがありません",
"problemLoading": "ワークフローのローディングに関する問題",
"newWorkflowCreated": "新しいワークフローが作成されました",
"noWorkflows": "ワークフローがありません",
"copyShareLink": "共有リンクをコピー",
"copyShareLinkForWorkflow": "ワークフローの共有リンクをコピー",
"workflowThumbnail": "ワークフローサムネイル",
"loadWorkflow": "$t(common.load) ワークフロー",
"shared": "共有",
"openWorkflow": "ワークフローを開く",
"emptyStringPlaceholder": "<空の文字列>",
"browseWorkflows": "ワークフローを閲覧する",
"saveWorkflowAs": "ワークフローとして保存",
"private": "プライベート",
"deselectAll": "すべて選択解除",
"delete": "削除",
"openLibrary": "ライブラリを開く",
"loadMore": "もっと読み込む",
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
"created": "作成されました",
"workflowEditorMenu": "ワークフローエディターメニュー",
"defaultWorkflows": "デフォルトワークフロー",
"allLoaded": "すべてのワークフローが読み込まれました",
"filterByTags": "タグでフィルター",
"recentlyOpened": "最近開いた",
"opened": "オープン",
"deleteWorkflow": "ワークフローを削除",
"deleteWorkflow2": "このワークフローを削除してもよろしいですか? 元に戻すことはできません。",
"loadFromGraph": "グラフからワークフローをロード",
"workflowName": "ワークフロー名",
"loading": "ワークフローをロードしています",
"uploadWorkflow": "ファイルからロードする"
},
"system": {
"logNamespaces": {

View File

@@ -30,7 +30,7 @@
"boards": "Bảng",
"selectedForAutoAdd": "Đã Chọn Để Tự động thêm",
"myBoard": "Bảng Của Tôi",
"deletedPrivateBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
"deletedPrivateBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
"changeBoard": "Thay Đổi Bảng",
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
"updateBoardError": "Lỗi khi cập nhật Bảng",
@@ -41,18 +41,21 @@
"deleteBoard": "Xoá Bảng",
"deleteBoardAndImages": "Xoá Bảng Lẫn Hình ảnh",
"deleteBoardOnly": "Chỉ Xoá Bảng",
"deletedBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
"bottomMessage": "Xoá bảng này lẫn ảnh của nó sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
"deletedBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
"bottomMessage": "Việc xóa ảnh sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
"move": "Di Chuyển",
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
"topMessage": "Lựa chọn này chứa ảnh được dùng với những tính năng sau:",
"uncategorized": "Chưa Sắp Xếp",
"archived": "Được Lưu Trữ",
"loading": "Đang Tải...",
"selectBoard": "Chọn Bảng",
"archiveBoard": "Lưu trữ Bảng",
"unarchiveBoard": "Ngừng Lưu Trữ Bảng",
"assetsWithCount_other": "{{count}} tài nguyên"
"assetsWithCount_other": "{{count}} tài nguyên",
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
},
"gallery": {
"swapImages": "Đổi Hình Ảnh",
@@ -789,7 +792,9 @@
"modelPickerFallbackNoModelsInstalled2": "Nhấp vào <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải.",
"modelPickerFallbackNoModelsInstalled": "Không Có Sẵn Model.",
"manageModels": "Quản Lý Model",
"hfTokenReset": "Làm Mới HF Token"
"hfTokenReset": "Làm Mới HF Token",
"relatedModels": "Model Liên Quan",
"showOnlyRelatedModels": "Liên Quan"
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -1715,12 +1720,16 @@
"fitBboxToLayers": "Xếp Vừa Hộp Giới Hạn Vào Layer",
"ipAdapterMethod": {
"full": "Phong Cách Và Thành Phần",
"style": "Chỉ Lấy Phong Cách",
"style": "Phong Cách (Đơn Giản)",
"composition": "Chỉ Lấy Thành Phần",
"ipAdapterMethod": "Cách Thức",
"compositionDesc": "Áp dụng cách trình bày và bỏ qua phong cách mẫu.",
"fullDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) & thành phần (cách trình bày).",
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày."
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày. Tên trước đây là Chỉ Lấy Phong Cách.",
"styleStrong": "Phong Cách (Mạnh Mẽ)",
"styleStrongDesc": "Áp dụng cách trình bày mạnh mẽ, với một chút giảm nhẹ ảnh hưởng lên thành phần.",
"stylePrecise": "Phong Cách (Chính Xác)",
"stylePreciseDesc": "Áp dụng cách trình bày chính xác, loại bỏ các chủ thể ảnh hưởng."
},
"deletePrompt": "Xoá Lệnh",
"rasterLayer": "Layer Dạng Raster",
@@ -2053,7 +2062,7 @@
"colorPicker": "Chọn Màu"
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
@@ -2102,7 +2111,11 @@
"imageInfluence": "Ảnh Chi Phối",
"medium": "Vừa",
"highest": "Cao Nhất"
}
},
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
"imageNoise": "Độ Nhiễu Hình Ảnh",
"denoiseLimit": "Giới Hạn Khử Nhiễu",
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2243,8 +2256,8 @@
"problemUnpublishingWorkflowDescription": "Có vấn đề khi ngừng đăng tải workflow. Vui lòng thử lại sau.",
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
"imagen3IncompatibleGenerationMode": "Google Imagen3 chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint."
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
},
"ui": {
"tabs": {
@@ -2426,8 +2439,8 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"CogView4: Hỗ trợ model CogView4 ở Canvas và Workflow.",
"Cập nhật Dependency: Invoke bây giờ sẽ chạy trên phiên bản mới nhất của các dependency của nó, bao gồm Python 3.12 và Pytorch 2.6.0"
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
]
},
"upsell": {

View File

@@ -10,7 +10,9 @@ import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatch
import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph';
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
import { buildFluxKontextGraph } from 'features/nodes/util/graph/generation/buildFluxKontextGraph';
import { buildImagen3Graph } from 'features/nodes/util/graph/generation/buildImagen3Graph';
import { buildImagen4Graph } from 'features/nodes/util/graph/generation/buildImagen4Graph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -54,8 +56,12 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
return await buildCogView4Graph(state, manager);
case 'imagen3':
return await buildImagen3Graph(state, manager);
case 'imagen4':
return await buildImagen4Graph(state, manager);
case 'chatgpt-4o':
return await buildChatGPT4oGraph(state, manager);
case 'flux-kontext':
return await buildFluxKontextGraph(state, manager);
default:
assert(false, `No graph builders for base ${base}`);
}

View File

@@ -29,7 +29,7 @@ export type AppFeature =
| 'hfToken'
| 'retryQueueItem'
| 'cancelAndClearAll'
| 'chatGPT4oModels';
| 'chatGPT4oHigh';
/**
* A disable-able Stable Diffusion feature
*/

View File

@@ -83,7 +83,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
}
} else {
let imageDTOs: ImageDTO[] = [];
if (isClientSideUploadEnabled) {
if (isClientSideUploadEnabled && files.length > 1) {
imageDTOs = await Promise.all(files.map((file, i) => clientSideUpload(file, i)));
} else {
imageDTOs = await uploadImages(

View File

@@ -0,0 +1,108 @@
import { useAppStore } from 'app/store/nanostores/store';
import type { Dimensions } from 'features/controlLayers/store/types';
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
import { debounce } from 'lodash-es';
import { type RefObject, useCallback, useEffect, useMemo } from 'react';
type Options = {
trackWidth: boolean;
trackHeight: boolean;
initialWidth?: number;
initialHeight?: number;
};
/**
* Persists the width and/or height of a text area to redux.
* @param id The unique id of this textarea, used as key to storage
* @param ref A ref to the textarea element
* @param options.trackWidth Whether to track width
* @param options.trackHeight Whether to track height
* @param options.initialWidth An optional initial width in pixels
* @param options.initialHeight An optional initial height in pixels
*/
export const usePersistedTextAreaSize = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
const { dispatch, getState } = useAppStore();
const onResize = useCallback(
(size: Partial<Dimensions>) => {
dispatch(textAreaSizesStateChanged({ id, size }));
},
[dispatch, id]
);
const debouncedOnResize = useMemo(() => debounce(onResize, 300), [onResize]);
useEffect(() => {
const el = ref.current;
if (!el) {
return;
}
// Nothing to do here if we are not tracking anything.
if (!options.trackHeight && !options.trackWidth) {
return;
}
// Before registering the observer, grab the stored size from state - we may need to restore the size.
const storedSize = selectUiSlice(getState()).textAreaSizes[id];
// Prefer to restore the stored size, falling back to initial size if it exists
if (storedSize?.width !== undefined) {
el.style.width = `${storedSize.width}px`;
} else if (options.initialWidth !== undefined) {
el.style.width = `${options.initialWidth}px`;
}
if (storedSize?.height !== undefined) {
el.style.height = `${storedSize.height}px`;
} else if (options.initialHeight !== undefined) {
el.style.height = `${options.initialHeight}px`;
}
let currentHeight = el.offsetHeight;
let currentWidth = el.offsetWidth;
const resizeObserver = new ResizeObserver(() => {
// We only want to push the changes if a tracked dimension changes
let didChange = false;
const newSize: Partial<Dimensions> = {};
if (options.trackHeight) {
if (el.offsetHeight !== currentHeight) {
didChange = true;
currentHeight = el.offsetHeight;
}
newSize.height = currentHeight;
}
if (options.trackWidth) {
if (el.offsetWidth !== currentWidth) {
didChange = true;
currentWidth = el.offsetWidth;
}
newSize.width = currentWidth;
}
if (didChange) {
debouncedOnResize(newSize);
}
});
resizeObserver.observe(el);
return () => {
debouncedOnResize.cancel();
resizeObserver.disconnect();
};
}, [
debouncedOnResize,
dispatch,
getState,
id,
options.initialHeight,
options.initialWidth,
options.trackHeight,
options.trackWidth,
ref,
]);
};

View File

@@ -0,0 +1,92 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import type { GroupBase } from 'chakra-react-select';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { useTranslation } from 'react-i18next';
import type { AnyModelConfig } from 'services/api/types';
import { useGroupedModelCombobox } from './useGroupedModelCombobox';
import { useRelatedModelKeys } from './useRelatedModelKeys';
import { useSelectedModelKeys } from './useSelectedModelKeys';
type UseRelatedGroupedModelComboboxArg<T extends AnyModelConfig> = {
modelConfigs: T[];
selectedModel?: ModelIdentifierField | null;
onChange: (value: T | null) => void;
getIsDisabled?: (model: T) => boolean;
isLoading?: boolean;
groupByType?: boolean;
};
// Custom hook to overlay the grouped model combobox with related models on top!
// Cleaner than hooking into useGroupedModelCombobox with a flag to enable/disable the related models
// Also allows for related models to be shown conditionally with some pretty simple logic if it ends up as a config flag.
type UseRelatedGroupedModelComboboxReturn = {
value: ComboboxOption | undefined | null;
options: GroupBase<ComboboxOption>[];
onChange: ComboboxOnChange;
placeholder: string;
noOptionsMessage: () => string;
};
export function useRelatedGroupedModelCombobox<T extends AnyModelConfig>({
modelConfigs,
selectedModel,
onChange,
isLoading = false,
getIsDisabled,
groupByType,
}: UseRelatedGroupedModelComboboxArg<T>): UseRelatedGroupedModelComboboxReturn {
const { t } = useTranslation();
const selectedKeys = useSelectedModelKeys();
const relatedKeys = useRelatedModelKeys(selectedKeys);
// Base grouped options
const base = useGroupedModelCombobox({
modelConfigs,
selectedModel,
onChange,
getIsDisabled,
isLoading,
groupByType,
});
// If there are no related models for the current selection, just return base
if (relatedKeys.size === 0) {
return base;
}
const relatedOptions: ComboboxOption[] = [];
const updatedGroups: GroupBase<ComboboxOption>[] = [];
for (const group of base.options) {
const remainingOptions: ComboboxOption[] = [];
for (const option of group.options) {
if (relatedKeys.has(option.value)) {
relatedOptions.push({ ...option, label: `* ${option.label}` });
} else {
remainingOptions.push(option);
}
}
if (remainingOptions.length > 0) {
updatedGroups.push({
label: group.label,
options: remainingOptions,
});
}
}
const finalOptions: GroupBase<ComboboxOption>[] =
relatedOptions.length > 0
? [{ label: t('modelManager.relatedModels'), options: relatedOptions }, ...updatedGroups]
: updatedGroups;
return {
...base,
options: finalOptions,
};
}

View File

@@ -0,0 +1,14 @@
import { useMemo } from 'react';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';
/**
* Fetches related model keys for a given set of selected model keys.
* Returns a Set<string> for fast lookup.
*/
export const useRelatedModelKeys = (selectedKeys: Set<string>) => {
const { data: related = [] } = useGetRelatedModelIdsBatchQuery([...selectedKeys], {
skip: selectedKeys.size === 0,
});
return useMemo(() => new Set(related), [related]);
};

View File

@@ -0,0 +1,34 @@
import { useAppSelector } from 'app/store/storeHooks';
/**
* Gathers all currently selected model keys from parameters and loras.
* This includes the main model, VAE, refiner model, controlnet, and loras.
*/
export const useSelectedModelKeys = () => {
return useAppSelector((state) => {
const keys = new Set<string>();
const main = state.params.model;
const vae = state.params.vae;
const refiner = state.params.refinerModel;
const controlnet = state.params.controlLora;
const loras = state.loras.loras.map((l) => l.model);
if (main) {
keys.add(main.key);
}
if (vae) {
keys.add(vae.key);
}
if (refiner) {
keys.add(refiner.key);
}
if (controlnet) {
keys.add(controlnet.key);
}
for (const lora of loras) {
keys.add(lora.key);
}
return keys;
});
};

View File

@@ -0,0 +1,28 @@
import { Spinner } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useAllEntityAdapters } from 'features/controlLayers/contexts/EntityAdapterContext';
import { computed } from 'nanostores';
import { memo, useMemo } from 'react';
export const CanvasBusySpinner = memo(() => {
const canvasManager = useCanvasManager();
const allEntityAdapters = useAllEntityAdapters();
const $isPendingRectCalculation = useMemo(
() =>
computed(
allEntityAdapters.map(({ transformer }) => transformer.$isPendingRectCalculation),
(...values) => values.some((v) => v)
),
[allEntityAdapters]
);
const isPendingRectCalculation = useStore($isPendingRectCalculation);
const isRasterizing = useStore(canvasManager.stateApi.$isRasterizing);
const isCompositing = useStore(canvasManager.compositor.$isBusy);
if (isRasterizing || isCompositing || isPendingRectCalculation) {
return <Spinner opacity={0.3} />;
}
return null;
});
CanvasBusySpinner.displayName = 'CanvasBusySpinner';

View File

@@ -12,6 +12,7 @@ import { FocusRegionWrapper } from 'common/components/FocusRegionWrapper';
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
import { CanvasAlertsSelectedEntityStatus } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSelectedEntityStatus';
import { CanvasAlertsSendingToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSendingTo';
import { CanvasBusySpinner } from 'features/controlLayers/components/CanvasBusySpinner';
import { CanvasContextMenuGlobalMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuGlobalMenuItems';
import { CanvasContextMenuSelectedEntityMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuSelectedEntityMenuItems';
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
@@ -106,6 +107,9 @@ export const CanvasMainPanelContent = memo(() => {
<MenuContent />
</Menu>
</Flex>
<Flex position="absolute" bottom={4} insetInlineEnd={4}>
<CanvasBusySpinner />
</Flex>
</CanvasManagerProviderGate>
</Flex>
)}

View File

@@ -2,10 +2,11 @@ import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppStore } from 'app/store/nanostores/store';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoLayer } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { replaceCanvasEntityObjectsWithImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { Trans } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';
@@ -23,27 +24,27 @@ export const ControlLayerSettingsEmptyState = memo(() => {
const onClickGalleryButton = useCallback(() => {
dispatch(activeTabCanvasRightPanelChanged('gallery'));
}, [dispatch]);
const pullBboxIntoLayer = usePullBboxIntoLayer(entityIdentifier);
const components = useMemo(
() => ({
UploadButton: (
<Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
),
GalleryButton: (
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
PullBboxButton: (
<Button onClick={pullBboxIntoLayer} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}),
[isBusy, onClickGalleryButton, pullBboxIntoLayer, uploadApi]
);
return (
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
<Text textAlign="center" color="base.300">
<Trans
i18nKey="controlLayers.controlLayerEmptyState"
components={{
UploadButton: (
<Button
isDisabled={isBusy}
size="sm"
variant="link"
color="base.300"
{...uploadApi.getUploadButtonProps()}
/>
),
GalleryButton: (
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}}
/>
<Trans i18nKey="controlLayers.controlLayerEmptyState" components={components} />
</Text>
<input {...uploadApi.getUploadInputProps()} />
</Flex>

View File

@@ -30,6 +30,16 @@ export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
value: 'style',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleDesc') : undefined,
},
{
label: t('controlLayers.ipAdapterMethod.styleStrong'),
value: 'style_strong',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleStrongDesc') : undefined,
},
{
label: t('controlLayers.ipAdapterMethod.stylePrecise'),
value: 'style_precise',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.stylePreciseDesc') : undefined,
},
{
label: t('controlLayers.ipAdapterMethod.composition'),
value: 'composition',

View File

@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { InpaintMaskSettings } from 'features/controlLayers/components/InpaintMask/InpaintMaskSettings';
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -28,6 +29,7 @@ export const InpaintMask = memo(({ id }: Props) => {
<Spacer />
<CanvasEntityHeaderCommonActions />
</CanvasEntityHeader>
<InpaintMaskSettings />
</CanvasEntityContainer>
</CanvasEntityStateGate>
</InpaintMaskAdapterGate>

View File

@@ -0,0 +1,27 @@
// import { Button, Flex } from '@invoke-ai/ui-library';
// import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
// import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
// import { useTranslation } from 'react-i18next';
// import { PiPlusBold } from 'react-icons/pi';
// Removed buttons because denoise limit is not helpful for many architectures.
// Users can access these via the right-click menu instead.
// If buttons for noise or new features are deemed important in the future, add them back here.
export const InpaintMaskAddButtons = () => {
// Buttons are temporarily hidden. To restore, uncomment the code below.
return null;
// const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
// const { t } = useTranslation();
// const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
// const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
// return (
// <Flex w="full" p={2} justifyContent="center">
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskDenoiseLimit}>
// {t('controlLayers.denoiseLimit')}
// </Button>
// <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addInpaintMaskNoise}>
// {t('controlLayers.imageNoise')}
// </Button>
// </Flex>
// );
};

View File

@@ -0,0 +1,29 @@
import type { IconButtonProps } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';
type Props = Omit<IconButtonProps, 'aria-label'> & {
onDelete: () => void;
};
export const InpaintMaskDeleteModifierButton = memo(({ onDelete, ...rest }: Props) => {
const { t } = useTranslation();
return (
<IconButton
tooltip={t('common.delete')}
variant="link"
aria-label={t('common.delete')}
icon={<PiXBold />}
onClick={onDelete}
flexGrow={0}
size="sm"
p={0}
colorScheme="error"
{...rest}
/>
);
});
InpaintMaskDeleteModifierButton.displayName = 'InpaintMaskDeleteModifierButton';

View File

@@ -0,0 +1,70 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
inpaintMaskDenoiseLimitChanged,
inpaintMaskDenoiseLimitDeleted,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
export const InpaintMaskDenoiseLimitSlider = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selectDenoiseLimit = useMemo(
() =>
createSelector(
selectCanvasSlice,
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskDenoiseLimitSlider').denoiseLimit
),
[entityIdentifier]
);
const denoiseLimit = useAppSelector(selectDenoiseLimit);
const handleDenoiseLimitChange = useCallback(
(value: number) => {
dispatch(inpaintMaskDenoiseLimitChanged({ entityIdentifier, denoiseLimit: value }));
},
[dispatch, entityIdentifier]
);
const onDeleteDenoiseLimit = useCallback(() => {
dispatch(inpaintMaskDenoiseLimitDeleted({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
if (denoiseLimit === undefined) {
return null;
}
return (
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
<Flex justifyContent="space-between" w="full" alignItems="center">
<Text fontSize="sm">{t('controlLayers.denoiseLimit')}</Text>
<Flex alignItems="center" gap={1}>
<Text fontSize="sm">{denoiseLimit.toFixed(2)}</Text>
<InpaintMaskDeleteModifierButton onDelete={onDeleteDenoiseLimit} />
</Flex>
</Flex>
<Slider
aria-label={t('controlLayers.denoiseLimit')}
value={denoiseLimit}
min={0}
max={1}
step={0.01}
onChange={handleDenoiseLimitChange}
>
<SliderTrack>
<SliderFilledTrack />
</SliderTrack>
<SliderThumb />
</Slider>
</Flex>
);
});
InpaintMaskDenoiseLimitSlider.displayName = 'InpaintMaskDenoiseLimitSlider';

View File

@@ -7,6 +7,7 @@ import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/component
import { CanvasEntityMenuItemsMergeDown } from 'features/controlLayers/components/common/CanvasEntityMenuItemsMergeDown';
import { CanvasEntityMenuItemsSave } from 'features/controlLayers/components/common/CanvasEntityMenuItemsSave';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { InpaintMaskMenuItemsAddModifiers } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsAddModifiers';
import { InpaintMaskMenuItemsConvertToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsConvertToSubMenu';
import { InpaintMaskMenuItemsCopyToSubMenu } from 'features/controlLayers/components/InpaintMask/InpaintMaskMenuItemsCopyToSubMenu';
import { memo } from 'react';
@@ -20,6 +21,8 @@ export const InpaintMaskMenuItems = memo(() => {
<CanvasEntityMenuItemsDelete asIcon />
</IconMenuItemGroup>
<MenuDivider />
<InpaintMaskMenuItemsAddModifiers />
<MenuDivider />
<CanvasEntityMenuItemsTransform />
<MenuDivider />
<CanvasEntityMenuItemsMergeDown />

View File

@@ -0,0 +1,27 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useAddInpaintMaskDenoiseLimit, useAddInpaintMaskNoise } from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
export const InpaintMaskMenuItemsAddModifiers = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const isBusy = useCanvasIsBusy();
const addInpaintMaskNoise = useAddInpaintMaskNoise(entityIdentifier);
const addInpaintMaskDenoiseLimit = useAddInpaintMaskDenoiseLimit(entityIdentifier);
return (
<>
<MenuItem onClick={addInpaintMaskNoise} isDisabled={isBusy}>
{t('controlLayers.addImageNoise')}
</MenuItem>
<MenuItem onClick={addInpaintMaskDenoiseLimit} isDisabled={isBusy}>
{t('controlLayers.addDenoiseLimit')}
</MenuItem>
</>
);
});
InpaintMaskMenuItemsAddModifiers.displayName = 'InpaintMaskMenuItemsAddModifiers';

View File

@@ -0,0 +1,67 @@
import { Flex, Slider, SliderFilledTrack, SliderThumb, SliderTrack, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InpaintMaskDeleteModifierButton } from 'features/controlLayers/components/InpaintMask/InpaintMaskDeleteModifierButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { inpaintMaskNoiseChanged, inpaintMaskNoiseDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
export const InpaintMaskNoiseSlider = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selectNoiseLevel = useMemo(
() =>
createSelector(
selectCanvasSlice,
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskNoiseSlider').noiseLevel
),
[entityIdentifier]
);
const noiseLevel = useAppSelector(selectNoiseLevel);
const handleNoiseChange = useCallback(
(value: number) => {
dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: value }));
},
[dispatch, entityIdentifier]
);
const onDeleteNoise = useCallback(() => {
dispatch(inpaintMaskNoiseDeleted({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
if (noiseLevel === undefined) {
return null;
}
return (
<Flex direction="column" gap={1} w="full" px={2} pb={2}>
<Flex justifyContent="space-between" w="full" alignItems="center">
<Text fontSize="sm">{t('controlLayers.imageNoise')}</Text>
<Flex alignItems="center" gap={1}>
<Text fontSize="sm">{Math.round(noiseLevel * 100)}%</Text>
<InpaintMaskDeleteModifierButton onDelete={onDeleteNoise} />
</Flex>
</Flex>
<Slider
aria-label={t('controlLayers.imageNoise')}
value={noiseLevel}
min={0}
max={1}
step={0.01}
onChange={handleNoiseChange}
>
<SliderTrack>
<SliderFilledTrack />
</SliderTrack>
<SliderThumb />
</Slider>
</Flex>
);
});
InpaintMaskNoiseSlider.displayName = 'InpaintMaskNoiseSlider';

View File

@@ -0,0 +1,47 @@
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { InpaintMaskDenoiseLimitSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskDenoiseLimitSlider';
import { InpaintMaskNoiseSlider } from 'features/controlLayers/components/InpaintMask/InpaintMaskNoiseSlider';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';
const buildSelectHasDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
return entity.denoiseLimit !== undefined;
});
const buildSelectHasNoiseLevel = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) =>
createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier, 'InpaintMaskSettings');
return entity.noiseLevel !== undefined;
});
export const InpaintMaskSettings = memo(() => {
const entityIdentifier = useEntityIdentifierContext('inpaint_mask');
const selectHasDenoiseLimit = useMemo(() => buildSelectHasDenoiseLimit(entityIdentifier), [entityIdentifier]);
const selectHasNoiseLevel = useMemo(() => buildSelectHasNoiseLevel(entityIdentifier), [entityIdentifier]);
const hasDenoiseLimit = useAppSelector(selectHasDenoiseLimit);
const hasNoiseLevel = useAppSelector(selectHasNoiseLevel);
if (!hasNoiseLevel && !hasDenoiseLimit) {
// If we show the <InpaintMaskAddButtons /> below, we can remove this check.
// Until then, if there are no sliders to show for the mask settings, return null. This prevents rendering an
// empty settings wrapper div, which adds unnecessary space in the UI.
return null;
}
return (
<CanvasEntitySettingsWrapper>
{/* {!hasNoiseLevel && !hasDenoiseLimit && <InpaintMaskAddButtons />} */}
{hasNoiseLevel && <InpaintMaskNoiseSlider />}
{hasDenoiseLimit && <InpaintMaskDenoiseLimitSlider />}
</CanvasEntitySettingsWrapper>
);
});
InpaintMaskSettings.displayName = 'InpaintMaskSettings';
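Both this component and InpaintMaskNoiseSlider use the same selector-factory pattern: build a createSelector per entity, then pin it with useMemo keyed on the identifier, so components watching different masks keep independent memoization caches. A minimal standalone sketch of the pattern (the simplified state shape here is an assumption, for illustration only):
import { createSelector } from '@reduxjs/toolkit';
// Hypothetical, simplified canvas state for illustration.
type Entity = { id: string; noiseLevel?: number };
type Canvas = { inpaintMasks: { entities: Entity[] } };
// Factory: each entity id gets its own memoized selector, so two components
// watching different masks never invalidate each other's cache.
const buildSelectHasNoiseLevel = (id: string) =>
  createSelector(
    (canvas: Canvas) => canvas.inpaintMasks.entities,
    (entities) => entities.find((e) => e.id === id)?.noiseLevel !== undefined
  );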

View File

@@ -1,3 +1,4 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import {
$shift,
CompositeSlider,
@@ -16,7 +17,6 @@ import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { snapToNearest } from 'features/controlLayers/konva/util';
import { round } from 'lodash-es';
import { computed } from 'nanostores';
import type { KeyboardEvent } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
import { PiCaretDownBold, PiMagnifyingGlassMinusBold, PiMagnifyingGlassPlusBold } from 'react-icons/pi';
@@ -68,9 +68,16 @@ const sliderDefaultValue = mapRawValueToSliderValue(100);
const snapCandidates = marks.slice(1, marks.length - 1);
const inputFieldSx = {
paddingInlineEnd: 7,
_focusVisible: {
zIndex: 0,
},
} satisfies SystemStyleObject;
export const CanvasToolbarScale = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const [localScale, setLocalScale] = useState(scale * 100);
const onChangeSlider = useCallback(
@@ -115,7 +122,7 @@ export const CanvasToolbarScale = memo(() => {
return (
<Flex alignItems="center">
<ZoomOutButton />
<Popover>
<Popover isLazy lazyBehavior="unmount">
<PopoverAnchor>
<NumberInput
variant="outline"
@@ -132,7 +139,7 @@ export const CanvasToolbarScale = memo(() => {
onKeyDown={onKeyDown}
clampValueOnBlur={false}
>
<NumberInputField paddingInlineEnd={7} title="" _focusVisible={{ zIndex: 0 }} />
<NumberInputField title="" sx={inputFieldSx} />
<PopoverTrigger>
<IconButton
aria-label="open-slider"
@@ -171,16 +178,17 @@ CanvasToolbarScale.displayName = 'CanvasToolbarScale';
const SCALE_SNAPS = [0.1, 0.15, 0.2, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 5, 7.5, 10, 15, 20];
const ZoomOutButton = () => {
const ZoomOutButton = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const onClick = useCallback(() => {
const scale = canvasManager.stage.$scale.get();
const nextScale =
SCALE_SNAPS.slice()
.reverse()
.find((snap) => snap < scale) ?? canvasManager.stage.config.MIN_SCALE;
canvasManager.stage.setScale(Math.max(nextScale, canvasManager.stage.config.MIN_SCALE));
}, [canvasManager.stage, scale]);
}, [canvasManager.stage]);
return (
<IconButton
@@ -192,15 +200,17 @@ const ZoomOutButton = () => {
isDisabled={scale <= canvasManager.stage.config.MIN_SCALE}
/>
);
};
});
ZoomOutButton.displayName = 'ZoomOutButton';
const ZoomInButton = () => {
const ZoomInButton = memo(() => {
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stage.$stageAttrs, (attrs) => attrs.scale));
const scale = useStore(canvasManager.stage.$scale);
const onClick = useCallback(() => {
const scale = canvasManager.stage.$scale.get();
const nextScale = SCALE_SNAPS.find((snap) => snap > scale) ?? canvasManager.stage.config.MAX_SCALE;
canvasManager.stage.setScale(Math.min(nextScale, canvasManager.stage.config.MAX_SCALE));
}, [canvasManager.stage, scale]);
}, [canvasManager.stage]);
return (
<IconButton
@@ -212,4 +222,5 @@ const ZoomInButton = () => {
isDisabled={scale >= canvasManager.stage.config.MAX_SCALE}
/>
);
};
});
ZoomInButton.displayName = 'ZoomInButton';
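The switch from computed(canvasManager.stage.$stageAttrs, ...) to the shared $scale store matters because nanostores' computed() creates a new derived store on every call, so calling it inside render subscribed each render to a fresh store. A minimal sketch of the hoisted pattern (names illustrative):
import { atom, computed } from 'nanostores';
// Module-level stores: created once, shared by every subscriber.
const $stageAttrs = atom({ x: 0, y: 0, width: 0, height: 0, scale: 1 });
const $scale = computed($stageAttrs, (attrs) => attrs.scale);
// subscribe fires immediately with the current value, then on changes; the
// derived store should not notify when only x/y change and scale stays equal.
$scale.subscribe((scale) => {
  console.log('scale is now', scale);
});
$stageAttrs.set({ ...$stageAttrs.get(), scale: 2 }); // logs: scale is now 2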

View File

@@ -168,3 +168,33 @@ export const useEntityAdapter = (
assert(adapter, 'useEntityAdapter must be used within a EntityAdapterContext');
return adapter;
};
export const useAllEntityAdapters = () => {
const canvasManager = useCanvasManager();
const regionalGuidanceAdapters = useSyncExternalStore(
canvasManager.adapters.regionMasks.subscribe,
canvasManager.adapters.regionMasks.getSnapshot
);
const rasterLayerAdapters = useSyncExternalStore(
canvasManager.adapters.rasterLayers.subscribe,
canvasManager.adapters.rasterLayers.getSnapshot
);
const controlLayerAdapters = useSyncExternalStore(
canvasManager.adapters.controlLayers.subscribe,
canvasManager.adapters.controlLayers.getSnapshot
);
const inpaintMaskAdapters = useSyncExternalStore(
canvasManager.adapters.inpaintMasks.subscribe,
canvasManager.adapters.inpaintMasks.getSnapshot
);
const allEntityAdapters = useMemo(() => {
return [
...Array.from(rasterLayerAdapters.values()),
...Array.from(controlLayerAdapters.values()),
...Array.from(inpaintMaskAdapters.values()),
...Array.from(regionalGuidanceAdapters.values()),
];
}, [controlLayerAdapters, inpaintMaskAdapters, rasterLayerAdapters, regionalGuidanceAdapters]);
return allEntityAdapters;
};
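useAllEntityAdapters relies on each adapter registry exposing the subscribe/getSnapshot pair that useSyncExternalStore expects. A minimal sketch of a registry satisfying that contract (this shape is an assumption, not the actual registry implementation):
// Minimal external store: subscribe must return an unsubscribe function, and
// getSnapshot must return a referentially stable value until a change occurs.
class AdapterRegistry<T> {
  private map = new Map<string, T>();
  private snapshot: ReadonlyMap<string, T> = new Map();
  private listeners = new Set<() => void>();
  subscribe = (listener: () => void) => {
    this.listeners.add(listener);
    return () => this.listeners.delete(listener);
  };
  getSnapshot = () => this.snapshot;
  set = (id: string, adapter: T) => {
    this.map.set(id, adapter);
    this.snapshot = new Map(this.map); // new reference => React re-renders
    this.listeners.forEach((l) => l());
  };
}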

View File

@@ -6,6 +6,8 @@ import { getPrefixedId } from 'features/controlLayers/konva/util';
import {
controlLayerAdded,
inpaintMaskAdded,
inpaintMaskDenoiseLimitAdded,
inpaintMaskNoiseAdded,
rasterLayerAdded,
referenceImageAdded,
rgAdded,
@@ -27,6 +29,7 @@ import type {
import {
initialChatGPT4oReferenceImage,
initialControlNet,
initialFluxKontextReferenceImage,
initialIPAdapter,
initialT2IAdapter,
} from 'features/controlLayers/store/util';
@@ -85,6 +88,12 @@ export const selectDefaultRefImageConfig = createSelector(
return referenceImage;
}
if (selectedMainModel?.base === 'flux-kontext') {
const referenceImage = deepClone(initialFluxKontextReferenceImage);
referenceImage.model = zModelIdentifierField.parse(selectedMainModel);
return referenceImage;
}
const { data } = query;
let model: IPAdapterModelConfig | null = null;
if (data) {
@@ -222,6 +231,24 @@ export const useAddRegionalGuidanceNegativePrompt = (entityIdentifier: CanvasEnt
return func;
};
export const useAddInpaintMaskNoise = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
const dispatch = useAppDispatch();
const func = useCallback(() => {
dispatch(inpaintMaskNoiseAdded({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
return func;
};
export const useAddInpaintMaskDenoiseLimit = (entityIdentifier: CanvasEntityIdentifier<'inpaint_mask'>) => {
const dispatch = useAppDispatch();
const func = useCallback(() => {
dispatch(inpaintMaskDenoiseLimitAdded({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
return func;
};
export const buildSelectValidRegionalGuidanceActions = (
entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>
) => {

View File

@@ -2,9 +2,12 @@ import { useAppSelector } from 'app/store/storeHooks';
import {
selectIsChatGTP4o,
selectIsCogView4,
selectIsFluxKontext,
selectIsImagen3,
selectIsImagen4,
selectIsSD3,
} from 'features/controlLayers/store/paramsSlice';
import { selectActiveReferenceImageEntities } from 'features/controlLayers/store/selectors';
import type { CanvasEntityType } from 'features/controlLayers/store/types';
import { useMemo } from 'react';
import type { Equals } from 'tsafe';
@@ -14,24 +17,30 @@ export const useIsEntityTypeEnabled = (entityType: CanvasEntityType) => {
const isSD3 = useAppSelector(selectIsSD3);
const isCogView4 = useAppSelector(selectIsCogView4);
const isImagen3 = useAppSelector(selectIsImagen3);
const isImagen4 = useAppSelector(selectIsImagen4);
const isChatGPT4o = useAppSelector(selectIsChatGTP4o);
const isFluxKontext = useAppSelector(selectIsFluxKontext);
const activeReferenceImageEntities = useAppSelector(selectActiveReferenceImageEntities);
const isEntityTypeEnabled = useMemo<boolean>(() => {
switch (entityType) {
case 'reference_image':
return !isSD3 && !isCogView4 && !isImagen3;
if (isFluxKontext) {
return activeReferenceImageEntities.length === 0;
}
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4;
case 'regional_guidance':
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
case 'control_layer':
return !isSD3 && !isCogView4 && !isImagen3 && !isChatGPT4o;
return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
case 'inpaint_mask':
return !isImagen3 && !isChatGPT4o;
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
case 'raster_layer':
return !isImagen3 && !isChatGPT4o;
return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o;
default:
assert<Equals<typeof entityType, never>>(false);
}
}, [entityType, isSD3, isCogView4, isImagen3, isChatGPT4o]);
}, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, activeReferenceImageEntities]);
return isEntityTypeEnabled;
};
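The Flux Kontext branch encodes a "one reference image at a time" rule: the reference_image entity type stays enabled only until one active reference image exists. A sketch of that rule in isolation (hypothetical standalone function, illustrative values):
// Hypothetical standalone version of the reference_image branch above.
const isReferenceImageEnabled = (isFluxKontext: boolean, activeReferenceImages: number): boolean => {
  if (isFluxKontext) {
    // Flux Kontext supports exactly one reference image.
    return activeReferenceImages === 0;
  }
  return true; // other bases: subject to the !isSD3 && !isCogView4 && ... checks above
};
isReferenceImageEnabled(true, 0); // true  - can add the first reference image
isReferenceImageEnabled(true, 1); // false - a second one is not allowed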

View File

@@ -1,5 +1,6 @@
import { withResult, withResultAsync } from 'common/util/result';
import { CanvasCacheModule } from 'features/controlLayers/konva/CanvasCacheModule';
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
import type { CanvasEntityAdapter, CanvasEntityAdapterFromType } from 'features/controlLayers/konva/CanvasEntity/types';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
@@ -426,6 +427,145 @@ export class CanvasCompositorModule extends CanvasModuleBase {
return this.mergeByEntityIdentifiers(entityIdentifiers, false);
};
/**
* Creates and uploads a grayscale representation of the inpaint masks' image noise or denoise limit values.
* This produces an image with a white background where the mask is represented by dark values.
*
* @param adapters The adapters for the canvas entities to composite
* @param rect The region to include in the rasterized image
* @param attribute The attribute to use for grayscale values (defaults to 'noiseLevel')
* @param invertMask If true, transparent areas are darkened instead of masked areas (defaults to false)
* @param uploadOptions Options for uploading the image
* @param forceUpload If true, the image is always re-uploaded, returning a new image DTO
* @returns A promise that resolves to the image DTO
*/
getGrayscaleMaskCompositeImageDTO = async (
adapters: CanvasEntityAdapterInpaintMask[],
rect: Rect,
attribute: 'noiseLevel' | 'denoiseLimit' = 'noiseLevel',
invertMask: boolean = false,
uploadOptions: SetOptional<Omit<UploadImageArg, 'file'>, 'image_category'> = { is_intermediate: true },
forceUpload?: boolean
): Promise<ImageDTO> => {
assert(rect.width > 0 && rect.height > 0, 'Unable to rasterize empty rect');
// Use a unique hash that includes the attribute name for caching
const hash = this.getCompositeHash(adapters, { rect, attribute, invertMask, grayscale: true });
const cachedImageName = forceUpload ? undefined : this.manager.cache.imageNameCache.get(hash);
let imageDTO: ImageDTO | null = null;
if (cachedImageName) {
imageDTO = await getImageDTOSafe(cachedImageName);
if (imageDTO) {
this.log.debug({ rect, imageName: cachedImageName, imageDTO }, 'Using cached grayscale composite image');
return imageDTO;
}
this.log.warn({ rect, imageName: cachedImageName }, 'Cached grayscale image name not found, recompositing');
}
// Create a white background canvas
const canvas = document.createElement('canvas');
canvas.width = rect.width;
canvas.height = rect.height;
const ctx = canvas.getContext('2d');
assert(ctx !== null, 'Canvas 2D context is null');
// Fill with white first (creates white background)
ctx.fillStyle = 'white';
ctx.fillRect(0, 0, rect.width, rect.height);
// Apply special compositing mode
ctx.globalCompositeOperation = 'darken';
// Draw each adapter's content
for (const adapter of adapters) {
this.log.debug({ entityIdentifier: adapter.entityIdentifier }, 'Drawing entity to grayscale composite canvas');
// Get the canvas from the adapter
const adapterCanvas = adapter.getCanvas(rect);
// Create a temporary canvas for grayscale conversion
const tempCanvas = document.createElement('canvas');
tempCanvas.width = adapterCanvas.width;
tempCanvas.height = adapterCanvas.height;
const tempCtx = tempCanvas.getContext('2d');
assert(tempCtx !== null, 'Temp canvas 2D context is null');
// Draw the original adapter canvas to the temp canvas
tempCtx.drawImage(adapterCanvas, 0, 0);
// Get the image data for processing
const imageData = tempCtx.getImageData(0, 0, tempCanvas.width, tempCanvas.height);
const data = imageData.data;
// Default to full strength if the attribute is undefined
const attributeValue = typeof adapter.state[attribute] === 'number' ? (adapter.state[attribute] as number) : 1.0;
// Process all pixels in the image data
for (let i = 0; i < data.length; i += 4) {
// Make sure we're accessing valid array indices
if (i + 3 < data.length) {
// input has transparency
// Calculate grayscale value: white (255) for no mask, darker for stronger mask
let grayValue = 255; // Default to white for unmasked areas
if (invertMask ? (data[i + 3] ?? 0) < 128 : (data[i + 3] ?? 0) > 127) {
grayValue = Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
}
data[i] = grayValue; // R
data[i + 1] = grayValue; // G
data[i + 2] = grayValue; // B
data[i + 3] = 255; // A (output is fully opaque)
}
}
imageData.data.set(data); // Update the image data with the processed values
// Put the processed image data back to the temp canvas
tempCtx.putImageData(imageData, 0, 0);
// Draw the temp canvas to the main canvas
ctx.drawImage(tempCanvas, 0, 0);
}
// Convert to blob and upload
this.$isProcessing.set(true);
const blobResult = await withResultAsync(() => canvasToBlob(canvas));
this.$isProcessing.set(false);
if (blobResult.isErr()) {
this.log.error(
{ error: serializeError(blobResult.error) },
'Failed to convert grayscale composite canvas to blob'
);
throw blobResult.error;
}
const blob = blobResult.value;
if (this.manager._isDebugging) {
previewBlob(blob, 'Grayscale Composite');
}
this.$isUploading.set(true);
const uploadResult = await withResultAsync(() =>
uploadImage({
file: new File([blob], 'canvas-grayscale-composite.png', { type: 'image/png' }),
image_category: 'general',
...uploadOptions,
})
);
this.$isUploading.set(false);
if (uploadResult.isErr()) {
throw uploadResult.error;
}
imageDTO = uploadResult.value;
this.manager.cache.imageNameCache.set(hash, imageDTO.image_name);
return imageDTO;
};
/**
* Calculates the transparency of the composite of the given adapters.
* @param adapters The adapters to composite
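The pixel loop above maps mask coverage plus the entity's attribute value to a single gray level. A worked sketch of just that mapping (pure function, illustrative):
// Grayscale mapping used in the loop above: masked pixels (alpha > 127, or
// alpha < 128 when inverted) get 255 - round(255 * attributeValue); everything
// else stays white (255).
const toGray = (alpha: number, attributeValue: number, invertMask = false): number => {
  const isMasked = invertMask ? alpha < 128 : alpha > 127;
  if (!isMasked) {
    return 255;
  }
  return Math.max(0, Math.min(255, 255 - Math.round(255 * attributeValue)));
};
toGray(255, 0.15); // 217 - a noiseLevel of 0.15 renders as light gray
toGray(255, 1.0);  // 0   - full strength renders as black
toGray(0, 1.0);    // 255 - unmasked pixels stay white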

View File

@@ -24,12 +24,13 @@ import {
selectCanvasSlice,
selectEntity,
} from 'features/controlLayers/store/selectors';
import {
type CanvasEntityIdentifier,
type CanvasRenderableEntityState,
isRasterLayerEntityIdentifier,
type Rect,
import type {
CanvasEntityIdentifier,
CanvasRenderableEntityState,
LifecycleCallback,
Rect,
} from 'features/controlLayers/store/types';
import { isRasterLayerEntityIdentifier } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import Konva from 'konva';
import { atom } from 'nanostores';
@@ -40,11 +41,6 @@ import stableHash from 'stable-hash';
import { assert } from 'tsafe';
import type { Jsonifiable, JsonObject } from 'type-fest';
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`. We'll need to do a
// type assertion below in the `onInit` method, which calls these callbacks.
type InitCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
export abstract class CanvasEntityAdapterBase<
T extends CanvasRenderableEntityState,
U extends string,
@@ -118,7 +114,7 @@ export abstract class CanvasEntityAdapterBase<
/**
* Callbacks that are executed when the module is initialized.
*/
private static initCallbacks = new Set<InitCallback>();
private static initCallbacks = new Set<LifecycleCallback>();
/**
* Register a callback to be run when an entity adapter is initialized.
@@ -165,7 +161,7 @@ export abstract class CanvasEntityAdapterBase<
* return false;
* });
*/
static registerInitCallback = (callback: InitCallback) => {
static registerInitCallback = (callback: LifecycleCallback) => {
const wrapped = async (adapter: CanvasEntityAdapter) => {
const result = await callback(adapter);
if (result) {
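Both registerInitCallback here and registerBboxUpdatedCallback in the transformer use the same one-shot wrapper: a callback that resolves to true removes itself from the set. A minimal sketch of the pattern, detached from the adapter types:
type Callback<T> = (arg: T) => Promise<boolean>;
const callbacks = new Set<Callback<string>>();
// Wrap the callback so it unregisters itself once it reports success (true).
const register = (callback: Callback<string>) => {
  const wrapped: Callback<string> = async (arg) => {
    const result = await callback(arg);
    if (result) {
      callbacks.delete(wrapped);
    }
    return result;
  };
  callbacks.add(wrapped);
};
// Fire-and-forget: callbacks that return false stay registered for next time.
const run = (arg: string) => {
  for (const callback of callbacks) {
    callback(arg);
  }
};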

View File

@@ -13,7 +13,7 @@ import {
roundRect,
} from 'features/controlLayers/konva/util';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
import type { Coordinate, LifecycleCallback, Rect, RectWithRotation } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import Konva from 'konva';
import type { GroupConfig } from 'konva/lib/Group';
@@ -123,7 +123,7 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
/**
* Whether the transformer is currently calculating the rect of the parent.
*/
$isPendingRectCalculation = atom<boolean>(true);
$isPendingRectCalculation = atom<boolean>(false);
/**
* A set of subscriptions that should be cleaned up when the transformer is destroyed.
@@ -177,6 +177,11 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
*/
transformMutex = new Mutex();
/**
* Callbacks that are executed when the bbox is updated.
*/
private static bboxUpdatedCallbacks = new Set<LifecycleCallback>();
konva: {
transformer: Konva.Transformer;
proxyRect: Konva.Rect;
@@ -908,6 +913,8 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
this.parent.renderer.konva.objectGroup.setAttrs(groupAttrs);
this.parent.bufferRenderer.konva.group.setAttrs(groupAttrs);
}
CanvasEntityTransformer.runBboxUpdatedCallbacks(this.parent);
};
calculateRect = debounce(() => {
@@ -1026,6 +1033,23 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
this.konva.outlineRect.visible(false);
};
static registerBboxUpdatedCallback = (callback: LifecycleCallback) => {
const wrapped = async (adapter: CanvasEntityAdapter) => {
const result = await callback(adapter);
if (result) {
this.bboxUpdatedCallbacks.delete(wrapped);
}
return result;
};
this.bboxUpdatedCallbacks.add(wrapped);
};
private static runBboxUpdatedCallbacks = (adapter: CanvasEntityAdapter) => {
for (const callback of this.bboxUpdatedCallbacks) {
callback(adapter);
}
};
repr = () => {
return {
id: this.id,

View File

@@ -1,12 +1,12 @@
import type { Property } from 'csstype';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import { getKonvaNodeDebugAttrs, getPrefixedId } from 'features/controlLayers/konva/util';
import { getKonvaNodeDebugAttrs, getPrefixedId, getRectUnion } from 'features/controlLayers/konva/util';
import type { Coordinate, Dimensions, Rect, StageAttrs } from 'features/controlLayers/store/types';
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import { clamp } from 'lodash-es';
import { atom } from 'nanostores';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';
type CanvasStageModuleConfig = {
@@ -26,6 +26,14 @@ type CanvasStageModuleConfig = {
* The padding in pixels to use when fitting the layers to the stage.
*/
FIT_LAYERS_TO_STAGE_PADDING_PX: number;
/**
* The snap points for the scale of the canvas.
*/
SCALE_SNAP_POINTS: number[];
/**
* The tolerance for snapping the scale of the canvas, as a fraction of the scale.
*/
SCALE_SNAP_TOLERANCE: number;
};
const DEFAULT_CONFIG: CanvasStageModuleConfig = {
@@ -33,6 +41,8 @@ const DEFAULT_CONFIG: CanvasStageModuleConfig = {
MAX_SCALE: 20,
SCALE_FACTOR: 0.999,
FIT_LAYERS_TO_STAGE_PADDING_PX: 48,
SCALE_SNAP_POINTS: [0.25, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5],
SCALE_SNAP_TOLERANCE: 0.05,
};
export class CanvasStageModule extends CanvasModuleBase {
@@ -43,6 +53,11 @@ export class CanvasStageModule extends CanvasModuleBase {
readonly manager: CanvasManager;
readonly log: Logger;
// State for scale snapping logic
private _intendedScale: number = 1;
private _activeSnapPoint: number | null = null;
private _snapTimeout: number | null = null;
container: HTMLDivElement;
konva: { stage: Konva.Stage };
@@ -55,6 +70,7 @@ export class CanvasStageModule extends CanvasModuleBase {
height: 0,
scale: 0,
});
$scale = computed(this.$stageAttrs, (attrs) => attrs.scale);
subscriptions = new Set<() => void>();
resizeObserver: ResizeObserver | null = null;
@@ -76,6 +92,9 @@ export class CanvasStageModule extends CanvasModuleBase {
container,
}),
};
// Initialize intended scale to the default stage scale
this._intendedScale = this.konva.stage.scaleX();
}
setContainer = (container: HTMLDivElement) => {
@@ -167,6 +186,18 @@ export class CanvasStageModule extends CanvasModuleBase {
}
};
/**
* Fits the bbox and layers to the stage. The union of the bbox and the visible layers will be centered and scaled
* to fit the stage with some padding.
*/
fitBboxAndLayersToStage = (): void => {
const layersRect = this.manager.compositor.getVisibleRectOfType();
const bboxRect = this.manager.stateApi.getBbox().rect;
const unionRect = getRectUnion(layersRect, bboxRect);
this.log.trace({ bboxRect, layersRect, unionRect }, 'Fitting bbox and layers to stage');
this.fitRect(unionRect);
};
/**
* Fits a rectangle to the stage. The rectangle will be centered and scaled to fit the stage with some padding.
*
@@ -195,14 +226,27 @@ export class CanvasStageModule extends CanvasModuleBase {
-rect.y * scale + this.config.FIT_LAYERS_TO_STAGE_PADDING_PX + (availableHeight - rect.height * scale) / 2
);
this.konva.stage.setAttrs({
// When fitting the stage, we update the intended scale and reset any active snap.
this._intendedScale = scale;
this._activeSnapPoint = null;
const tween = new Konva.Tween({
node: this.konva.stage,
duration: 0.15,
x,
y,
scaleX: scale,
scaleY: scale,
easing: Konva.Easings.EaseInOut,
onUpdate: () => {
this.syncStageAttrs();
},
onFinish: () => {
this.syncStageAttrs();
tween.destroy();
},
});
this.syncStageAttrs({ x, y, scale });
tween.play();
};
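fitRect centers and scales a rect to the stage with padding; the visible fragment above shows the y computation. A sketch of the full fit computation, under the assumption that it mirrors the standard contain-fit formula (the real module's rounding may differ):
type FitRect = { x: number; y: number; width: number; height: number };
// Contain-fit: scale so the rect fits inside the padded stage, then center it.
const fitRectToStage = (rect: FitRect, stageWidth: number, stageHeight: number, padding: number) => {
  const availableWidth = stageWidth - padding * 2;
  const availableHeight = stageHeight - padding * 2;
  const scale = Math.min(availableWidth / rect.width, availableHeight / rect.height);
  const x = -rect.x * scale + padding + (availableWidth - rect.width * scale) / 2;
  const y = -rect.y * scale + padding + (availableHeight - rect.height * scale) / 2;
  return { x, y, scale };
};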
/**
@@ -230,26 +274,41 @@ export class CanvasStageModule extends CanvasModuleBase {
* Constrains a scale to be within the valid range
*/
constrainScale = (scale: number): number => {
return clamp(Math.round(scale * 100) / 100, this.config.MIN_SCALE, this.config.MAX_SCALE);
return clamp(scale, this.config.MIN_SCALE, this.config.MAX_SCALE);
};
/**
* Sets the scale of the stage. If center is provided, the stage will zoom in/out on that point.
* @param scale The new scale to set
* @param center The center of the stage to zoom in/out on
* Programmatically sets the scale of the stage, overriding any active snapping.
* If a center point is provided, the stage will zoom on that point.
* @param scale The new scale to set.
* @param center The center point for the zoom.
*/
setScale = (scale: number, center: Coordinate = this.getCenter(true)): void => {
this.log.trace('Setting scale');
setScale = (scale: number, center?: Coordinate): void => {
this.log.trace({ scale }, 'Programmatically setting scale');
const newScale = this.constrainScale(scale);
const { x, y } = this.getPosition();
// When scale is set programmatically, update the intended scale and reset any active snap.
this._intendedScale = newScale;
this._activeSnapPoint = null;
this._applyScale(newScale, center);
};
/**
* Applies a scale to the stage, adjusting the position to keep the given center point stationary.
* This internal method does NOT modify snapping state.
*/
private _applyScale = (newScale: number, center?: Coordinate): void => {
const oldScale = this.getScale();
const deltaX = (center.x - x) / oldScale;
const deltaY = (center.y - y) / oldScale;
const _center = center ?? this.getCenter(true);
const { x, y } = this.getPosition();
const newX = Math.floor(center.x - deltaX * newScale);
const newY = Math.floor(center.y - deltaY * newScale);
const deltaX = (_center.x - x) / oldScale;
const deltaY = (_center.y - y) / oldScale;
const newX = _center.x - deltaX * newScale;
const newY = _center.y - deltaY * newScale;
this.konva.stage.setAttrs({
x: newX,
@@ -263,6 +322,7 @@ export class CanvasStageModule extends CanvasModuleBase {
onStageMouseWheel = (e: KonvaEventObject<WheelEvent>) => {
e.evt.preventDefault();
this._snapTimeout && window.clearTimeout(this._snapTimeout);
if (e.evt.ctrlKey || e.evt.metaKey) {
return;
@@ -271,12 +331,59 @@ export class CanvasStageModule extends CanvasModuleBase {
// We need the absolute cursor position - not the scaled position
const cursorPos = this.konva.stage.getPointerPosition();
if (cursorPos) {
// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
const scale = this.manager.stage.getScale() * this.config.SCALE_FACTOR ** delta;
this.manager.stage.setScale(scale, cursorPos);
if (!cursorPos) {
return;
}
// When wheeling on trackpad, e.evt.ctrlKey is true - in that case, let's reverse the direction
const delta = e.evt.ctrlKey ? -e.evt.deltaY : e.evt.deltaY;
// Update the intended scale based on the last intended scale, creating a continuous zoom feel
const newIntendedScale = this._intendedScale * this.config.SCALE_FACTOR ** delta;
this._intendedScale = this.constrainScale(newIntendedScale);
// Pass control to the snapping logic
this._updateScaleWithSnapping(cursorPos);
this._snapTimeout = window.setTimeout(() => {
// After a short delay, reset the intended scale to the actual scale.
// This allows continuous zooming to resume from the current scale without snapping back to the last snapped value.
this._intendedScale = this.getScale();
}, 100);
};
/**
* Implements "sticky" snap logic.
* - If not snapped, checks if the intended scale is close enough to a snap point to engage the snap.
* - If snapped, checks if the intended scale has moved far enough away to break the snap.
* - Applies the resulting scale to the stage.
*/
private _updateScaleWithSnapping = (center: Coordinate) => {
// If we are currently snapped, check if we should break out
if (this._activeSnapPoint !== null) {
const threshold = this._activeSnapPoint * this.config.SCALE_SNAP_TOLERANCE;
if (Math.abs(this._intendedScale - this._activeSnapPoint) > threshold) {
// User has scrolled far enough to break the snap
this._activeSnapPoint = null;
this._applyScale(this._intendedScale, center);
}
// Else, do nothing - we remain snapped at the current scale, creating a "dead zone"
return;
}
// If we are not snapped, check if we should snap to a point
for (const snapPoint of this.config.SCALE_SNAP_POINTS) {
const threshold = snapPoint * this.config.SCALE_SNAP_TOLERANCE;
if (Math.abs(this._intendedScale - snapPoint) < threshold) {
// Engage the snap
this._activeSnapPoint = snapPoint;
this._applyScale(snapPoint, center);
return;
}
}
// If we are not snapping and not breaking a snap, just update to the intended scale
this._applyScale(this._intendedScale, center);
};
onStagePointerDown = (e: KonvaEventObject<PointerEvent>) => {
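The sticky snap above keeps a separate _intendedScale so the user's wheel input keeps accumulating even while the displayed scale is pinned to a snap point. A worked sketch with the default SCALE_SNAP_TOLERANCE of 0.05:
// Hypothetical trace with snap point 1 and tolerance 0.05 (threshold = 0.05):
// intended 0.93 -> |0.93 - 1| = 0.07 > 0.05 -> not snapped, display 0.93
// intended 0.97 -> |0.97 - 1| = 0.03 < 0.05 -> snap engages, display 1
// intended 1.04 -> |1.04 - 1| = 0.04 < 0.05 -> still snapped ("dead zone"), display 1
// intended 1.06 -> |1.06 - 1| = 0.06 > 0.05 -> snap breaks, display 1.06
const snapScale = (intended: number, snapPoints: number[], tolerance: number, active: number | null) => {
  if (active !== null) {
    return Math.abs(intended - active) > active * tolerance
      ? { scale: intended, active: null } // break the snap
      : { scale: active, active }; // remain pinned
  }
  for (const snap of snapPoints) {
    if (Math.abs(intended - snap) < snap * tolerance) {
      return { scale: snap, active: snap }; // engage the snap
    }
  }
  return { scale: intended, active: null };
};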

View File

@@ -8,6 +8,7 @@ import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { selectBbox } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, Tool } from 'features/controlLayers/store/types';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import Konva from 'konva';
import { noop } from 'lodash-es';
import { atom } from 'nanostores';
@@ -235,7 +236,7 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
if (tool !== 'bbox') {
return NO_ANCHORS;
}
if (model?.base === 'imagen3' || model?.base === 'chatgpt-4o') {
if (model?.base && API_BASE_MODELS.includes(model.base)) {
// The bbox is not resizable in these modes
return NO_ANCHORS;
}
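API_BASE_MODELS centralizes the "hosted API model" check that previously hard-coded imagen3 and chatgpt-4o. Its contents are not shown in this diff; given the bases special-cased elsewhere here, a plausible sketch (an assumption, not the verified constant):
// Assumed shape, inferred from the imagen3/imagen4/chatgpt-4o/flux-kontext handling in this diff.
export const API_BASE_MODELS = ['imagen3', 'imagen4', 'chatgpt-4o', 'flux-kontext'] as const;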

View File

@@ -32,6 +32,7 @@ import {
import { simplifyFlatNumbersArray } from 'features/controlLayers/util/simplify';
import { isMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import { ASPECT_RATIO_MAP } from 'features/parameters/components/Bbox/constants';
import { API_BASE_MODELS } from 'features/parameters/types/constants';
import { getGridSize, getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import type { IRect } from 'konva/lib/types';
import { isEqual, merge } from 'lodash-es';
@@ -68,7 +69,13 @@ import type {
IPMethodV2,
T2IAdapterConfig,
} from './types';
import { getEntityIdentifier, isChatGPT4oAspectRatioID, isImagen3AspectRatioID, isRenderableEntity } from './types';
import {
getEntityIdentifier,
isChatGPT4oAspectRatioID,
isFluxKontextAspectRatioID,
isImagenAspectRatioID,
isRenderableEntity,
} from './types';
import {
converters,
getControlLayerState,
@@ -80,6 +87,7 @@ import {
initialChatGPT4oReferenceImage,
initialControlLoRA,
initialControlNet,
initialFluxKontextReferenceImage,
initialFLUXRedux,
initialIPAdapter,
initialT2IAdapter,
@@ -685,6 +693,16 @@ export const canvasSlice = createSlice({
return;
}
if (entity.ipAdapter.model.base === 'flux-kontext') {
// Switching to flux-kontext
entity.ipAdapter = {
...initialFluxKontextReferenceImage,
image: entity.ipAdapter.image,
model: entity.ipAdapter.model,
};
return;
}
if (entity.ipAdapter.model.type === 'flux_redux') {
// Switching to flux_redux
entity.ipAdapter = {
@@ -1095,6 +1113,30 @@ export const canvasSlice = createSlice({
state.inpaintMasks.entities = [data];
state.selectedEntityIdentifier = { type: 'inpaint_mask', id: data.id };
},
inpaintMaskNoiseAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = 0.15; // Default noise level
}
},
inpaintMaskNoiseChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ noiseLevel: number }, 'inpaint_mask'>>
) => {
const { entityIdentifier, noiseLevel } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = noiseLevel;
}
},
inpaintMaskNoiseDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.noiseLevel = undefined;
}
},
inpaintMaskConvertedToRegionalGuidance: {
reducer: (
state,
@@ -1133,6 +1175,30 @@ export const canvasSlice = createSlice({
payload: { ...payload, newId: getPrefixedId('regional_guidance') },
}),
},
inpaintMaskDenoiseLimitAdded: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = 1.0; // Default denoise limit
}
},
inpaintMaskDenoiseLimitChanged: (
state,
action: PayloadAction<EntityIdentifierPayload<{ denoiseLimit: number }, 'inpaint_mask'>>
) => {
const { entityIdentifier, denoiseLimit } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = denoiseLimit;
}
},
inpaintMaskDenoiseLimitDeleted: (state, action: PayloadAction<EntityIdentifierPayload<void, 'inpaint_mask'>>) => {
const { entityIdentifier } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (entity && entity.type === 'inpaint_mask') {
entity.denoiseLimit = undefined;
}
},
//#region BBox
bboxScaledWidthChanged: (state, action: PayloadAction<number>) => {
const gridSize = getGridSize(state.bbox.modelBase);
@@ -1236,7 +1302,10 @@ export const canvasSlice = createSlice({
state.bbox.aspectRatio.id = id;
if (id === 'Free') {
state.bbox.aspectRatio.isLocked = false;
} else if (state.bbox.modelBase === 'imagen3' && isImagen3AspectRatioID(id)) {
} else if (
(state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'imagen4') &&
isImagenAspectRatioID(id)
) {
// Imagen3 and Imagen4 have specific output sizes that are not exactly the same as the aspect ratio. Need special handling.
if (id === '16:9') {
state.bbox.rect.width = 1408;
@@ -1270,6 +1339,31 @@ export const canvasSlice = createSlice({
}
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
state.bbox.aspectRatio.isLocked = true;
} else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) {
if (id === '3:4') {
state.bbox.rect.width = 880;
state.bbox.rect.height = 1184;
} else if (id === '4:3') {
state.bbox.rect.width = 1184;
state.bbox.rect.height = 880;
} else if (id === '9:16') {
state.bbox.rect.width = 752;
state.bbox.rect.height = 1392;
} else if (id === '16:9') {
state.bbox.rect.width = 1392;
state.bbox.rect.height = 752;
} else if (id === '21:9') {
state.bbox.rect.width = 1568;
state.bbox.rect.height = 672;
} else if (id === '9:21') {
state.bbox.rect.width = 672;
state.bbox.rect.height = 1568;
} else if (id === '1:1') {
state.bbox.rect.width = 1024;
state.bbox.rect.height = 1024;
}
state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
state.bbox.aspectRatio.isLocked = true;
} else {
state.bbox.aspectRatio.isLocked = true;
state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
@@ -1742,7 +1836,7 @@ export const canvasSlice = createSlice({
const base = model?.base;
if (isMainModelBase(base) && state.bbox.modelBase !== base) {
state.bbox.modelBase = base;
if (base === 'imagen3' || base === 'chatgpt-4o') {
if (API_BASE_MODELS.includes(base)) {
state.bbox.aspectRatio.isLocked = true;
state.bbox.aspectRatio.value = 1;
state.bbox.aspectRatio.id = '1:1';
@@ -1865,6 +1959,12 @@ export const {
// Inpaint mask
inpaintMaskAdded,
inpaintMaskConvertedToRegionalGuidance,
inpaintMaskNoiseAdded,
inpaintMaskNoiseChanged,
inpaintMaskNoiseDeleted,
inpaintMaskDenoiseLimitAdded,
inpaintMaskDenoiseLimitChanged,
inpaintMaskDenoiseLimitDeleted,
// inpaintMaskRecalled,
} = canvasSlice.actions;
@@ -1881,7 +1981,7 @@ export const canvasPersistConfig: PersistConfig<CanvasState> = {
};
const syncScaledSize = (state: CanvasState) => {
if (state.bbox.modelBase === 'imagen3' || state.bbox.modelBase === 'chatgpt-4o') {
if (API_BASE_MODELS.includes(state.bbox.modelBase)) {
// API models have fixed sizes. Scaled bbox is not supported.
return;
}
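The six new inpaint-mask reducers follow an add (seed a default) / change / delete (set undefined) lifecycle, which is what lets the sliders render conditionally. A usage sketch of the dispatch sequence (store setup omitted):
// Lifecycle of the optional noiseLevel field on an inpaint mask entity:
const entityIdentifier = { type: 'inpaint_mask' as const, id: 'inpaint_mask_1' };
dispatch(inpaintMaskNoiseAdded({ entityIdentifier })); // noiseLevel = 0.15 (default)
dispatch(inpaintMaskNoiseChanged({ entityIdentifier, noiseLevel: 0.4 })); // noiseLevel = 0.4
dispatch(inpaintMaskNoiseDeleted({ entityIdentifier })); // noiseLevel = undefined, slider unmounts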

View File

@@ -381,7 +381,9 @@ export const selectIsFLUX = createParamsSelector((params) => params.model?.base
export const selectIsSD3 = createParamsSelector((params) => params.model?.base === 'sd-3');
export const selectIsCogView4 = createParamsSelector((params) => params.model?.base === 'cogview4');
export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
export const selectIsChatGTP4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
export const selectIsFluxKontext = createParamsSelector((params) => params.model?.base === 'flux-kontext');
export const selectModel = createParamsSelector((params) => params.model);
export const selectModelKey = createParamsSelector((params) => params.model?.key);

View File

@@ -1,3 +1,4 @@
import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
import { fetchModelConfigByIdentifier } from 'features/metadata/util/modelFetchingHelpers';
import { zMainModelBase, zModelIdentifierField } from 'features/nodes/types/common';
import type { ParameterLoRAModel } from 'features/parameters/types/parameterSchemas';
@@ -50,7 +51,7 @@ const zCLIPVisionModelV2 = z.enum(['ViT-H', 'ViT-G', 'ViT-L']);
export type CLIPVisionModelV2 = z.infer<typeof zCLIPVisionModelV2>;
export const isCLIPVisionModelV2 = (v: unknown): v is CLIPVisionModelV2 => zCLIPVisionModelV2.safeParse(v).success;
const zIPMethodV2 = z.enum(['full', 'style', 'composition']);
const zIPMethodV2 = z.enum(['full', 'style', 'composition', 'style_strong', 'style_precise']);
export type IPMethodV2 = z.infer<typeof zIPMethodV2>;
export const isIPMethodV2 = (v: unknown): v is IPMethodV2 => zIPMethodV2.safeParse(v).success;
@@ -257,6 +258,13 @@ const zChatGPT4oReferenceImageConfig = z.object({
});
export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceImageConfig>;
const zFluxKontextReferenceImageConfig = z.object({
type: z.literal('flux_kontext_reference_image'),
image: zImageWithDims.nullable(),
model: zServerValidatedModelIdentifierField.nullable(),
});
export type FluxKontextReferenceImageConfig = z.infer<typeof zFluxKontextReferenceImageConfig>;
const zCanvasEntityBase = z.object({
id: zId,
name: zName,
@@ -267,7 +275,12 @@ const zCanvasEntityBase = z.object({
const zCanvasReferenceImageState = zCanvasEntityBase.extend({
type: z.literal('reference_image'),
// This should be named `referenceImage` but we need to keep it as `ipAdapter` for backwards compatibility
ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zChatGPT4oReferenceImageConfig]),
ipAdapter: z.discriminatedUnion('type', [
zIPAdapterConfig,
zFLUXReduxConfig,
zChatGPT4oReferenceImageConfig,
zFluxKontextReferenceImageConfig,
]),
});
export type CanvasReferenceImageState = z.infer<typeof zCanvasReferenceImageState>;
@@ -279,6 +292,9 @@ export const isFLUXReduxConfig = (config: CanvasReferenceImageState['ipAdapter']
export const isChatGPT4oReferenceImageConfig = (
config: CanvasReferenceImageState['ipAdapter']
): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
export const isFluxKontextReferenceImageConfig = (
config: CanvasReferenceImageState['ipAdapter']
): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
export type FillStyle = z.infer<typeof zFillStyle>;
@@ -310,6 +326,8 @@ const zCanvasInpaintMaskState = zCanvasEntityBase.extend({
fill: zFill,
opacity: zOpacity,
objects: z.array(zCanvasObjectState),
noiseLevel: z.number().gte(0).lte(1).optional(),
denoiseLimit: z.number().gte(0).lte(1).optional(),
});
export type CanvasInpaintMaskState = z.infer<typeof zCanvasInpaintMaskState>;
@@ -403,16 +421,20 @@ export type StagingAreaImage = {
offsetY: number;
};
export const zAspectRatioID = z.enum(['Free', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
export const zAspectRatioID = z.enum(['Free', '21:9', '9:21', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']);
export const isImagen3AspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
export const isImagenAspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
zImagen3AspectRatioID.safeParse(v).success;
export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']);
export const isChatGPT4oAspectRatioID = (v: unknown): v is z.infer<typeof zChatGPT4oAspectRatioID> =>
zChatGPT4oAspectRatioID.safeParse(v).success;
export const zFluxKontextAspectRatioID = z.enum(['21:9', '4:3', '1:1', '3:4', '9:21', '16:9', '9:16']);
export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer<typeof zFluxKontextAspectRatioID> =>
zFluxKontextAspectRatioID.safeParse(v).success;
export type AspectRatioID = z.infer<typeof zAspectRatioID>;
export const isAspectRatioID = (v: unknown): v is AspectRatioID => zAspectRatioID.safeParse(v).success;
@@ -609,3 +631,7 @@ export const isMaskEntityIdentifier = (
): entityIdentifier is CanvasEntityIdentifier<'inpaint_mask' | 'regional_guidance'> => {
return isInpaintMaskEntityIdentifier(entityIdentifier) || isRegionalGuidanceEntityIdentifier(entityIdentifier);
};
// Ideally, we'd type `adapter` as `CanvasEntityAdapterBase`, but the generics make this tricky. `CanvasEntityAdapter`
// is a union of all entity adapters and is functionally identical to `CanvasEntityAdapterBase`.
export type LifecycleCallback = (adapter: CanvasEntityAdapter) => Promise<boolean>;
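The ipAdapter field is a zod discriminated union on type, so guards like isFluxKontextReferenceImageConfig narrow it structurally. A small sketch of the same pattern (simplified stand-in schemas, illustrative):
import { z } from 'zod';
// Simplified stand-ins for the reference image config schemas above.
const zA = z.object({ type: z.literal('ip_adapter'), weight: z.number() });
const zB = z.object({ type: z.literal('flux_kontext_reference_image') });
const zConfig = z.discriminatedUnion('type', [zA, zB]);
type Config = z.infer<typeof zConfig>;
const isFluxKontext = (c: Config): c is z.infer<typeof zB> =>
  c.type === 'flux_kontext_reference_image';
const config = zConfig.parse({ type: 'flux_kontext_reference_image' });
if (isFluxKontext(config)) {
  // TypeScript now knows there is no `weight` field here.
}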

View File

@@ -10,6 +10,7 @@ import type {
ChatGPT4oReferenceImageConfig,
ControlLoRAConfig,
ControlNetConfig,
FluxKontextReferenceImageConfig,
FLUXReduxConfig,
ImageWithDims,
IPAdapterConfig,
@@ -83,6 +84,11 @@ export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
image: null,
model: null,
};
export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
type: 'flux_kontext_reference_image',
image: null,
model: null,
};
export const initialT2IAdapter: T2IAdapterConfig = {
type: 't2i_adapter',
model: null,
@@ -199,6 +205,8 @@ export const getInpaintMaskState = (
style: 'diagonal',
color: getInpaintMaskFillColor(),
},
noiseLevel: undefined,
denoiseLimit: undefined,
};
merge(entityState, overrides);
return entityState;

View File

@@ -1,7 +1,11 @@
import { roundToMultiple } from 'common/util/roundDownToMultiple';
import type { Dimensions } from 'features/controlLayers/store/types';
import type { MainModelBase } from 'features/nodes/types/common';
import { getGridSize, getOptimalDimension } from 'features/parameters/util/optimalDimension';
import {
getGridSize,
getOptimalDimension,
isInSDXLTrainingDimensions,
} from 'features/parameters/util/optimalDimension';
/**
* Scales the bounding box dimensions to the optimal dimension. The optimal dimensions should be the trained dimension
@@ -10,6 +14,11 @@ import { getGridSize, getOptimalDimension } from 'features/parameters/util/optim
* @param modelBase The base model
*/
export const getScaledBoundingBoxDimensions = (dimensions: Dimensions, modelBase: MainModelBase): Dimensions => {
// Special cases: Return original if SDXL and in training dimensions
if (modelBase === 'sdxl' && isInSDXLTrainingDimensions(dimensions.width, dimensions.height)) {
return { ...dimensions };
}
const optimalDimension = getOptimalDimension(modelBase);
const gridSize = getGridSize(modelBase);
const width = roundToMultiple(dimensions.width, gridSize);

View File

@@ -109,7 +109,7 @@ export const FullscreenDropzone = memo(() => {
const autoAddBoardId = selectAutoAddBoardId(getState());
if (isClientSideUploadEnabled) {
if (isClientSideUploadEnabled && files.length > 1) {
for (const [i, file] of files.entries()) {
await clientSideUpload(file, i);
}

View File

@@ -26,19 +26,26 @@ import { atom } from 'nanostores';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useListAllImageNamesForBoardQuery } from 'services/api/endpoints/boards';
import { useDeleteBoardAndImagesMutation, useDeleteBoardMutation } from 'services/api/endpoints/images';
import {
useDeleteBoardAndImagesMutation,
useDeleteBoardMutation,
useDeleteUncategorizedImagesMutation,
} from 'services/api/endpoints/images';
import type { BoardDTO } from 'services/api/types';
export const $boardToDelete = atom<BoardDTO | null>(null);
export const $boardToDelete = atom<BoardDTO | 'none' | null>(null);
const DeleteBoardModal = () => {
useAssertSingleton('DeleteBoardModal');
const boardToDelete = useStore($boardToDelete);
const { t } = useTranslation();
const boardId = useMemo(() => (boardToDelete === 'none' ? 'none' : boardToDelete?.board_id), [boardToDelete]);
const { currentData: boardImageNames, isFetching: isFetchingBoardNames } = useListAllImageNamesForBoardQuery(
boardToDelete?.board_id
boardId
? {
board_id: boardToDelete?.board_id,
board_id: boardId,
categories: undefined,
is_intermediate: undefined,
}
@@ -71,10 +78,13 @@ const DeleteBoardModal = () => {
const [deleteBoardAndImages, { isLoading: isDeleteBoardAndImagesLoading }] = useDeleteBoardAndImagesMutation();
const [deleteUncategorizedImages, { isLoading: isDeleteUncategorizedImagesLoading }] =
useDeleteUncategorizedImagesMutation();
const imageUsageSummary = useAppSelector(selectImageUsageSummary);
const handleDeleteBoardOnly = useCallback(() => {
if (!boardToDelete) {
if (!boardToDelete || boardToDelete === 'none') {
return;
}
deleteBoardOnly(boardToDelete.board_id);
@@ -82,13 +92,21 @@ const DeleteBoardModal = () => {
}, [boardToDelete, deleteBoardOnly]);
const handleDeleteBoardAndImages = useCallback(() => {
if (!boardToDelete) {
if (!boardToDelete || boardToDelete === 'none') {
return;
}
deleteBoardAndImages(boardToDelete.board_id);
$boardToDelete.set(null);
}, [boardToDelete, deleteBoardAndImages]);
const handleDeleteUncategorizedImages = useCallback(() => {
if (!boardToDelete || boardToDelete !== 'none') {
return;
}
deleteUncategorizedImages();
$boardToDelete.set(null);
}, [boardToDelete, deleteUncategorizedImages]);
const handleClose = useCallback(() => {
$boardToDelete.set(null);
}, []);
@@ -96,8 +114,12 @@ const DeleteBoardModal = () => {
const cancelRef = useRef<HTMLButtonElement>(null);
const isLoading = useMemo(
() => isDeleteBoardAndImagesLoading || isDeleteBoardOnlyLoading || isFetchingBoardNames,
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames]
() =>
isDeleteBoardAndImagesLoading ||
isDeleteBoardOnlyLoading ||
isFetchingBoardNames ||
isDeleteUncategorizedImagesLoading,
[isDeleteBoardAndImagesLoading, isDeleteBoardOnlyLoading, isFetchingBoardNames, isDeleteUncategorizedImagesLoading]
);
if (!boardToDelete) {
@@ -109,7 +131,7 @@ const DeleteBoardModal = () => {
<AlertDialogOverlay>
<AlertDialogContent>
<AlertDialogHeader fontSize="lg" fontWeight="bold">
{t('common.delete')} {boardToDelete.board_name}
{t('common.delete')} {boardToDelete === 'none' ? t('boards.uncategorizedImages') : boardToDelete.board_name}
</AlertDialogHeader>
<AlertDialogBody>
@@ -125,11 +147,13 @@ const DeleteBoardModal = () => {
bottomMessage={t('boards.bottomMessage')}
/>
)}
<Text>
{boardToDelete.is_private
? t('boards.deletedPrivateBoardsCannotbeRestored')
: t('boards.deletedBoardsCannotbeRestored')}
</Text>
{boardToDelete !== 'none' && (
<Text>
{boardToDelete.is_private
? t('boards.deletedPrivateBoardsCannotbeRestored')
: t('boards.deletedBoardsCannotbeRestored')}
</Text>
)}
<Text>{t('gallery.deleteImagePermanent')}</Text>
</Flex>
</AlertDialogBody>
@@ -138,12 +162,21 @@ const DeleteBoardModal = () => {
<Button ref={cancelRef} onClick={handleClose}>
{t('boards.cancel')}
</Button>
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
{t('boards.deleteBoardOnly')}
</Button>
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
{t('boards.deleteBoardAndImages')}
</Button>
{boardToDelete !== 'none' && (
<Button colorScheme="warning" isLoading={isLoading} onClick={handleDeleteBoardOnly}>
{t('boards.deleteBoardOnly')}
</Button>
)}
{boardToDelete !== 'none' && (
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteBoardAndImages}>
{t('boards.deleteBoardAndImages')}
</Button>
)}
{boardToDelete === 'none' && (
<Button colorScheme="error" isLoading={isLoading} onClick={handleDeleteUncategorizedImages}>
{t('boards.deleteAllUncategorizedImages')}
</Button>
)}
</Flex>
</AlertDialogFooter>
</AlertDialogContent>

View File

@@ -7,9 +7,11 @@ import { autoAddBoardIdChanged } from 'features/gallery/store/gallerySlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDownloadBold, PiPlusBold } from 'react-icons/pi';
import { PiDownloadBold, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
import { useBulkDownloadImagesMutation } from 'services/api/endpoints/images';
import { $boardToDelete } from './DeleteBoardModal';
type Props = {
children: ContextMenuProps<HTMLDivElement>['children'];
};
@@ -33,6 +35,10 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
bulkDownload({ image_names: [], board_id: 'none' });
}, [bulkDownload]);
const setUncategorizedImagesAsToBeDeleted = useCallback(() => {
$boardToDelete.set('none');
}, []);
const renderMenuFunc = useCallback(
() => (
<MenuList visibility="visible">
@@ -47,10 +53,26 @@ const NoBoardBoardContextMenu = ({ children }: Props) => {
{t('boards.downloadBoard')}
</MenuItem>
)}
<MenuItem
color="error.300"
icon={<PiTrashSimpleBold />}
onClick={setUncategorizedImagesAsToBeDeleted}
isDestructive
>
{t('boards.deleteAllUncategorizedImages')}
</MenuItem>
</MenuGroup>
</MenuList>
),
[autoAssignBoardOnClick, handleBulkDownload, handleSetAutoAdd, isBulkDownloadEnabled, isSelectedForAutoAdd, t]
[
autoAssignBoardOnClick,
handleBulkDownload,
handleSetAutoAdd,
isBulkDownloadEnabled,
isSelectedForAutoAdd,
t,
setUncategorizedImagesAsToBeDeleted,
]
);
return <ContextMenu renderMenu={renderMenuFunc}>{children}</ContextMenu>;

View File

@@ -19,9 +19,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
const imageViewer = useImageViewer();
const isBusy = useCanvasIsBusySafe();
const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -31,9 +31,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);
const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImage = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -43,9 +43,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({
@@ -55,9 +55,9 @@ export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
});
}, [imageDTO, imageViewer, store, t]);
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(() => {
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(async () => {
const { dispatch, getState } = store;
newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
await newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
dispatch(setActiveTab('canvas'));
imageViewer.close();
toast({

View File

@@ -19,6 +19,7 @@ type Props = {
withDownload?: boolean;
withCopy?: boolean;
extraCopyActions?: { label: string; getData: (data: unknown) => unknown }[];
wrapData?: boolean;
} & FlexProps;
const overlayscrollbarsOptions = getOverlayScrollbarsParams({
@@ -29,7 +30,16 @@ const overlayscrollbarsOptions = getOverlayScrollbarsParams({
const ChakraPre = chakra('pre');
const DataViewer = (props: Props) => {
const { label, data, fileName, withDownload = true, withCopy = true, extraCopyActions, ...rest } = props;
const {
label,
data,
fileName,
withDownload = true,
withCopy = true,
extraCopyActions,
wrapData = true,
...rest
} = props;
const dataString = useMemo(() => (isString(data) ? data : formatter.Serialize(data)) ?? '', [data]);
const shift = useShiftModifier();
const clipboard = useClipboard();
@@ -53,7 +63,7 @@ const DataViewer = (props: Props) => {
<Flex bg="base.800" borderRadius="base" flexGrow={1} w="full" h="full" position="relative" {...rest}>
<Box position="absolute" top={0} left={0} right={0} bottom={0} overflow="auto" p={2} fontSize="sm">
<OverlayScrollbarsComponent defer style={overlayScrollbarsStyles} options={overlayscrollbarsOptions}>
<ChakraPre whiteSpace="pre-wrap">{dataString}</ChakraPre>
<ChakraPre whiteSpace={wrapData ? 'pre-wrap' : undefined}>{dataString}</ChakraPre>
</OverlayScrollbarsComponent>
</Box>
<Flex position="absolute" top={0} insetInlineEnd={0} p={2}>

View File

@@ -1,7 +1,7 @@
import type { AppDispatch, RootState } from 'app/store/store';
import { deepClone } from 'common/util/deepClone';
import { selectDefaultIPAdapter, selectDefaultRefImageConfig } from 'features/controlLayers/hooks/addLayerHooks';
import { CanvasEntityAdapterBase } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterBase';
import { CanvasEntityTransformer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityTransformer';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import { canvasReset } from 'features/controlLayers/store/actions';
import {
@@ -20,6 +20,7 @@ import type {
CanvasControlLayerState,
CanvasEntityIdentifier,
CanvasEntityType,
CanvasImageState,
CanvasInpaintMaskState,
CanvasRasterLayerState,
CanvasRegionalGuidanceState,
@@ -34,7 +35,7 @@ import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import type { FieldIdentifier } from 'features/nodes/types/field';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
import { getOptimalDimension } from 'features/parameters/util/optimalDimension';
import { imagesApi } from 'services/api/endpoints/images';
import { imageDTOToFile, imagesApi, uploadImage } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
@@ -142,14 +143,14 @@ export const createNewCanvasEntityFromImage = (arg: {
*
* Using 'raster_layer' for the type and enabling `withResize` replicates the common img2img flow.
*/
export const newCanvasFromImage = (arg: {
export const newCanvasFromImage = async (arg: {
imageDTO: ImageDTO;
type: CanvasEntityType | 'regional_guidance_with_reference_image';
withResize: boolean;
withResize?: boolean;
dispatch: AppDispatch;
getState: () => RootState;
}) => {
const { type, imageDTO, withResize, dispatch, getState } = arg;
const { type, imageDTO, withResize = false, dispatch, getState } = arg;
const state = getState();
const base = selectBboxModelBase(state);
@@ -158,20 +159,29 @@ export const newCanvasFromImage = (arg: {
const optimalDimension = getOptimalDimension(base);
const { width, height } = calculateNewSize(ratio, optimalDimension ** 2, base);
const imageObject = imageDTOToImageObject(imageDTO);
const { x, y } = selectBboxRect(state);
let imageObject: CanvasImageState;
const addInitCallback = (id: string) => {
CanvasEntityAdapterBase.registerInitCallback(async (adapter) => {
if (withResize && (width !== imageDTO.width || height !== imageDTO.height)) {
const resizedImageDTO = await uploadImage({
file: await imageDTOToFile(imageDTO),
image_category: 'general',
is_intermediate: true,
silent: true,
resize_to: { width, height },
});
imageObject = imageDTOToImageObject(resizedImageDTO);
} else {
imageObject = imageDTOToImageObject(imageDTO);
}
const addFitOnLayerInitCallback = (adapterId: string) => {
CanvasEntityTransformer.registerBboxUpdatedCallback((adapter) => {
// Skip the callback if the adapter is not the one we are creating
if (adapter.id !== id) {
return false;
if (adapter.id !== adapterId) {
return Promise.resolve(false);
}
// Fit the layer to the bbox w/ fill strategy
await adapter.transformer.startTransform({ silent: true });
adapter.transformer.fitToBboxFill();
await adapter.transformer.applyTransform();
return true;
adapter.manager.stage.fitBboxAndLayersToStage();
return Promise.resolve(true);
});
};
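The contract implied here: callbacks registered via `registerBboxUpdatedCallback` fire on every bbox recalculation and resolve to `true` once they have handled their target adapter (after which they can be dropped), or `false` to stay registered, which is why the mismatch branch returns `Promise.resolve(false)`. A minimal sketch of such a one-shot registry, with hypothetical `Adapter` and `BboxCallback` types standing in for the real canvas classes:

```tsx
// Sketch of a one-shot callback registry matching the implied contract.
type Adapter = { id: string };
type BboxCallback = (adapter: Adapter) => Promise<boolean>;

const bboxCallbacks = new Set<BboxCallback>();

const registerBboxUpdatedCallback = (cb: BboxCallback): void => {
  bboxCallbacks.add(cb);
};

// Invoked whenever an adapter's bbox is recalculated; callbacks that
// resolve true are considered handled and removed from the registry.
const onBboxUpdated = async (adapter: Adapter): Promise<void> => {
  for (const cb of [...bboxCallbacks]) {
    if (await cb(adapter)) {
      bboxCallbacks.delete(cb);
    }
  }
};
```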
@@ -180,11 +190,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('raster_layer'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRasterLayerState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
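The repeated "does no validation!" warning means the reducer stores whatever it is given, including zero or negative dimensions. A defensive sketch a caller could apply first (this guard is hypothetical, not part of the diff; `dispatch` is assumed to be in scope):

```tsx
// Hypothetical guard before dispatching bboxChangedFromCanvas.
const dispatchValidatedBbox = (rect: { x: number; y: number; width: number; height: number }) => {
  if (!Number.isInteger(rect.width) || !Number.isInteger(rect.height) || rect.width <= 0 || rect.height <= 0) {
    throw new Error(`Invalid bbox dimensions: ${rect.width}x${rect.height}`);
  }
  dispatch(bboxChangedFromCanvas(rect));
};
```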
@@ -195,12 +202,9 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('control_layer'),
objects: [imageObject],
position: { x, y },
controlAdapter: deepClone(initialControlNet),
} satisfies Partial<CanvasControlLayerState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -211,11 +215,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('inpaint_mask'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasInpaintMaskState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -226,11 +227,8 @@ export const newCanvasFromImage = (arg: {
const overrides = {
id: getPrefixedId('regional_guidance'),
objects: [imageObject],
position: { x, y },
} satisfies Partial<CanvasRegionalGuidanceState>;
if (withResize) {
addInitCallback(overrides.id);
}
addFitOnLayerInitCallback(overrides.id);
dispatch(canvasReset());
// The `bboxChangedFromCanvas` reducer does no validation! Careful!
dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));

View File

@@ -3,7 +3,7 @@ import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { useRelatedGroupedModelCombobox } from 'common/hooks/useRelatedGroupedModelCombobox';
import { loraAdded, selectLoRAsSlice } from 'features/controlLayers/store/lorasSlice';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback, useMemo } from 'react';
@@ -37,7 +37,7 @@ const LoRASelect = () => {
[dispatch]
);
const { options, onChange } = useGroupedModelCombobox({
const { options, onChange } = useRelatedGroupedModelCombobox({
modelConfigs,
getIsDisabled,
onChange: _onChange,
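The swap reads as a drop-in replacement: `useRelatedGroupedModelCombobox` accepts the same arguments and returns the same `{ options, onChange }` pair as `useGroupedModelCombobox`, presumably adding related-model grouping on top. A sketch of the shared contract as inferred from this call site (every type below is an assumption reconstructed from the diff, not the real definition):

```tsx
// Inferred shared contract of both hooks; all types are assumptions.
type ComboboxOption = { label: string; value: string };

type GroupedModelComboboxArg<T> = {
  modelConfigs: T[];
  getIsDisabled?: (config: T) => boolean;
  onChange: (config: T | null) => void;
};

type GroupedModelComboboxResult = {
  options: ComboboxOption[];
  onChange: (option: ComboboxOption | null) => void;
};
```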

View File

@@ -17,7 +17,9 @@ export const BASE_COLOR_MAP: Record<BaseModelType, string> = {
flux: 'gold',
cogview4: 'red',
imagen3: 'pink',
imagen4: 'pink',
'chatgpt-4o': 'pink',
'flux-kontext': 'pink',
};
const ModelBaseBadge = ({ base }: Props) => {
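The three new bases reuse pink, matching `imagen3`. Because the map is typed `Record<BaseModelType, string>`, adding a base model type without a color entry fails type-checking, which is presumably why this file appears in the diff. A sketch of how the map might be consumed (the `Badge` usage is an assumption; the diff cuts off before the component body):

```tsx
// Hypothetical component body: look up the badge color by base type.
// Badge is assumed to be re-exported by @invoke-ai/ui-library.
import { Badge } from '@invoke-ai/ui-library';

const ModelBaseBadgeSketch = ({ base }: Props) => (
  <Badge colorScheme={BASE_COLOR_MAP[base]}>{base}</Badge>
);
```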

Some files were not shown because too many files have changed in this diff.