Compare commits

...

645 Commits

Author SHA1 Message Date
psychedelicious
ccc55069d1 chore: bump version to v6.3.0 2025-08-05 10:30:26 +10:00
psychedelicious
61ff9ee3a7 feat(ui): add button to ref image to recall size & optimize for model
This is useful for FLUX Kontext, where you typically want the generation
size to at least roughly match the first ref image size.
2025-08-05 10:28:44 +10:00
psychedelicious
111408c046 feat(mm): add flux krea to starter models 2025-08-05 10:25:14 +10:00
psychedelicious
d7619d465e feat(mm): change anime upscaling model to one that doesn't trigger picklescan 2025-08-05 10:25:14 +10:00
Kent Keirsey
8ad4f6e56d updates & fix 2025-08-05 10:10:52 +10:00
Cursor Agent
bf4899526f Add 'shift+s' hotkey for fitting bbox to canvas
Co-authored-by: kent <kent@invoke.ai>
2025-08-05 10:10:52 +10:00
psychedelicious
6435d265c6 fix(ui): overflow w/ long board names 2025-08-05 10:06:55 +10:00
Linos
3163ef454d translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2065 of 2065 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
Riccardo Giovanetti
7ea636df70 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2037 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2037 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.5% (2036 of 2065 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2014 of 2042 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
Hosted Weblate
1869824803 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

translationBot(ui): update translation files

Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-08-05 10:04:20 +10:00
psychedelicious
66fc8af8a6 fix(ui): reset session button actions
- Do not reset dimensions when resetting generation settings (they are
model-dependent, and we don't change model-dependent settings w/ that
button)
- Do not reset bbox when resetting canvas layers
- Show reset canvas layers button only on canvas tab
- Show reset generation settings button only on canvas or generate tab
2025-08-05 10:01:22 +10:00
psychedelicious
48cb6b12f0 fix(ui): add style ref launchpad using wrong dnd config
I don't think this actually caused problems bc the two DND targets were
very similar, but it was wrong.
2025-08-05 09:57:11 +10:00
psychedelicious
68e30a9864 feat(ui): prevent creating new canvases while staging
Disable these items while staging:
- New Canvas From Image context menu
- Edit image hook & launchpad button
- Generate from Text launchpad button (only while on canvas tab)
- Use a Layout Image launchpad button
2025-08-05 09:57:11 +10:00
psychedelicious
f65dc2c081 chore(ui): typegen 2025-08-05 09:54:00 +10:00
psychedelicious
0cd77443a7 feat(app): add setting to disable picklescan
When unsafe_disable_picklescan is enabled, instead of erroring on
detections or scan failures, a warning is logged.

A warning is also logged on app startup when this setting is enabled.

The setting is disabled by default and there is no change in behaviour
when disabled.
2025-08-05 09:54:00 +10:00
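A minimal sketch of the behaviour described above; the `ScanOutcome` fields and the handler name are illustrative stand-ins, not the app's actual picklescan plumbing:

```python
import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class ScanOutcome:
    """Illustrative stand-in for a real picklescan result."""
    infected: bool
    scan_failed: bool


def handle_scan_outcome(outcome: ScanOutcome, unsafe_disable_picklescan: bool) -> None:
    """Warn instead of raising when the unsafe_disable_picklescan setting is enabled."""
    if not (outcome.infected or outcome.scan_failed):
        return  # clean scan - nothing to do
    if unsafe_disable_picklescan:
        # Setting enabled: log a warning and continue with the install.
        logger.warning("Pickle scan flagged this model, but unsafe_disable_picklescan is enabled; continuing.")
        return
    # Default behaviour (setting disabled): refuse to install.
    raise RuntimeError("Pickle scan flagged this model; refusing to install it.")
```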
Mary Hipp
185ed86424 fix graph building 2025-08-04 12:32:27 -04:00
Mary Hipp
fed817ab83 add image concatenation to flux kontext graph if more than one reference image 2025-08-04 11:27:02 -04:00
Mary Hipp
e0b45db69a remove check in readiness for multiple ref images 2025-08-04 11:27:02 -04:00
psychedelicious
2beac1fb04 chore: bump version to v6.3.0rc2 2025-08-04 23:55:04 +10:00
psychedelicious
e522de33f8 refactor(nodes): roll back latent-space resizing of kontext images 2025-08-04 23:03:12 +10:00
psychedelicious
d591b50c25 feat(ui): use image-space concatenation in FLUX graphs 2025-08-04 23:03:12 +10:00
psychedelicious
b365aad6d8 chore(ui): typegen 2025-08-04 23:03:12 +10:00
psychedelicious
65ad392361 feat(nodes): add node to prep images for FLUX Kontext 2025-08-04 23:03:12 +10:00
psychedelicious
56d75e1c77 feat(backend): use VAE mean encoding for Kontext reference images
Use distribution mean without sampling noise for more stable and
consistent reference image encoding, matching ComfyUI implementation
2025-08-04 23:03:12 +10:00
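A rough sketch of the change, assuming a diffusers-style `AutoencoderKL`; the FLUX-specific scaling/shift factors and latent packing are omitted here:

```python
import torch
from diffusers import AutoencoderKL


@torch.no_grad()
def encode_reference(vae: AutoencoderKL, image: torch.Tensor) -> torch.Tensor:
    """Encode a Kontext reference image deterministically.

    `image` is assumed to be a (B, C, H, W) tensor in the VAE's expected range."""
    posterior = vae.encode(image).latent_dist
    # Before: posterior.sample() - draws Gaussian noise, so repeated runs give
    # slightly different conditioning latents.
    # After: use the distribution mean for stable, repeatable reference latents.
    return posterior.mean
```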
psychedelicious
df77a12efe refactor(backend): use torchvision transforms for Kontext image preprocessing
Replace numpy-based normalization with torchvision transforms for
consistency with other image processing in the codebase
2025-08-04 23:03:12 +10:00
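For illustration, a torchvision-based preprocessing pipeline of the kind the commit describes; the [-1, 1] normalization constants are an assumption, not necessarily the values the app uses:

```python
from torchvision import transforms

# PIL image -> CHW float tensor in [0, 1] -> normalized (here to [-1, 1]).
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
```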
psychedelicious
faf662d12e refactor(backend): use BICUBIC resampling for Kontext images
Switch from LANCZOS to BICUBIC for smoother image resizing to reduce
artifacts in reference image processing
2025-08-04 23:03:12 +10:00
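The change itself is essentially a one-liner in PIL terms; a sketch with a hypothetical helper name and simplified size handling:

```python
from PIL import Image


def resize_reference(image: Image.Image, size: tuple[int, int]) -> Image.Image:
    # Previously LANCZOS; BICUBIC is smoother and reduces artifacts in the
    # downstream reference-image processing.
    return image.resize(size, resample=Image.Resampling.BICUBIC)
```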
psychedelicious
44a7dfd486 fix(backend): use consistent idx_offset=1 for all Kontext images
Changes from per-image index offsets to a consistent value of 1 for
all reference images, matching the ComfyUI implementation
2025-08-04 23:03:12 +10:00
psychedelicious
bb15e5cf06 feat(backend): add spatial tiling for multiple Kontext reference images
Implements intelligent spatial tiling that arranges multiple reference
images in a virtual canvas, choosing between horizontal and vertical
placement to maintain a square-like aspect ratio
2025-08-04 23:03:12 +10:00
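A hedged sketch of the tiling heuristic as described - greedily growing a virtual canvas either rightwards or downwards, whichever keeps it closer to square; the real implementation's placement and tie-breaking rules may differ:

```python
def _squareness(width: int, height: int) -> float:
    """Distance of an aspect ratio from 1.0 (lower is more square)."""
    if height == 0:
        return float("inf")
    return abs(width / height - 1.0)


def tile_reference_images(sizes: list[tuple[int, int]]) -> list[tuple[int, int]]:
    """Return an (x, y) offset for each image on a virtual canvas.

    Purely illustrative - not the actual InvokeAI implementation."""
    canvas_w = canvas_h = 0
    offsets: list[tuple[int, int]] = []
    for w, h in sizes:
        horiz = (canvas_w + w, max(canvas_h, h))  # place to the right
        vert = (max(canvas_w, w), canvas_h + h)   # place below
        if _squareness(*horiz) <= _squareness(*vert):
            offsets.append((canvas_w, 0))
            canvas_w, canvas_h = horiz
        else:
            offsets.append((0, canvas_h))
            canvas_w, canvas_h = vert
    return offsets


# e.g. three 1024x768 refs: origin, then below, then to the right, keeping the canvas squarish
print(tile_reference_images([(1024, 768), (1024, 768), (1024, 768)]))
```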
psychedelicious
1a1c846be3 feat(backend): include reference images in negative CFG pass for Kontext
Maintains consistency between positive and negative passes to prevent
CFG artifacts when using Kontext reference images
2025-08-04 23:03:12 +10:00
psychedelicious
93c896a370 fix(backend): use img_cond_seq to check for Kontext slicing
Was incorrectly checking img_input_ids instead of img_cond_seq
2025-08-04 23:03:12 +10:00
psychedelicious
053d7c8c8e feat(ui): support disabling roarr output styling via localstorage 2025-07-31 23:02:45 +10:00
psychedelicious
5296263954 feat(ui): add missing translations 2025-07-31 22:51:33 +10:00
psychedelicious
a36b70c01c fix(ui): add image name data attr to gallery placeholder image elements
This fixes an issue where gallery's auto-scroll-into-view for selected
images didn't work, and users instead saw an "Unable to find image..."
debug log message in JS console.
2025-07-31 22:48:42 +10:00
psychedelicious
854a2a5a7a chore: bump version to v6.3.0rc1 2025-07-31 14:17:18 +10:00
psychedelicious
f9c64b0609 chore(ui): update whats new 2025-07-31 14:17:18 +10:00
psychedelicious
5889fa536a feat(ui): add migration path for client state from IndexedDB to server-backed storage 2025-07-31 14:09:45 +10:00
Linos
0e71ba892f translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2044 of 2044 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-07-31 13:59:21 +10:00
Riccardo Giovanetti
d766a21223 translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2016 of 2044 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-07-31 13:59:21 +10:00
psychedelicious
5c8c54eab8 chore: ruff 2025-07-31 06:38:48 +10:00
psychedelicious
f296f4525c tidy(ui): disable logging middleware 2025-07-31 06:38:48 +10:00
psychedelicious
7c9ba4cb52 refactor(ui): add persistence gate logic to prevent race conditions with slow rehydration 2025-07-31 06:38:48 +10:00
psychedelicious
6784fd5b43 refactor(ui): use new routes for _all_ client state persistence (no override/custom drivers) 2025-07-31 06:38:48 +10:00
psychedelicious
11d68cc646 chore(ui): typegen 2025-07-31 06:38:48 +10:00
psychedelicious
ea8c877025 refactor(app): move client state persistence to own route, add queue_id 2025-07-31 06:38:48 +10:00
psychedelicious
7a3c2332dd feat(ui): add visual indicator when input field is added to form 2025-07-31 06:33:22 +10:00
psychedelicious
3835fd2f72 feat(ui): zhoosh image comparison ui 2025-07-30 07:20:47 -04:00
psychedelicious
6f8746040c docs(ui): update comments in readiness re: flux kontext via bfl api 2025-07-30 12:26:48 +10:00
psychedelicious
35e3940a09 feat(ui): update warning when using multiple ref images on BFL API kontext
It only supports 1 image.
2025-07-30 12:26:48 +10:00
psychedelicious
415616d83f feat(ui): support multiple kontext ref images in studio 2025-07-30 12:26:48 +10:00
psychedelicious
afb67efef9 chore(ui): typegen 2025-07-30 12:26:48 +10:00
psychedelicious
1ed1fefa60 feat(nodes): support multiple kontext ref images
Images are concatenated in latent space.
2025-07-30 12:26:48 +10:00
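A rough illustration of the idea - joining each reference image's packed latent tokens into one conditioning sequence; the shapes and the omitted position-id bookkeeping are simplified assumptions, not the actual node code:

```python
import torch


def concat_kontext_latents(ref_latents: list[torch.Tensor]) -> torch.Tensor:
    """Concatenate per-image latent token sequences along the sequence axis.

    Assumes each tensor is already packed to (batch, seq_len_i, channels)."""
    return torch.cat(ref_latents, dim=1)


# e.g. two refs with 256 and 400 latent tokens -> one 656-token sequence
a = torch.randn(1, 256, 64)
b = torch.randn(1, 400, 64)
print(concat_kontext_latents([a, b]).shape)  # torch.Size([1, 656, 64])
```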
Ar7ific1al
fa94a05c77 Update CanvasStateApiModule.ts
Add temporary grid snap with ctrl, optional small step with ctrl+shift, while grid snap is off
2025-07-30 12:16:42 +10:00
psychedelicious
7a23d8266f feat(ui): simpler storage driver impl 2025-07-30 05:53:20 +10:00
psychedelicious
a44de079dd perf(ui): instantiate logger for storage error handler once 2025-07-30 05:53:20 +10:00
psychedelicious
c3c1a3edd8 chore(ui): typegen 2025-07-30 05:53:20 +10:00
psychedelicious
ea26b5b147 feat(app): client state persistence endpoints accept stringified data 2025-07-30 05:53:20 +10:00
Eugene Brodsky
4226b741b1 fix(docker) rocm 6.3 based image (#8152)
1. Fix the run script to properly read the GPU_DRIVER
2. Cloned and adjusted the ROCM dockerbuild for docker
3. Adjust the docker-compose.yml to use the cloned dockerbuild
2025-07-29 10:16:42 -04:00
Eugene Brodsky
1424b7c254 Merge branch 'main' into bugfix/heathen711/rocm-docker 2025-07-29 10:12:13 -04:00
psychedelicious
933fb2294c fix(ui): zod rejects any board id besides "none"
Turns out the string autocomplete TS hack does not translate to zod.
Widen the zod schema to any string, but use the hack for the TS type.
2025-07-29 08:45:16 -04:00
psychedelicious
5a181ee0fd build(ui): export loading component 2025-07-29 08:43:03 -04:00
psychedelicious
3b0d59e459 tests(app): update mm tests to test updated behaviour 2025-07-29 16:08:15 +10:00
psychedelicious
fec296e41d fix(app): move (not copy) models from install tmpdir to destination
It's not clear why we were copying downloaded models to the destination
dir instead of moving them. I cannot find a reason for it, and I am able
to install single-file and diffusers models just fine with the change.

This fixes an issue where model installation requires 2x the model's
size (bc we were copying the model over).
2025-07-29 16:08:15 +10:00
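The disk-space point is easy to see with the standard library; a sketch with illustrative paths and a hypothetical helper name, not the installer's actual code:

```python
import shutil
from pathlib import Path


def finalize_install(tmp_model_path: Path, dest_dir: Path) -> Path:
    """Move a downloaded model out of the install tmpdir into its destination."""
    dest_dir.mkdir(parents=True, exist_ok=True)
    dest = dest_dir / tmp_model_path.name
    # Before: copying left the original behind in the tmpdir, so installs briefly
    # needed ~2x the model's size on disk. Moving keeps only one copy.
    shutil.move(str(tmp_model_path), str(dest))
    return dest
```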
Heathen711
ae4e38c6d0 Merge branch 'main' into bugfix/heathen711/rocm-docker 2025-07-28 21:24:34 -07:00
psychedelicious
a9f3f1a4b2 fix(app): handle model files with periods in their name
Previously, we used pathlib's `with_suffix()` method to add a
suffix (e.g. ".safetensors") to a model when installing it.

The intention is to append a suffix to the model's name - but that method
actually replaces everything from the last period in the name onward.

This can cause different models to be installed under the same name!

For example, the FLUX models all end up with the same name:
- "FLUX.1 schnell" + ".safetensors" -> "FLUX.safetensors"
- "FLUX.1 dev" + ".safetensors" -> "FLUX.safetensors"

The fix is easy - append the suffix using string formatting instead of
using pathlib.

This issue has existed for a long time, but was exacerbated in
075345bffd in which I updated the names of
our starter models, adding ".1" to the FLUX model names. Whoops!
2025-07-29 14:15:59 +10:00
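A minimal demonstration of the failure mode and the fix described above; the model names follow the commit's examples and the surrounding installer code is omitted:

```python
from pathlib import Path

name = "FLUX.1 schnell"

# Buggy: with_suffix() replaces everything from the last period in the name
# onward, so ".1 schnell" is treated as the existing suffix and dropped.
buggy = Path(name).with_suffix(".safetensors")
print(buggy)  # FLUX.safetensors

# Fixed: append the suffix with plain string formatting instead.
fixed = Path(f"{name}.safetensors")
print(fixed)  # FLUX.1 schnell.safetensors
```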
psychedelicious
8a73df4fe1 fix(ui): progress image does not hide on viewer with autoswitch disabled 2025-07-29 12:53:45 +10:00
psychedelicious
ea2e1ea8f0 fix(ui): queue count badge renders when left panel collapsed 2025-07-29 12:51:23 +10:00
psychedelicious
e8aa91931d fix(ui): connect metadata to output node for ext api nodes 2025-07-29 06:46:17 +10:00
psychedelicious
8d22a314a6 docs(ui): add some comments for race condition handling 2025-07-29 06:34:08 +10:00
psychedelicious
57ce2b8aa7 chore(ui): lint 2025-07-29 06:34:08 +10:00
psychedelicious
6b810cb3fb fix(ui): race condition w/ queue counts 2025-07-29 06:34:08 +10:00
psychedelicious
4f3a5dcc43 tidy(ui): remove unused progress related logic and components 2025-07-29 06:34:08 +10:00
psychedelicious
c3ae14cf73 fix(ui): ignore events for already-completed queue items 2025-07-29 06:34:08 +10:00
psychedelicious
b9c44b92d5 fix(ui): clear progress images from viewer at the right time 2025-07-29 06:34:08 +10:00
psychedelicious
5a68b4ddbc build(ui): skip logging ctx plugin when running tests 2025-07-29 06:31:30 +10:00
psychedelicious
18a722839b chore(ui): update knip config 2025-07-29 06:31:30 +10:00
psychedelicious
7370cb9be6 build(ui): add vite plugin to add relative file path to logger context 2025-07-29 06:31:30 +10:00
Kent Keirsey
cc4df52f82 feat: server-side client state persistence (#8314)
## Summary

Move client state persistence from browser to server.

- Add a new client state persistence service (and an associated router) to
handle reading and writing client state to the db. The API mirrors that of
LocalStorage/IndexedDB, where the set/get methods both operate on _keys_.
For example, when we persist the canvas state, we send only the new
canvas state to the backend - not the whole app state. (A minimal sketch of
this API follows this entry.)
- The data is very flexibly-typed as a pydantic `JsonValue`. The client
is expected to handle all data parsing/validation (it must do this
anyway, and does this today).
- Change persistence from debounced to throttled at 2 seconds. Maybe
less is OK? Trying to not hammer the server.
- Add new persistence storage driver in client and use it in
redux-remember. It does its best to avoid extraneous persist requests,
caching the last data it persisted and noop-ing if there are no changes.
- Storage driver tracks pending persist actions using ref counts (bc
each slice is persisted independently). If the user navigates away
from the page during a persist request, it will give them the "you may
lose something if you navigate away" alert.
- This "lose something" alert message is not customizable (browser
security reasons).
- The alert is triggered only when the user closes the tab while a
persist network request is mid-flight. It's possible that the user makes
a change and closes the page before we start persisting. In this case,
they will lose the last 2 seconds of data.
- I tried triggering the alert when a persist was waiting to
start, and it felt off.
- Maybe the alert isn't even necessary. Again, you'd lose 2s of data at
most, probably a non-issue. IMO, after trying it, a subtle indicator
somewhere on the page is probably less confusing/intrusive.
- Fix an issue where the `redux-remember` enhancer was added _last_ in
the enhancer chain, which prevented us detecting when a persist has
succeeded. This required a small change to the `unserialize` utility
(used during rehydration) to ensure slices enhanced with `redux-undo`
are set up correctly as they are rehydrated.
- Restructure the redux store code to avoid circular dependencies. I
couldn't figure out how to do this without just smooshing it all into
the main `store.ts` file. Oh well.

Implications:
- Because client state is now on the server, different browsers will
have the same studio state. For example, if I start working on something
in Firefox and then switch to Chrome, I have the same client state.
- Incognito windows won't do anything bc client state is server-side.
- It takes a bit longer for persistence to happen thanks to the
debounce, but there's now an indicator that tells you your stuff isn't
saved yet.
- Resetting the browser won't fix an issue with your studio state. You
must use `Reset Web UI` to fix it (or otherwise hit the appropriate
endpoint). It may be possible to end up in a Catch-22 where you can't
click the button and get stuck w/ a borked studio - I need to think
through this a bit more, might not be an issue.
- It probably takes a bit longer to start up, since we need to retrieve
client state over network instead of directly with browser APIs.

Other notes:
- We could explore adding an "incognito" mode, enabled via
`invokeai.yaml` setting or maybe in the UI. This would temporarily
disable persistence. Actually, I don't think this really makes sense, bc
all the images would be saved to disk.
- The studio state is stored in a single row in the DB. Currently, a
static row ID is used to force the studio state to be a singleton. It is
_possible_ to support multiple saved states. Might be a solve for app
workspaces.

## Related Issues / Discussions

n/a

## QA Instructions

Try it out. It's pretty straightforward. Error states are the main
things to test - for example, network blips. The new server-side
persistence driver is the only real functional change - everything else
is just kinda shuffling things around to support it.

## Merge Plan

n/a

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2025-07-25 12:08:47 -04:00
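A minimal sketch of the key-based persistence API the PR describes, using FastAPI and pydantic's `JsonValue`; the route paths, in-memory store, and function names are illustrative, not the actual InvokeAI router (which persists to the db and, per a later commit, also takes a queue_id):

```python
from fastapi import Body, FastAPI
from pydantic import JsonValue

app = FastAPI()

# Illustrative stand-in for the single-row DB storage described in the PR.
_client_state: dict[str, JsonValue] = {}


@app.get("/api/v1/client_state/{key}")
def get_client_state(key: str) -> JsonValue:
    # The client is responsible for parsing/validating whatever comes back.
    return _client_state.get(key)


@app.post("/api/v1/client_state/{key}")
def set_client_state(key: str, value: JsonValue = Body(...)) -> JsonValue:
    # Mirrors LocalStorage/IndexedDB semantics: set/get operate on individual
    # keys, so persisting the canvas slice sends only that slice, not the
    # whole app state.
    _client_state[key] = value
    return value
```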
Kent Keirsey
1cb4ef05a4 add newline 2025-07-25 11:08:54 -04:00
Kent Keirsey
7da141101c Merge branch 'main' into psyche/feat/app/client-state-persistence 2025-07-25 11:07:17 -04:00
psychedelicious
2571e199c5 tidy(ui): remove unused props 2025-07-25 11:06:18 -04:00
psychedelicious
79e93f905e fix(ui): add separate wrapper components for notes and current image nodes that do not need invocation node context 2025-07-25 11:06:18 -04:00
psychedelicious
f562e4f835 fix(ui): ensure all node context provider wraps all calls to useInvocationNodeContext 2025-07-25 11:06:18 -04:00
psychedelicious
47e220aaf3 perf(ui): imperatively get nodes and edges in autolayout hook 2025-07-25 11:06:18 -04:00
psychedelicious
9365154bfe chore: bump version to v6.2.0 2025-07-25 11:06:18 -04:00
psychedelicious
afc6911c96 chore: bump version to v6.3.0a1 2025-07-25 19:07:08 +10:00
psychedelicious
afa1ee7ffd tidy(ui): enable devmode redux checks 2025-07-25 19:04:21 +10:00
psychedelicious
5a102f6b53 chore(ui): lint 2025-07-25 19:04:21 +10:00
psychedelicious
af345a33f3 fix(ui): infinite loop when setting tile controlnet model 2025-07-25 19:04:21 +10:00
psychedelicious
038b110a82 fix(ui): do not store whole model configs in state 2025-07-25 19:04:21 +10:00
psychedelicious
f3cd49d46e refactor(ui): just manually validate async stuff 2025-07-25 19:04:21 +10:00
psychedelicious
ca7d7c9d93 refactor(ui): work around zod async validation issue 2025-07-25 19:04:21 +10:00
psychedelicious
1addeb4b59 fix(ui): check initial retrieval and set as last persisted 2025-07-25 19:04:21 +10:00
psychedelicious
6ea4884b0c chore(ui): bump zod to latest
Checking if it fixes an issue w/ async validators
2025-07-25 19:04:21 +10:00
psychedelicious
aed9b1013e refactor(ui): use zod for all redux state 2025-07-25 19:04:21 +10:00
psychedelicious
6962536b4a refactor(ui): use zod for all redux state (wip)
needed for confidence w/ state rehydration logic
2025-07-25 19:04:21 +10:00
psychedelicious
7e59d040aa feat(ui): iterate on storage api 2025-07-25 19:04:20 +10:00
psychedelicious
e7c67da2c2 refactor(ui): restructure persistence driver creation to support custom drivers 2025-07-25 19:04:20 +10:00
psychedelicious
c44571bc36 revert(ui): temp changes to main.tsx for testing 2025-07-25 19:04:20 +10:00
psychedelicious
ca257650d4 revert(ui): temp disable eslint rule 2025-07-25 19:04:20 +10:00
psychedelicious
6a9962d2bb git: update gitignore 2025-07-25 19:04:20 +10:00
psychedelicious
9492569a2c wip 2025-07-25 19:04:20 +10:00
psychedelicious
61e711620d chore: ruff 2025-07-25 19:04:20 +10:00
psychedelicious
3cf82505bb tests(app): service mocks 2025-07-25 19:04:20 +10:00
psychedelicious
53bcbc58f5 chore(ui): lint 2025-07-25 19:04:20 +10:00
psychedelicious
42f3990f7a refactor(ui): iterate on persistence 2025-07-25 19:04:20 +10:00
psychedelicious
456205da17 refactor(ui): iterate on persistence 2025-07-25 19:04:20 +10:00
psychedelicious
ca0684700e refactor(ui): alternate approach to slice configs 2025-07-25 19:04:19 +10:00
psychedelicious
6a702821ef chore(ui): typegen 2025-07-25 19:04:19 +10:00
psychedelicious
682d271f6f feat(api): make client state key query not body 2025-07-25 19:04:19 +10:00
psychedelicious
e872c253b1 refactor(ui): cleaner slice definitions 2025-07-25 19:04:19 +10:00
psychedelicious
28633c9983 feat: server-side client state persistence 2025-07-25 19:04:19 +10:00
psychedelicious
70ac58e64a tidy(ui): remove unused props 2025-07-25 18:51:21 +10:00
psychedelicious
e653837236 fix(ui): add separate wrapper components for notes and current image nodes that do not need invocation node context 2025-07-25 18:51:21 +10:00
psychedelicious
2bbfcc2f13 fix(ui): ensure all node context provider wraps all calls to useInvocationNodeContext 2025-07-25 18:51:21 +10:00
psychedelicious
d6e0e439c5 perf(ui): imperatively get nodes and edges in autolayout hook 2025-07-25 18:50:59 +10:00
psychedelicious
26aab60f81 chore: bump version to v6.2.0 2025-07-25 18:41:00 +10:00
Riccardo Giovanetti
7bea2fa11f translationBot(ui): update translation (Italian)
Currently translated at 98.6% (2016 of 2044 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (2015 of 2043 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-07-25 17:15:01 +10:00
psychedelicious
169d58ea4c feat(ui): restore clear queue button
It is accessible in two places:
- The queue actions hamburger menu.
- On the queue tab.

If the clear queue app feature is disabled, it is not shown in either of
those places.
2025-07-23 23:38:53 +10:00
psychedelicious
b53d2250f7 feat(ui): reduce snap tolerance to make it easier to break the snap 2025-07-23 23:05:40 +10:00
psychedelicious
242eea8295 fix(ui): incorrect zoom direction w/ small scroll amounts 2025-07-23 23:05:40 +10:00
psychedelicious
4dabe09e0d tests(ui): remove test for no-longer-valid behaviour 2025-07-23 23:03:02 +10:00
psychedelicious
07fa0d3b77 fix(ui): do not attempt toggle when target panel isn't registered 2025-07-23 23:03:02 +10:00
psychedelicious
e97f82292f tests(ui): add tests for disposable handling 2025-07-23 23:03:02 +10:00
psychedelicious
005bab9035 fix(ui): tab disposables not being added correctly 2025-07-23 23:03:02 +10:00
psychedelicious
409173919c tests(ui): add tests for toggleViewer functionality 2025-07-23 23:03:02 +10:00
psychedelicious
7915180047 feat(ui): restore viewer toggle hotkey 2025-07-23 23:03:02 +10:00
Riccardo Giovanetti
4349b8387d translationBot(ui): update translation (Italian)
Currently translated at 97.9% (2000 of 2042 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-07-23 12:26:48 +10:00
Kent Keirsey
f95b686bdc reposition export button 2025-07-23 11:55:11 +10:00
Mary Hipp
72afb9c3fd fix iterations for all API models 2025-07-22 13:27:35 -04:00
Mary Hipp
f004fc31f1 update whats new 2025-07-22 12:24:10 -04:00
psychedelicious
2aa163b3a2 feat(ui): add default inpaint mask layer on canvas reset 2025-07-22 10:26:57 +10:00
psychedelicious
f40900c173 chore: bump version to v6.1.0 2025-07-22 08:24:31 +10:00
psychedelicious
2c1f2b2873 tidy(ui): move star hotkey into own hook & use reactive state for focus 2025-07-22 08:11:57 +10:00
Kent Keirsey
8418e34480 lint 2025-07-22 08:11:57 +10:00
Kent Keirsey
b548ac0ccf Add Star/Unstar Hotkey and fix hotkey translations 2025-07-22 08:11:57 +10:00
Linos
2af2b8b6c4 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2003 of 2003 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Hosted Weblate
058dc06748 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Riccardo Giovanetti
8acb1c0088 translationBot(ui): update translation (Italian)
Currently translated at 98.7% (1978 of 2003 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.7% (1978 of 2003 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.6% (1968 of 1994 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Hosted Weblate
683732a37c translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Riku
b990eacca0 translationBot(ui): update translation (German)
Currently translated at 62.1% (1251 of 2012 strings)

Co-authored-by: Riku <riku.block@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
RyoKoba
5f7e920deb translationBot(ui): update translation (Japanese)
Currently translated at 99.8% (2007 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 99.8% (2007 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 99.8% (2007 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 99.8% (2007 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 99.8% (2007 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 92.0% (1851 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 92.0% (1851 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 92.0% (1851 of 2011 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 87.4% (1744 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 87.4% (1744 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 81.0% (1616 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

translationBot(ui): update translation (Japanese)

Currently translated at 75.6% (1510 of 1995 strings)

Co-authored-by: RyoKoba <kobayashi_ryo@cyberagent.co.jp>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ja/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Riccardo Giovanetti
55dfdc0a9c translationBot(ui): update translation (Italian)
Currently translated at 97.9% (1953 of 1994 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.7% (1986 of 2011 strings)

translationBot(ui): update translation (Italian)

Currently translated at 98.7% (1970 of 1995 strings)

translationBot(ui): update translation (Italian)

Currently translated at 97.8% (1910 of 1952 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
Linos
10d6d19e17 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (2012 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (2012 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.7% (2006 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.7% (2006 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.5% (2002 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.5% (2002 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 97.8% (1968 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 97.8% (1968 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 97.8% (1968 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 97.8% (1968 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 96.4% (1940 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 96.4% (1940 of 2012 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (1921 of 1921 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (1917 of 1917 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2025-07-22 07:58:19 +10:00
skunkworxdark
15542b954d Fix nodes ui: Make the nodes dot background match the snap-to-grid size and position
Update to Flow.tsx

Changes the size and offset of the dots background to match the snap-to-grid size, and also fixes the background dot pattern alignment.

Currently, the snapGrid is 25x25 and the default background dot gap is 20x20; these do not align. This is fixed by making the gap property of the background the same as the snapGrid.

Additionally, there is a bug in the React Flow background code that incorrectly sets the offset to the centre of the dot pattern when the default offset of 0 is used. To work around this issue, setting the background offset property to the snapGrid size realigns the dot pattern correctly.

I have logged a bug for the React Flow background issue in its repo.
https://github.com/xyflow/xyflow/issues/5405
2025-07-22 07:46:52 +10:00
skunkworxdark
6430d830c1 Update nodes auto layout spacing for snap to grid size
Update workflowSettingsSlice.ts

Change the default settings for auto layout nodeSpacing and layerSpacing to 30 instead of 32. This will make the x position of auto-laid-out nodes land on the snap-to-grid positions.

Because the node width (320) + 30 = 350, which is divisible by the snap-to-grid size of 25.
2025-07-22 07:40:58 +10:00
Kent Keirsey
c3f6389291 fix ruff and remove unused API route 2025-07-22 07:33:48 +10:00
Kent Keirsey
070eef3eff remove whitespace 2025-07-22 07:33:48 +10:00
Kent Keirsey
b14d841d57 Extract util and fix model image logic 2025-07-22 07:33:48 +10:00
Kent Keirsey
dd35ab026a update logic and remove bad test 2025-07-22 07:33:48 +10:00
Cursor Agent
7fc06db8ad Add LoRA model metadata extraction from JSON and PNG files
Co-authored-by: kent <kent@invoke.ai>
2025-07-22 07:33:48 +10:00
psychedelicious
9d1f09c0f3 fix(ui): return wrapped history in redux-remember unserialize
We intermittently get an error like this:
```
TypeError: Cannot read properties of undefined (reading 'length')
```

This error is caused by a `redux-undo`-enhanced slice being rehydrated
without the extra stuff it adds to the slice to make it undoable (e.g.
an array of `past` states, the `present` state, array of `future`
states, and some other metadata).

`redux-undo` may need to check the length of the past/future arrays as
part of its internal functionality. These keys don't exist so we get the
error. I'm not sure _why_ they don't exist - my understanding of
`redux-undo` is that it should be checking and wrapping the state w/ the
history stuff automatically. Seems to be related to `redux-remember` -
may be a race condition.

The solution is to ensure we wrap rehydrated state for undoable slices
as we rehydrate them. I discovered the solution while troubleshooting
#8314 when the changes therein somehow triggered the issue to start
occurring every time instead of rarely.
2025-07-22 07:00:57 +10:00
skunkworxdark
cacfb183a6 Add auto layout controls to node editor (#8239)
* Add auto layout controls using elkjs to node editor

Introduces auto layout functionality for the node editor using elkjs, including a new UI popover for layout options (placement strategy, layering, spacing, direction). Adds related state and actions to workflowSettingsSlice, updates translations, and ensures elkjs is included in optimized dependencies.

* feat(nodes): Improve workflow auto-layout controls and accuracy

- The auto-layout settings panel is updated to use `Select` dropdowns and `NumberInput`
- The layout algorithm now uses the actual rendered dimensions of nodes from the DOM, falling back to estimates only when necessary. This results in a much more accurate and predictable layout.
- The ELKjs library integration is refactored to fix some warnings

* Update useAutoLayout.ts

prettier

* feat(nodes): Improve workflow auto-layout controls and accuracy

- The auto-layout settings panel is updated to use `Select` dropdowns and `NumberInput`
- The layout algorithm now uses the actual rendered dimensions of nodes from the DOM, falling back to estimates only when necessary. This results in a much more accurate and predictable layout.
- The ELKjs library integration is refactored to fix some warnings

* Update useAutoLayout.ts

prettier

* build(ui): import elkjs directly

* updated to use dagrejs for autolayout

updated to use dagrejs - it has fewer layout options but is already included

but this is still WIP as some nodes don't report the height correctly. I am still investigating this...

* Update useAutoLayout.ts

update to fix layout issues

* minor updates

- pretty useAutoLayout.ts
- add missing type import in ViewportControls.tsx
- update pnpm-lock.yaml with elkjs removed

* Update ViewportControls.tsx

pnpm fix

* Fix Frontend check + single node selection fix

Fix Frontend check - remove unused export from workflowSettingsSlice.ts
Update so that if you have a single node selected, it will auto-layout all nodes, since having a single node selected is common and this means you don't have to deselect it first.

* feat(ui): misc improvements for autolayout

- Split popover into own component
- Add util functions to get node w/h
- Use magic wand icon for button
- Fix sizing of input components
- Use CompositeNumberInput instead of base chakra number input
- Add zod schemas for string values and use them in the component to
ensure state integrity

* chore(ui): lint

---------

Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-07-21 14:44:29 +10:00
psychedelicious
564f4f7a60 feat(ui): better icon for invert mask button 2025-07-21 13:47:02 +10:00
Kent Keirsey
113a118fcf fix potential for null data 2025-07-21 13:47:02 +10:00
Kent Keirsey
1f930cdaf2 fix 2025-07-21 13:47:02 +10:00
Kent Keirsey
c490e0ce08 feat(ui):invert mask 2025-07-21 13:47:02 +10:00
Kent Keirsey
7640ee307c feat(ui):Adjust-bbox-to-masks 2025-07-21 13:26:49 +10:00
psychedelicious
1f5f70f898 feat(ui): clean up picker compact view default state handling
- Name it `pickerCompactViewStates` bc it's not exclusive to the model
picker; it is used for all pickers
- Rename the redux action to model an event
- Move the selector to the right file
- Use the selector to derive state for each individual picker
2025-07-21 13:18:09 +10:00
Mary Hipp
1430858112 cleanup 2025-07-21 13:18:09 +10:00
Mary Hipp
48c27ec117 persist model picker compact/expanded state 2025-07-21 13:18:09 +10:00
psychedelicious
af7737e804 fix(ui): context menu on staging area images
There was a subtle issue where the progress image wasn't ever cleared,
preventing the context menu from working on staging area preview images.

The staging area preview images were displaying the last progress image
_on top of_ the result image. Because the image elements were so small,
you wouldn't notice that you were looking at a low-res progress image.
Right clicking a progress image gets you no menu.

If you refresh the page or switch tabs, this would fix itself, because
those actions clear out the progress images. The result image would then
be the topmost element, and the context menu works.

Fixing this without introducing a flash of empty space as the progress
image was hidden required a bit of refactoring. We have to wait for the
result image element to load before clearing out the progress.

Result - progress images appear to "resolve" to result images in the
staging area without any blips or jank, and the context menu works after
that happens.
2025-07-21 13:15:34 +10:00
psychedelicious
3eca0d2ba0 fix(ui): staging area left/right hotkeys 2025-07-18 08:08:15 -04:00
psychedelicious
307259f096 fix(ui): ensure staging area always has the right state and session association 2025-07-18 08:08:15 -04:00
psychedelicious
bed01941a5 fix(ui): ensure we clean up when session id changes 2025-07-18 08:08:15 -04:00
psychedelicious
89fa43a3b6 docs(ui): update StagingAreaApi docstrings 2025-07-18 08:08:15 -04:00
psychedelicious
d8fcb08abf repo: update ignores 2025-07-18 08:08:15 -04:00
psychedelicious
c61bcd9f50 tests(ui): add test suite for StagingAreaApi 2025-07-18 08:08:15 -04:00
psychedelicious
3fb0fcbbfb tidy(ui): move staging area components to correct dir 2025-07-18 08:08:15 -04:00
psychedelicious
db9af5083f tidy(ui): move launchpad components to ui dir 2025-07-18 08:08:15 -04:00
psychedelicious
720f1bb65c chore(ui): rename context2.tsx -> context.tsx 2025-07-18 08:08:15 -04:00
psychedelicious
7dfb318ba2 chore(ui): lint 2025-07-18 08:08:15 -04:00
psychedelicious
9b024da2b4 refactor(ui): move staging area logic outside react
Was running into difficulties reasoning about the logic and couldn't
write tests because it was all in react.

Moved logic outside react, updated the context, and made it testable.
2025-07-18 08:08:15 -04:00
psychedelicious
15ca3b727a wip 2025-07-18 08:08:15 -04:00
psychedelicious
74ca604ae0 fix(ui): unstyled error boundary 2025-07-18 08:08:15 -04:00
psychedelicious
6934b05c85 fix(ui): use invocation context provider in inspector panel 2025-07-18 08:08:15 -04:00
psychedelicious
1a47a5317c chore(ui): update dockview to latest
Remove extraneous fix now that the disableDnd issue is resolved upstream
2025-07-18 08:08:15 -04:00
psychedelicious
bc3ef21c64 chore(ui): bump version to v6.1.0rc2 2025-07-18 08:08:15 -04:00
psychedelicious
e329f5ad43 fix(ui): negative style prompt not recorded in metadata 2025-07-18 06:41:21 +10:00
psychedelicious
e6ad91bf89 chore(ui): update prettier config 2025-07-17 22:04:57 +10:00
psychedelicious
2f586416a5 chore(ui): remove unused pkgs 2025-07-17 22:04:57 +10:00
psychedelicious
33b56f421c chore(ui): lint 2025-07-17 22:04:57 +10:00
psychedelicious
e58ee4c492 chore(ui): upgrade zod 2025-07-17 22:04:57 +10:00
psychedelicious
49691aa07e chore(ui): upgrade rollup vis 2025-07-17 22:04:57 +10:00
psychedelicious
56570f235f chore(ui): actually upgrade storybook 2025-07-17 22:04:57 +10:00
psychedelicious
a2d95cf5b6 chore(ui): upgrade minor bump packages 2025-07-17 22:04:57 +10:00
psychedelicious
704dbfd04a chore(ui): upgrade storybook 2025-07-17 22:04:57 +10:00
psychedelicious
5d9e078043 chore(ui): finish eslint v9 migration 2025-07-17 22:04:57 +10:00
psychedelicious
875cde13ae chore(ui): migrate to eslint v9 (wip) 2025-07-17 22:04:57 +10:00
psychedelicious
77655aed86 chore(ui): update eslint config 2025-07-17 22:04:57 +10:00
psychedelicious
0628b92d63 chore: bump version to v6.1.0rc1 2025-07-17 19:30:38 +10:00
psychedelicious
9e526d00c2 chore(ui): lint 2025-07-17 15:36:24 +10:00
psychedelicious
1a24396be8 feat(ui): styling when nodes have error 2025-07-17 15:36:24 +10:00
psychedelicious
d97e73a565 chore(ui): lint 2025-07-17 15:36:24 +10:00
psychedelicious
55b14c8aaf perf(ui): optimize redux selectors for workflow editor
- Build selectors for each node in a react context so components can
re-use the same selectors
- Cache the selectors in the context
2025-07-17 15:36:24 +10:00
Heathen711
1cdd4b5980 bugfix(docs) link syntax 2025-07-17 04:26:06 +00:00
psychedelicious
79f65e57eb fix(ui): remove unnecessary coalescing operator 2025-07-17 14:21:02 +10:00
Kent Keirsey
b4c8950278 address comments 2025-07-17 14:21:02 +10:00
Kent Keirsey
400b2e9a55 unlint. 2025-07-17 14:21:02 +10:00
Kent Keirsey
3a687c583a lint 2025-07-17 14:21:02 +10:00
Kent Keirsey
833950078d commit tile size controls 2025-07-17 14:21:02 +10:00
Kent Keirsey
e698dcb148 unlint. 2025-07-17 14:21:02 +10:00
Kent Keirsey
218386e077 lint 2025-07-17 14:21:02 +10:00
Kent Keirsey
4426be9e64 commit tile size controls 2025-07-17 14:21:02 +10:00
Heathen711
89ceecc870 bugfix(docker) Ensure the correct extra install. 2025-07-17 04:19:22 +00:00
psychedelicious
86f4cf7857 feat(ui): related embedding styling/tidy 2025-07-17 14:12:29 +10:00
Kent Keirsey
49ae66d94a Added related model support 2025-07-17 14:12:29 +10:00
Cursor Agent
c10865c7ef Reorder embedding options in PromptTriggerSelect component
Co-authored-by: kent <kent@invoke.ai>
2025-07-17 14:12:29 +10:00
Heathen711
687cccdb99 cleanup(docker) 2025-07-17 04:00:42 +00:00
psychedelicious
f3478a189a fix(ui): able to drag empty space in tab bar and detach panels 2025-07-17 13:58:32 +10:00
Heathen711
c84f8465b8 bugfix(pyproject) Convert from dependency groups to extras and update docs to use UV's built-in torch support 2025-07-17 03:58:26 +00:00
psychedelicious
43db29176a chore(ui): lint 2025-07-17 13:52:24 +10:00
psychedelicious
f38922929c docs(ui): comments in modelsLoaded 2025-07-17 13:52:24 +10:00
psychedelicious
7d02c58f86 fix(ui): move <ParamTileControlNetModel /> to <UpscaleTabAdvancedSettingsAccordion /> 2025-07-17 13:52:24 +10:00
Kent Keirsey
6edce8be87 Add scaling in 2025-07-17 13:52:24 +10:00
Kent Keirsey
31f63e38bd lint 2025-07-17 13:52:24 +10:00
Kent Keirsey
78a68ac3a7 Updated 2025-07-17 13:52:24 +10:00
Kent Keirsey
8cd3bcd1c0 Updates 2025-07-17 13:52:24 +10:00
Cursor Agent
264cc5ef46 Add tile ControlNet model selection to upscale settings
Co-authored-by: kent <kent@invoke.ai>
2025-07-17 13:52:24 +10:00
Heathen711
4b5c481b7a Merge remote-tracking branch 'origin' into bugfix/heathen711/rocm-docker 2025-07-17 01:03:03 +00:00
JPPhoto
8bfbea5ed3 Updated __init__.py 2025-07-17 06:33:56 +10:00
JPPhoto
f06a66da07 Updated schema.ts 2025-07-17 06:33:56 +10:00
Jonathan
337cae9b22 Update __init__.py
Added FluxConditioningField, FluxConditioningCollectionOutput, and FluxConditioningCollectionOutput,
2025-07-17 06:33:56 +10:00
Jonathan
bf926bb7d5 Update primitives.py
Added FluxConditioningCollectionOutput
2025-07-17 06:33:56 +10:00
psychedelicious
18ad9a6af3 feat(ui): canvas/viewer panel tabs show progress 2025-07-17 06:20:05 +10:00
psychedelicious
b6ed31c222 feat(ui): clicking invoke switches to viewer tab instead of canvas when save all images to gallery is enabled 2025-07-17 06:20:05 +10:00
psychedelicious
200beb5af5 feat(ui): make save all images to gallery option also bypass canvas 2025-07-17 06:20:05 +10:00
psychedelicious
f82a948bdd refactor(ui): canvas autoswitch logic
Simplify the canvas auto-switch logic to not rely on the preview images
loading. This fixes an issue where offscreen preview images didn't get
auto-switched to. Images are now loaded directly.
2025-07-17 06:20:05 +10:00
psychedelicious
dd03e3ddcd refactor(ui): simplify canvas session logic 2025-07-17 06:20:05 +10:00
psychedelicious
7561b73e8f fix(ui): uppercase file extensions blocked for image upload
Closes #8284
2025-07-17 00:48:36 +10:00
psychedelicious
caa97608c7 fix(ui): aspect ratios out of order 2025-07-16 23:27:37 +10:00
Mary Hipp
72a6d1edc1 simplify description styling 2025-07-16 09:19:33 -04:00
Mary Hipp
b8bf89c2f1 add fallback image and make sure description text is legible for model picker noncompact 2025-07-16 09:19:33 -04:00
psychedelicious
a1ade2b8c0 feat(ui): export apis & actions from package 2025-07-16 08:21:03 -04:00
Eugene Brodsky
4bdcae1f8f fix(docker): switch to pnpm10.x 2025-07-15 13:03:15 -04:00
Jonathan
4b22c84407 Update dev-environment.md
Document the latest changes required to build Invoke 6.0.
2025-07-15 15:21:01 +10:00
Eugene Brodsky
c9daf1db30 (fix) remove timeout from image prompt expansion (#8281) 2025-07-14 11:19:20 -04:00
psychedelicious
06d3cfbe97 gh: update bug report template
- Add required dropdown for install method
- Make browser version optional
- Link to latest release
- Update verbiage for sys info section
2025-07-14 12:18:52 +10:00
psychedelicious
71e4901313 fix(ui): ignore disabled ref images in readiness checks 2025-07-14 10:51:51 +10:00
Heathen711
2caa1b166d Merge remote-tracking branch 'origin' into bugfix/heathen711/rocm-docker 2025-07-13 00:55:39 +00:00
psychedelicious
82fb897b62 chore(ui): lint 2025-07-12 14:56:57 +10:00
psychedelicious
192b00d969 chore: bump version to v6.0.2 2025-07-12 14:56:57 +10:00
psychedelicious
7bb25ef1b4 fix(ui): gallery dnd 2025-07-12 14:56:57 +10:00
psychedelicious
62f52c74a8 fix(ui): linked negative style prompt not passed in
Closes #8256
2025-07-12 10:22:17 +10:00
psychedelicious
97439c1daa fix(ui): native context menu shown on right click on short fat images
Closes #8254
2025-07-12 10:22:17 +10:00
psychedelicious
b23bff1b53 fix(ui): center staging area images 2025-07-12 10:22:17 +10:00
psychedelicious
d9a1efbabf fix(ui): staging area images may be slightly too large 2025-07-12 10:22:17 +10:00
psychedelicious
d4e903ee2d chore: bump version to v6.0.1 2025-07-12 10:22:17 +10:00
Kevin Turner
bb3e5d16d8 feat(Model Manager): refuse to download a file when there's insufficient space 2025-07-12 10:14:25 +10:00
psychedelicious
e62d3f01a8 feat(app): better error message for failed model probe
- Old: No valid config found
- New: Unable to determine model type
2025-07-11 23:35:43 +10:00
psychedelicious
757ecdbf82 build(ui): downgrade idb-keyval
We have increased error rates after updating this package. Let's try
downgrading to see if that fixes the issue.
2025-07-11 15:00:10 +10:00
psychedelicious
694c85b041 fix(ui): language file filenames
Need to replace the underscores w/ dashes - this was missed in #8246.
2025-07-11 14:21:41 +10:00
psychedelicious
988d7ba24c chore: bump version to v6.0.1rc1 2025-07-11 09:05:24 +10:00
psychedelicious
ac981879ef fix(ui): runtime errors related to calling reduce on array iterator
Fix an issue in certain browsers/builds causing a runtime error.

A zod enum has a .options property, which is an array of all the options
for the enum. This is handy for when you need to derive something from a
zod schema.

In this case, we represented the possible focus regions in a zod enum,
then derived a mapping of region names to sets of target HTML elements.
Why isn't important, but suffice it to say, we were using the .options
property for this.

But actually, we were using .options.values(), then calling .reduce() on
that. An array's .values() method returns an _array iterator_. Array
iterators do not have .reduce() methods!

Except, apparently in some environments they do - it depends on the JS
engine and whether or not polyfills for iterator helpers were included
in the build.

Turns out my dev environment - and most user browsers - do provide
.reduce(), so we didn't catch this error. It took a large deployment and
error monitoring to catch it.

I've refactored the code to totally avoid deriving data from zod in this
way.
2025-07-11 08:25:47 +10:00
psychedelicious
fc71849c24 feat(app): expose a cursor, not a connection in db util 2025-07-11 08:20:06 +10:00
psychedelicious
a19aa3b032 feat(app): db abstraction to prevent threading conflicts
- Add a context manager to the SqliteDatabase class which abstracts away
creating a transaction, committing it on success and rolling back on
error.
- Use it everywhere. The context manager should be exited before
returning results. No business logic changes should be present.
2025-07-11 08:20:06 +10:00
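A sketch of the kind of context manager this commit (and fc71849c24 above it) describes, written against the standard sqlite3 module with an illustrative class name; the real `SqliteDatabase` service has more to it:

```python
import sqlite3
import threading
from collections.abc import Iterator
from contextlib import contextmanager


class SqliteDatabaseSketch:
    """Illustrative stand-in for the app's SqliteDatabase service."""

    def __init__(self, db_path: str = ":memory:") -> None:
        self._conn = sqlite3.connect(db_path, check_same_thread=False)
        self._lock = threading.Lock()

    @contextmanager
    def transaction(self) -> Iterator[sqlite3.Cursor]:
        """Yield a cursor; commit on success, roll back on error.

        Per the commit message, callers should exit the context manager
        before returning results."""
        with self._lock:
            cursor = self._conn.cursor()
            try:
                yield cursor
                self._conn.commit()
            except Exception:
                self._conn.rollback()
                raise
            finally:
                cursor.close()


# usage
db = SqliteDatabaseSketch()
with db.transaction() as cur:
    cur.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)")
    cur.execute("INSERT INTO kv (k, v) VALUES (?, ?)", ("greeting", "hello"))
```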
psychedelicious
ef4d5d7377 feat(ui): virtualized list for staging area
Make the staging area a virtualized list so it doesn't choke when there
are a large number (i.e. more than a few hundred) of queue items.
2025-07-11 07:50:57 +10:00
Heathen711
1b6ebede7b Revert "cleanup(github actions)"
This reverts commit 017d38eee2.
2025-07-10 21:10:56 +00:00
Heathen711
017d38eee2 cleanup(github actions) 2025-07-10 21:04:48 +00:00
Heathen711
78eb6b0338 cleanup(docker) 2025-07-10 21:03:57 +00:00
Heathen711
3e8e0f6ddf Merge remote-tracking branch 'origin' into bugfix/heathen711/rocm-docker 2025-07-10 20:14:27 +00:00
Mary Hipp Rogers
6b0dfd8427 dont reset canvas if studio is loaded with canvas destination (#8252)
Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local>
2025-07-10 09:36:41 -04:00
psychedelicious
471c010217 fix(ui): invalid language crashes app
- Apparently locales must use hyphens instead of underscores. This must
have been a fairly recent change that we didn't catch. It caused i18n to
throw for Brazilian Portuguese and both Simplified and Traditional
Mandarin. Change the locales to use the right strings.
- Move the theme + locale provider inside of the error boundary. This
allows errors with locales to be caught by the error boundary instead of
hard-crashing the app. The error screen is unstyled if this happens but
at least it has the reset button.
- Add a migration for the system slice to fix existing users' language
selections. For example, if the user had an incorrect language setting
of `zh_CN`, it will be changed to the correct `zh-CN`.
2025-07-10 14:27:36 +10:00
psychedelicious
b1193022f7 fix(ui): sometimes images added to gallery show as placeholder only
The range-based fetching logic had a subtle bug - it didn't keep track
of what the _current_ visible range is - only the ranges that the user
last scrolled to.

When an image was added to the gallery, the logic saw that the images
had changed, but thought it had already loaded everything it needed to,
so it didn't load the new image.

The updated logic tracks the current visible range separately from the
accumulated scroll ranges to address this issue.
2025-07-10 14:27:36 +10:00
psychedelicious
2152ca092c fix(ui): workaround for dockview bug that lets you drag tabs in certain ways 2025-07-10 14:27:36 +10:00
psychedelicious
ccc62ba56d perf(ui): revised range-based fetching strategy
When the user scrolls in the gallery, we are alerted of the new range of
visible images. Then we fetch those specific images.

Previously, each change of range triggered a throttled function to fetch
that range. The throttle timeout was 100ms.

Now, each change of range appends that range to a list of ranges and
triggers the throttled fetch. The timeout is increased to 500ms, but to
compensate, each fetch handles all ranges that had been accumulated
since the last fetch.

The result is far fewer network requests, but each of them gets more
images.
2025-07-10 14:27:36 +10:00
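The accumulation idea is language-agnostic; a small Python sketch of it (the real code is TypeScript in the UI, and the 500 ms throttle here is simplified to a trailing-edge timer):

```python
import threading
from typing import Callable


class RangeAccumulator:
    """Collect visible-range updates and flush them in one batched fetch,
    at most once per interval. Purely illustrative."""

    def __init__(self, fetch: Callable[[list[tuple[int, int]]], None], interval_s: float = 0.5) -> None:
        self._fetch = fetch
        self._interval_s = interval_s
        self._pending: list[tuple[int, int]] = []
        self._timer: threading.Timer | None = None
        self._lock = threading.Lock()

    def on_range_changed(self, start: int, end: int) -> None:
        with self._lock:
            self._pending.append((start, end))
            if self._timer is None:
                # One flush per interval, covering every range accumulated
                # since the last flush - fewer requests, each fetching more.
                self._timer = threading.Timer(self._interval_s, self._flush)
                self._timer.start()

    def _flush(self) -> None:
        with self._lock:
            ranges, self._pending = self._pending, []
            self._timer = None
        self._fetch(ranges)
```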
psychedelicious
9cf82de8c5 fix(ui): check for absolute value of scroll velocity to handle scrolling up 2025-07-10 14:27:36 +10:00
psychedelicious
aced349152 perf(ui): increase viewport in gallery
This allows us to prefetch more images and reduce how often placeholders
are shown as we fetch more images in the gallery.
2025-07-10 14:27:36 +10:00
Heathen711
8213f62d3b bugfix(docker) render group controls the devices, but it needs to match the host's render group ID 2025-07-09 20:20:59 +00:00
psychedelicious
0d67ee6548 tests(ui): fix logging mock 2025-07-09 23:15:25 +10:00
psychedelicious
03c21d1607 fix(ui): gallery not updating when saving staging area image 2025-07-09 23:15:25 +10:00
psychedelicious
752e8db1f5 tidy(ui): demote logging in nav api to trace 2025-07-09 23:15:25 +10:00
psychedelicious
85fc861dd9 chore(ui): lint 2025-07-09 23:15:25 +10:00
psychedelicious
458cbfd874 fix(ui): selected model not highlighted 2025-07-09 23:15:25 +10:00
psychedelicious
04331c070a fix(ui): set denoise w/h when running flux fill 2025-07-09 23:15:25 +10:00
psychedelicious
632ddf0cb4 tests(ui): update tests for navigation api 2025-07-09 23:15:25 +10:00
psychedelicious
2b193ff416 fix(ui): delete stored state on error & save new state 2025-07-09 23:15:25 +10:00
psychedelicious
96ee394f9e refactor(ui): use dockview's own ser/de for persistence 2025-07-09 23:15:25 +10:00
psychedelicious
0badc80c0c fix(ui): ignore disabled ref images in readiness checks 2025-07-09 23:15:25 +10:00
psychedelicious
78e6cbf96e fix(ui): default tab is generate 2025-07-09 23:15:25 +10:00
psychedelicious
0b969a661b fix(ui): remove dep on focus from useDeleteImage 2025-07-09 23:15:25 +10:00
psychedelicious
6fe47ec9f8 feat(ui): improve ref image model autoswitch logic 2025-07-09 23:15:25 +10:00
Kent Keirsey
3850dd61f8 update comment 2025-07-09 23:15:25 +10:00
Kent Keirsey
75520eaf0f Match Chatgpt4o and kontext names exactly 2025-07-09 23:15:25 +10:00
Kent Keirsey
10e88c58c1 fix and lint 2025-07-09 23:15:25 +10:00
Kent Keirsey
30ed4dbd92 lint 2025-07-09 23:15:25 +10:00
Kent Keirsey
ed9c090f33 fixes 2025-07-09 23:15:25 +10:00
Kent Keirsey
d29f65ed22 lint fixes 2025-07-09 23:15:25 +10:00
Kent Keirsey
2062ec8ac0 Update invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts
Co-authored-by: Mary Hipp Rogers <maryhipp@gmail.com>
2025-07-09 23:15:25 +10:00
Cursor Agent
49e818338a Changes from background composer bc-abfadb27-a265-41a7-b0db-829879f4701e 2025-07-09 23:15:25 +10:00
Cursor Agent
1caab2b9c4 Implement automatic reference image model switching on base model change
Co-authored-by: kent <kent@invoke.ai>
2025-07-09 23:15:25 +10:00
psychedelicious
50079ea349 fix(ui): big red cancel button has diff behaviour than staging discard 2025-07-09 23:15:25 +10:00
psychedelicious
fffa1b24c4 fix(ui): isStaging selector could return wrong query cache 2025-07-09 23:15:25 +10:00
psychedelicious
a6d6170387 fix(ui): discarding 1 item when 2 items left in staging area discards both 2025-07-09 23:15:25 +10:00
psychedelicious
e5fceb0448 fix(ui): whole app scrolls while selecting staging area image 2025-07-09 23:15:25 +10:00
psychedelicious
059baf5b29 chore(ui): lint 2025-07-09 23:15:25 +10:00
psychedelicious
1be8a9a310 fix(ui): add metadata i18nKey to handler; fixes metadata toasts 2025-07-09 23:15:25 +10:00
psychedelicious
7adc33e04d refactor(ui): metadata recall buttons & hotkeys (WIP) 2025-07-09 23:15:25 +10:00
psychedelicious
7f2dd22d47 refactor(ui): metadata recall buttons & hotkeys (WIP) 2025-07-09 23:15:25 +10:00
psychedelicious
bb50f4b8a2 fix(ui): prevent panels from growing on init
This works but I think a better solution is to use dockview's provided
serialization API to store and restore layouts.
2025-07-09 23:15:25 +10:00
psychedelicious
a48958e0d4 chore(ui): lint 2025-07-09 23:15:25 +10:00
psychedelicious
e3a1e9af53 feat(ui): staging area updates
- Smaller staged image previews.
- Move autoswitch buttons to staging area toolbar, remove from settings
popover and the little three-dots menu. Use persisted autoswitch
setting, which is renamed from `defaultAutoSwitch` to
`stagingAreaAutoSwitch`.
- Fix issue with misaligned border radii in staging area preview images.
Required small changes to DndImage and its usage elsewhere.
- Fix issue where staging area toolbar could show up without any
previews in the list.
- Migrate canvas settings slice to use zod schema and inferred types for
its state.
2025-07-09 23:15:25 +10:00
psychedelicious
c6fe11c42f fix(ui): disable gallery hotkeys when in staging area 2025-07-09 23:15:25 +10:00
psychedelicious
4eb1bd67df fix(ui): hide staging area when there are no items 2025-07-09 23:15:25 +10:00
psychedelicious
c376f914d2 chore: bump version v6.0.0 2025-07-09 23:15:25 +10:00
Heathen711
233740a40e Merge remote-tracking branch 'origin' into bugfix/heathen711/rocm-docker 2025-07-09 03:27:42 +00:00
Kent Keirsey
b5d1c47ef7 final link fix 2025-07-09 10:17:38 +10:00
Kent Keirsey
004a52ca65 fix to direct links 2025-07-09 10:17:38 +10:00
Kent Keirsey
b1d5a51ddf add-quantized-kontext-dev 2025-07-09 10:17:38 +10:00
Kent Keirsey
2b2498eaa1 fix prettier quirk 2025-07-08 14:54:29 -04:00
Kent Keirsey
10dda4440e Fix label 2025-07-08 14:54:29 -04:00
Cursor Agent
98f78abefa Add default auto-switch mode setting for canvas sessions
Co-authored-by: kent <kent@invoke.ai>
2025-07-08 14:54:29 -04:00
Mary Hipp Rogers
cc93fa270f update whats new for v6 (#8234)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-08 18:24:33 +00:00
Mary Hipp Rogers
014b27680f fix flux kontext error (#8235)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-08 13:42:48 -04:00
Mary Hipp Rogers
c3d8f875de if on generate tab, recall dimensions instead of bbox (#8233)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-08 13:09:21 -04:00
Mary Hipp Rogers
79f9dc6e4a fix(ui): dont show option to add new layer from if on generate tab (#8231)
* dont show option to add new layer from if on generate tab

* only disable width/height recall if staging AND canvas tab

---------

Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-08 11:46:54 -04:00
psychedelicious
6e1c0c1105 chore: bump version to v6.0.0rc5 2025-07-08 11:26:47 -04:00
Mary Hipp Rogers
0362524040 remove hard-coded flux kontext dev guidance (#8230)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-08 10:26:20 -04:00
psychedelicious
dc6656459b docs(ui): updated comments for navigation api 2025-07-08 07:30:36 -04:00
psychedelicious
3ea1b97f6f fix(ui): protect against getting stuck on tab loading screen 2025-07-08 07:30:36 -04:00
psychedelicious
a7c7405ccc feat(ui): style model picker selected item 2025-07-08 07:28:07 -04:00
psychedelicious
c391f1117a fix(ui): traverse groups when finding selected model in picker 2025-07-08 07:28:07 -04:00
psychedelicious
b1e2cb8401 fix(ui): queue tab list of queue items
Reverted incomplete change to how queue items are listed. In the future
I think we should redo it to work like the gallery. For now, it is back
the way it was in v5.
2025-07-08 07:22:51 -04:00
Emmanuel Ferdman
db6af134b7 fix: resolve FastAPI deprecation warning for example fields
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
2025-07-08 20:54:08 +10:00
Emmanuel Ferdman
7e6cffb00c fix: resolve FastAPI deprecation warning for example fields
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
2025-07-08 20:54:08 +10:00
psychedelicious
5b187bcb00 fix(ui): pull bbox into ref image component 2025-07-08 14:54:43 +10:00
psychedelicious
0843d609a3 feat(ui): add list of warnings in tooltip on ref image 2025-07-08 14:54:43 +10:00
Kent Keirsey
95bd9cef18 Lint 2025-07-08 14:54:43 +10:00
Kent Keirsey
931d6521f6 Adds bbox to ref image button 2025-07-08 14:54:43 +10:00
psychedelicious
e37665ff59 tests(ui): add wiggle room to timeout tests 2025-07-08 12:55:33 +10:00
psychedelicious
56857fbbe6 tests(ui): add tests for panel storage 2025-07-08 12:55:33 +10:00
psychedelicious
43cfb8a574 tests(ui): get tests passing
Still need tests for panel storage.
2025-07-08 12:55:33 +10:00
psychedelicious
05b1682d15 fix(ui): handle collapsed panels when rehydrating their state 2025-07-08 12:55:33 +10:00
psychedelicious
69a08ee7f2 feat(ui): panel state persistence (WIP) 2025-07-08 12:55:33 +10:00
psychedelicious
18212c7d8a feat(ui): clean up navigation API surface and add comments 2025-07-08 12:55:33 +10:00
psychedelicious
7de26f8e69 feat(ui): clean up auto layout context for panels 2025-07-08 12:55:33 +10:00
Kent Keirsey
0652b12a6f Address comments 2025-07-08 12:31:11 +10:00
Kent Keirsey
43a361a00f prettier 2025-07-08 12:31:11 +10:00
Kent Keirsey
cf68ad9cbc update links to playlist instead of video 2025-07-08 12:31:11 +10:00
Kent Keirsey
ec02a39325 fixes 2025-07-08 12:31:11 +10:00
Kent Keirsey
e52d7a05c2 Update support links. 2025-07-08 12:31:11 +10:00
Cursor Agent
c9d4e2b761 Refactor support videos modal to simplify video and playlist handling
Co-authored-by: kent <kent@invoke.ai>
2025-07-08 12:31:11 +10:00
Kent Keirsey
ac26aa9508 fix 2025-07-08 12:31:11 +10:00
Cursor Agent
9ff6ada15b Add support for video playlists in support videos modal
Co-authored-by: kent <kent@invoke.ai>
2025-07-08 12:31:11 +10:00
psychedelicious
e81a115169 chore(ui): lint 2025-07-08 12:23:57 +10:00
Kent Keirsey
52827807de remove ref image from upscale 2025-07-08 12:23:57 +10:00
Kent Keirsey
b631de4cb5 consistency 2025-07-08 12:20:08 +10:00
Kent Keirsey
099ebdbc37 fix 2025-07-08 12:20:08 +10:00
psychedelicious
4de6549be9 refactor(ui): track discarded items instead of using delete method 2025-07-08 12:12:55 +10:00
psychedelicious
368be34949 chore(ui): lint 2025-07-08 12:12:55 +10:00
psychedelicious
5baa4bd916 refactor(ui): use cancelation for staging area (mostly) 2025-07-08 12:12:55 +10:00
psychedelicious
4229377532 fix(app): ensure cancel events are emitted for current item when bulk canceling
There was a bug where bulk cancel operations would cancel the current
queue item in the DB but not emit the status changed events correctly.
2025-07-08 12:12:55 +10:00
psychedelicious
2610772ffd feat(ui): tighten up launchpad content to fit better 2025-07-08 08:57:44 +10:00
psychedelicious
193de6a8f2 feat(ui): add launchpad container component 2025-07-08 08:57:44 +10:00
psychedelicious
7ea343c787 tidy(ui): remove "staging" from the new settings verbiage 2025-07-08 07:10:55 +10:00
Kent Keirsey
12179dabba fix prettier 2025-07-08 07:10:55 +10:00
Cursor Agent
ef135f9923 Add option to save all staging images to gallery in canvas mode
Co-authored-by: kent <kent@invoke.ai>
2025-07-08 07:10:55 +10:00
Mary Hipp
e6c67cc00f update toast for prompt expansion failed 2025-07-08 06:42:00 +10:00
psychedelicious
179b988148 fix(ui): prompt concat derived state recall 2025-07-08 06:37:43 +10:00
psychedelicious
d913a3c85b fix(ui): reset selected ref image when replacing all
Fixes an unhandled error in a selector that can throw.
2025-07-08 06:37:43 +10:00
psychedelicious
e79525c40c docs(ui): update comments 2025-07-08 06:11:32 +10:00
psychedelicious
f409f913ac fix(ui): navigation api usage 2025-07-08 06:11:32 +10:00
Mary Hipp
7a79f61d4c add claude nodes to blacklist for publishing 2025-07-08 05:50:40 +10:00
psychedelicious
ea182c234b chore: bump version to v6.0.0rc4 2025-07-07 22:15:28 +10:00
psychedelicious
f2eee4a82d chore(ui): lint 2025-07-07 22:05:49 +10:00
psychedelicious
e129525306 fix(app): handle None in queue count queries 2025-07-07 22:05:49 +10:00
psychedelicious
ecedfce758 feat(ui): support a min expanded size for collapsible panels 2025-07-07 22:05:49 +10:00
psychedelicious
702cb2cb1e fix(ui): flux kontext special handling for ref image models 2025-07-07 22:05:49 +10:00
psychedelicious
2e8db3cce3 fix(ui): ensure noise is correctly sized 2025-07-07 22:05:49 +10:00
psychedelicious
7845623fa5 fix(ui): session context indexing bug 2025-07-07 22:05:49 +10:00
psychedelicious
e6a25ca7a2 feat(ui): render progress as indeterminate when percentage is 0
When percentage is zero, the progress bar looks the same as it does when
no generation is in progress. Render it as indeterminate (pulsing) when
percentage is zero to indicate that something is happening.
2025-07-07 22:05:49 +10:00
psychedelicious
71e12bcebe fix(ui): when no negative prompt is provided, recall it as null 2025-07-07 22:05:49 +10:00
psychedelicious
863c7eb9e2 fix(ui): metadata display for primitive values 2025-07-07 22:05:49 +10:00
psychedelicious
9945c20d02 refactor(ui): simplify graph builders (WIP) 2025-07-07 22:05:49 +10:00
psychedelicious
e3c1334b1f refactor(ui): simplify graph builders (WIP) 2025-07-07 22:05:49 +10:00
psychedelicious
c143f63ef0 refactor(ui): simplify graph builders (WIP) 2025-07-07 22:05:49 +10:00
psychedelicious
067026a0d0 feat(ui): add autocomplete for Graph.addEdgeToMetadata 2025-07-07 22:05:49 +10:00
psychedelicious
66991334fc refactor(ui): simplify graph builder handling of VAE encode and seed 2025-07-07 22:05:49 +10:00
psychedelicious
b771c3b164 refactor(ui): update graphs to use the right w/h/aspect 2025-07-07 22:05:49 +10:00
psychedelicious
4925694dc1 feat(ui): generate tab has separate w/h/aspect 2025-07-07 22:05:49 +10:00
psychedelicious
0a737ced44 feat(ui): add dimensions to params slice 2025-07-07 22:05:49 +10:00
psychedelicious
8d83caaae0 feat(ui): extract aspect ratios from canvas reducers 2025-07-07 22:05:49 +10:00
psychedelicious
16c8017f1a feat(ui): more resilient gallery scrollIntoView 2025-07-07 22:05:49 +10:00
psychedelicious
61a35f1396 fix(ui): skip optimistic updates for gallery when using search term 2025-07-07 22:05:49 +10:00
psychedelicious
6bd004d868 fix(ui): clear ref images when recalling all
Closes #8202
2025-07-07 22:05:49 +10:00
psychedelicious
b6a6d406c7 chore(ui): typegen 2025-07-07 10:25:24 +10:00
psychedelicious
8e287c32ee chore(ui): lint 2025-07-07 10:25:24 +10:00
psychedelicious
2d8b5e26c2 build(ui): bump vite to latest 2025-07-07 10:25:24 +10:00
psychedelicious
50914b74ee chore(build): update pnpm to v10 2025-07-07 10:25:24 +10:00
psychedelicious
0fc1c33536 chore(ui): knip 2025-07-07 10:25:24 +10:00
psychedelicious
3b08c35f72 chore(ui): update knip config 2025-07-07 10:25:24 +10:00
psychedelicious
607b2561fd chore(ui): bump knip to latest 2025-07-07 10:25:24 +10:00
psychedelicious
d68f922efb fix(ui): restore upscale-tab-specific settings components 2025-07-07 10:25:24 +10:00
psychedelicious
2bbd74d418 feat(ui): restore canvas busy spinner 2025-07-07 10:25:24 +10:00
Heathen711
8c5fcfd0fd cleanup(docker) remove no cache argument 2025-07-05 15:25:26 +00:00
Heathen711
6d7b231196 Merge remote-tracking branch 'origin' into bugfix/heathen711/rocm-docker 2025-07-05 15:22:35 +00:00
Heathen711
31ca314b02 Missed files 2025-07-05 15:21:46 +00:00
Heathen711
0db304f1ee bugfix(uv) Lock torchvision and ensure the docker uses the same rocm version 2025-07-05 03:35:11 +00:00
psychedelicious
3a5392a9ee chore: bump version to v6.0.0rc3 2025-07-04 20:46:08 +10:00
psychedelicious
6f80efe71d fix(ui): bump expandprompt timeout to 15s 2025-07-04 20:46:08 +10:00
psychedelicious
7fac833813 fix(ui): ref image model types again 2025-07-04 20:35:29 +10:00
psychedelicious
b67eb4134d fix(ui): select next image when deleting 2025-07-04 20:35:29 +10:00
psychedelicious
522eeda2e2 fix(ui): ref image model types 2025-07-04 20:35:29 +10:00
psychedelicious
76233241f0 fix(ui): include ref image metadata for flux kontext 2025-07-04 20:35:29 +10:00
psychedelicious
54be9989c5 feat(ui): add 'replace' and 'merge' strategies for upsertMetadata 2025-07-04 20:35:29 +10:00
psychedelicious
0d3af08d27 fix(ui): prompt parsing in useImageActions 2025-07-04 20:35:29 +10:00
psychedelicious
767ac91f2c fix(nodes): revert unnecessary version bump 2025-07-04 20:35:29 +10:00
psychedelicious
68571ece8f tidy(app): remove unused methods 2025-07-04 20:35:29 +10:00
psychedelicious
01100a2b9a fix(ui): check for ref image config compatibility for flux kontext dev 2025-07-04 20:35:29 +10:00
psychedelicious
ce2e6d8ab6 fix(ui): kontext gen mode error tkey 2025-07-04 20:35:29 +10:00
psychedelicious
4887424ca3 chore: ruff 2025-07-04 20:35:29 +10:00
Kent Keirsey
28f6a20e71 format import block 2025-07-04 20:35:29 +10:00
Kent Keirsey
c4142e75b2 fix import 2025-07-04 20:35:29 +10:00
Kent Keirsey
fefe563127 fix resizing and versioning 2025-07-04 20:35:29 +10:00
Mary Hipp
1c72f1ff9f include flux kontext non-api models in ref image dropdown options 2025-07-04 20:35:29 +10:00
Mary Hipp
605cc7369d update flux kontext implementation to include flux kontext dev non-api models 2025-07-04 20:35:29 +10:00
Kent Keirsey
e7ce08cffa ruff format 2025-07-04 19:24:44 +10:00
Kent Keirsey
983cb5ebd2 ruff ruff 2025-07-04 19:24:44 +10:00
Kent Keirsey
52dbdb7118 ruff 2025-07-04 19:24:44 +10:00
Kent Keirsey
71e6f00e10 test fixes
fix

test

fix 2

fix 3

fix 4

yet another

attempt new fix

pray

more pray

lol
2025-07-04 19:24:44 +10:00
psychedelicious
e73150c3e6 feat(ui): improved automatic tab/panel switching on user actions 2025-07-04 19:18:03 +10:00
psychedelicious
f2426c3ab2 fix(ui): type for dnd action 2025-07-04 19:18:03 +10:00
psychedelicious
9d9c4c0f1a tidy(ui): remove unused old metadata impl 2025-07-04 17:53:47 +10:00
psychedelicious
acb930f6b9 fix(ui): flux redux saves metadata 2025-07-04 17:53:47 +10:00
psychedelicious
585b54dc7d feat(ui): ref image recall w/ old canvas metadata backup 2025-07-04 17:53:47 +10:00
psychedelicious
f65affc0ec fix(ui): do not attempt to recall ref images from canvas metadata 2025-07-04 17:53:47 +10:00
psychedelicious
22d574c92a feat(ui): canvas metadata recall 2025-07-04 17:53:47 +10:00
psychedelicious
f23be119fc refactor(ui): migrating to new metadata handlers 2025-07-04 17:53:47 +10:00
psychedelicious
2d06949e80 feat(ui): display cached metadata if it exists instead of always waiting for debounce 2025-07-04 17:53:47 +10:00
psychedelicious
67804313e1 fix(ui): add ref images to metadata 2025-07-04 17:53:47 +10:00
psychedelicious
dc23be117a refactor(ui): simplified metadata parsing (WIP) 2025-07-04 17:53:47 +10:00
psychedelicious
350de058fc refactor(ui): simplified metadata parsing (WIP) 2025-07-04 17:53:47 +10:00
psychedelicious
fd5cd707a3 refactor(ui): simplified metadata parsing (WIP) 2025-07-04 17:53:47 +10:00
psychedelicious
98ecefdce0 refactor(ui): simplified metadata parsing (WIP) 2025-07-04 17:53:47 +10:00
psychedelicious
42688a0993 refactor(ui): metadata parsing 2025-07-04 17:53:47 +10:00
psychedelicious
d94aa4abf7 feat(ui): enforce loader when switching tabs 2025-07-04 16:49:57 +10:00
psychedelicious
69a56aafed feat(ui): do not require root ref to focus on prompt 2025-07-04 16:49:57 +10:00
psychedelicious
56873f6936 feat(ui): queue and models tab are wrapped in dockview panels 2025-07-04 16:49:57 +10:00
psychedelicious
6bc6a680cf tests(ui): NavigationApi 2025-07-04 16:49:57 +10:00
psychedelicious
9a49682f60 feat(ui): utils to get tab/panel keys to prevent typos 2025-07-04 16:49:57 +10:00
psychedelicious
ff84b0a495 refactor(ui): navigation api 2025-07-04 16:49:57 +10:00
psychedelicious
bcced8a5e8 refactor(ui): navigation api 2025-07-04 16:49:57 +10:00
psychedelicious
4a18e9eaea refactor(ui): panel api (WIP) 2025-07-04 16:49:57 +10:00
psychedelicious
dde5bf61be feat(ui): use exact brand colors in loader 2025-07-04 16:49:57 +10:00
psychedelicious
987e401709 perf(ui): lora components 2025-07-04 14:55:52 +10:00
psychedelicious
5c5ac570e3 fix(ui): hardcode literals for run graph errors
When we build, the class names are minified. This hardcodes the values
to literals.
2025-07-04 14:52:08 +10:00
psychedelicious
309903fe0f feat(ui): refetch gallery image names on reconnect
Maybe fixes JP's issue (again)
2025-07-04 14:49:32 +10:00
psychedelicious
f16ea43e9a feat(ui): enable RTK Query's refetchOnReconnect 2025-07-04 14:49:32 +10:00
Heathen711
a3cb3e03f4 bugfix(ci) Clean up more space for typegen check 2025-07-03 21:22:11 +00:00
Heathen711
641a6cfdb7 bugfix(docker) Remove the need for UV index as that is now baked into the uv.lock 2025-07-03 21:15:03 +00:00
Jeremy Gooch
d794aedb43 fix(ui): sets cfg_rescale_multiplier to 0 if there is no default. Also fixes issue with truthiness check causing 0 value to be missed. See https://github.com/invoke-ai/InvokeAI/issues/7584 2025-07-04 06:20:14 +10:00
Heathen711
f27471cea7 bugfix(docker): Use uv.lock for docker, and update to newer index urls. 2025-07-03 20:08:28 +00:00
Heathen711
47508b8d6c bugfix(docker) combined the dockerfiles and reduced image size 2025-07-03 06:01:51 +00:00
psychedelicious
9930440f33 chore: bump version to v6.0.0rc2 2025-07-03 12:35:04 +10:00
psychedelicious
f0a6c4aa1f fix(ui): after canceling a filter, layer loses its content 2025-07-03 12:30:01 +10:00
psychedelicious
f36d22f13c fix(ui): control layers ignored in txt2img 2025-07-03 12:27:05 +10:00
Cursor Agent
e0d7fab524 Fix: Toggle right panel instead of left panel in navigation
Co-authored-by: kent <kent@invoke.ai>
2025-07-03 12:15:22 +10:00
Cursor Agent
f20c230f4a Add drag-and-drop comparison image target to ImageViewerPanel
Co-authored-by: kent <kent@invoke.ai>
2025-07-03 12:10:51 +10:00
Cursor Agent
05c9bc730e Fix canvas export layer bounds calculation in PSD export hook
Co-authored-by: kent <kent@invoke.ai>
2025-07-03 12:07:22 +10:00
Cursor Agent
f17ac06591 Fix PSD export to use layer content bounds and crop canvas
Co-authored-by: kent <kent@invoke.ai>
2025-07-03 12:07:22 +10:00
Kent Keirsey
b35f93d919 Change implementation to check $ispending 2025-07-03 12:04:27 +10:00
Cursor Agent
289d8076d8 Reset canvas session when queue item is canceled in current session
Co-authored-by: kent <kent@invoke.ai>
2025-07-03 12:04:27 +10:00
Heathen711
28e0242907 Fix tagging & remove force reinstall 2025-07-03 01:56:46 +00:00
skunkworxdark
604763d20f Update flux.py
Replace T5Tokenizer with T5TokenizerFast
2025-07-03 08:04:08 +10:00
Mary Hipp
7b452f098d lint 2025-07-02 16:27:44 -04:00
Mary Hipp
b41c18d35f disable dropzone if prompt expansion is disabled 2025-07-02 16:27:44 -04:00
Mary Hipp
8328081333 properly build batch for flux kontext api batches 2025-07-02 14:27:57 -04:00
Mary Hipp Rogers
07517cf2c2 remove pulsing animation (#8181)
Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
2025-07-02 16:12:52 +00:00
Kent Keirsey
6b98ad9095 Only display one icon on disabled state 2025-07-02 10:54:46 -04:00
Kent Keirsey
0de3967e7e remove stray file 2025-07-02 10:54:46 -04:00
Kent Keirsey
1335377fb1 Fixes 2025-07-02 10:54:46 -04:00
Cursor Agent
adbcc191d9 Add reference image enable/disable functionality
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 10:54:46 -04:00
Kent Keirsey
11fc7af1c8 fix 2025-07-02 10:47:01 -04:00
Cursor Agent
6f12fd22b9 Optimize image API invalidation tags and simplify cache invalidation logic
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 10:47:01 -04:00
Cursor Agent
324b6e2af4 Update LoRA select placeholder text for better clarity
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 10:36:45 -04:00
Mary Hipp Rogers
038010a1ca feat(ui): prompt expansion (#8140)
* initializing prompt expansion and putting response in prompt box working for all methods

* properly disable UI and show loading state on prompt box when there is a pending prompt expansion item

* misc wrapup: disable applying prompt templates, don't block textarea resize handle

* update progress to differentiate between prompt expansion and non

* cleanup

* lint

* more cleanup

* add image to background of loading state

* add allowPromptExpansion for front-end gating

* updated readiness text for needing to accept or discard

* fix tsc

* lint

* lint

* refactor(ui): prompt expansion logic

* tidy(ui): remove unnecessary changes

* revert(ui): unused arg on useImageUploadButton

* feat(ui): simplify prompt expansion state

* set pending for dragndrop and context menu

* add readiness logic for generate tab

* missing translation

* update error handling for prompt expansion

---------

Co-authored-by: Mary Hipp <maryhipp@Marys-Air.lan>
Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local>
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-07-02 10:26:48 -04:00
Cursor Agent
2dd1bc54c9 Set brush tool automatically when sending image to canvas
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 10:09:22 -04:00
Kent Keirsey
8b69842678 lint 2025-07-02 09:46:32 -04:00
Kent Keirsey
9821f7c4fc Remove Canvas Session 2025-07-02 09:46:32 -04:00
Cursor Agent
2290ff4ad6 Fix: Focus viewer panel when switching to workflow view mode
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 09:42:21 -04:00
psychedelicious
8d82ad6d0b fix(api): return HTTP errors from session queue handlers 2025-07-02 08:42:06 -04:00
Mary Hipp
8ed9f652e8 lint 2025-07-02 08:25:42 -04:00
Mary Hipp
ee8ed344bd add modelRelationships and aboutModal to disable-able features 2025-07-02 08:25:42 -04:00
Mary Hipp
6d16cfdbe2 missing import 2025-07-02 08:23:13 -04:00
Mary Hipp
3ef2872dda handle flux-kontext models 2025-07-02 08:23:13 -04:00
Cursor Agent
b52ba149b4 Update regional guidance empty state translation key
Co-authored-by: kent <kent@invoke.ai>
2025-07-02 08:09:42 -04:00
Kent Keirsey
c6126c6875 Remove all references to New Sessions entirely. 2025-07-01 17:20:35 -04:00
psychedelicious
3f78ac9295 fix(ui): really do not load disabled tabs
Ensure disabled tabs are never mounted:
- Add didLoad flag to configSlice, default false
- Always merge in config - even if it is empty
- On first merge, set didLoad to true
- Until didLoad is true, mark _all_ tabs as disabled

This gets around an issue where tabs are all enabled for a brief moment
before the config is loaded.

A bit hacky but it works.
2025-07-01 10:52:28 -04:00
psychedelicious
79fea1ac40 chore: bump version to v6.0.0rc1 2025-07-02 00:14:13 +10:00
psychedelicious
6eade5781d feat(ui): remove mini metadata viewer 2025-07-01 23:37:31 +10:00
psychedelicious
3d8f865fb0 fix(ui): initial panel sizing 2025-07-01 23:37:31 +10:00
psychedelicious
dc9cd22d9d feat(ui): better naming for panel apis 2025-07-01 23:37:31 +10:00
psychedelicious
fe115ff8f9 fix(ui): models & queue tab styling 2025-07-01 23:37:31 +10:00
psychedelicious
1d35aad213 feat(ui): move more things over to panel registry 2025-07-01 23:37:31 +10:00
psychedelicious
195d6ce893 refactor(ui): implement global panel registry, replace context-based panel API 2025-07-01 23:37:31 +10:00
psychedelicious
f13ced7ed4 fix(ui): rebase conflicts 2025-07-01 23:37:31 +10:00
psychedelicious
735fc276e5 tidy(ui): clean up focus/layout container 2025-07-01 23:37:31 +10:00
psychedelicious
cd3caf8c30 fix(ui): delete image hotkey 2025-07-01 23:37:31 +10:00
psychedelicious
e9012280ab fix(ui): upscaling tab boards/gallery collapse 2025-07-01 23:37:31 +10:00
psychedelicious
fa72a97794 refactor(ui): even more better focus handling 2025-07-01 23:37:31 +10:00
psychedelicious
e817631ba3 refactor(ui): focus handling for new layout system (WIP) 2025-07-01 23:37:31 +10:00
psychedelicious
d0619c033f feat(ui): add edit button to current image buttons 2025-07-01 16:29:20 +10:00
psychedelicious
6f4850f34f tidy(ui): launchpad tab with icon cleanup 2025-07-01 15:37:06 +10:00
Kent Keirsey
072cd9dee7 Styling Fixes 2025-07-01 15:37:06 +10:00
Cursor Agent
19b6dc1c1f Add custom Launchpad tab with dynamic icon based on active tab
Co-authored-by: kent <kent@invoke.ai>
2025-07-01 15:37:06 +10:00
Cursor Agent
7566d0d6c6 Enhance workflow mode toggle with panel navigation and focus
Co-authored-by: kent <kent@invoke.ai>
2025-07-01 15:27:21 +10:00
psychedelicious
f123888b46 feat(ui): tidy workflows tab launchpad 2025-07-01 15:24:08 +10:00
psychedelicious
aeab7d0cab feat(ui): tidy upscaling tab launchpad 2025-07-01 15:24:08 +10:00
Kent Keirsey
3f1b2c39ab Model Guide link update 2025-07-01 15:24:08 +10:00
Kent Keirsey
72e3a4b4be Fixes & Updates 2025-07-01 15:24:08 +10:00
Kent Keirsey
58e0f80138 Lint 2025-07-01 15:24:08 +10:00
Kent Keirsey
8b8e29d22d Fixes & Styling updates 2025-07-01 15:24:08 +10:00
Kent Keirsey
90201be670 lint 2025-07-01 15:24:08 +10:00
Kent Keirsey
46a5619100 Update all text to translations 2025-07-01 15:24:08 +10:00
Kent Keirsey
d608a7469e Upscale Workflow Launchpad updates & translation updates 2025-07-01 15:24:08 +10:00
Cursor Agent
a7d413d372 Refactor Upscaling and Workflows Launchpad Panels with enhanced UI
Co-authored-by: kent <kent@invoke.ai>
2025-07-01 15:24:08 +10:00
Cursor Agent
f5c9e68dbf Fix division by zero in multi-diffusion pipeline with creativity values
Co-authored-by: kent <kent@invoke.ai>

Revert unnecessary validation changes in multi-diffusion

Fix in python instead of graphbuilder

tidy(ui): remove extraneous comment
2025-07-01 15:00:02 +10:00
psychedelicious
1ded459f03 refactor(ui): clean up related models impl for picker 2025-07-01 14:52:26 +10:00
Kent Keirsey
d9024dc230 linting fixes 2025-07-01 14:52:26 +10:00
Kent Keirsey
40528692c3 Update icon 2025-07-01 14:52:26 +10:00
Kent Keirsey
f35b05be43 simplifies Modelpicker wrapper 2025-07-01 14:52:26 +10:00
Kent Keirsey
29e87fc615 lints 2025-07-01 14:52:26 +10:00
Kent Keirsey
ca26b2718e Small Changes 2025-07-01 14:52:26 +10:00
Cursor Agent
5fa6c0b413 Enhance model picker with related models and improved filtering
Co-authored-by: kent <kent@invoke.ai>
2025-07-01 14:52:26 +10:00
psychedelicious
c37c8c50cd tidy(ui): clean up psd export 2025-07-01 14:12:14 +10:00
Kent Keirsey
f0a4de245d Moved size constants to a reasonable spot... 2025-07-01 14:12:14 +10:00
Kent Keirsey
5db62f8643 Fix Type refs 2025-07-01 14:12:14 +10:00
Kent Keirsey
e1c478f94c Size Updates 2025-07-01 14:12:14 +10:00
Kent Keirsey
11fe3b6332 Comments 2025-07-01 14:12:14 +10:00
Kent Keirsey
e4aae1a591 prettier 2025-07-01 14:12:14 +10:00
Kent Keirsey
4d83d1c56d Linting 2025-07-01 14:12:14 +10:00
Kent Keirsey
34def323e8 Restyle & locate 2025-07-01 14:12:14 +10:00
Kent Keirsey
854956316b Fix export layers 2025-07-01 14:12:14 +10:00
Cursor Agent
91afe7884a Add PSD export functionality for canvas layers
Co-authored-by: kent <kent@invoke.ai>
2025-07-01 14:12:14 +10:00
psychedelicious
8417ee8a7b chore(ui): lint 2025-06-30 23:42:53 +10:00
psychedelicious
a035645ed3 refactor(ui): graph building respects selected tab 2025-06-30 23:42:53 +10:00
psychedelicious
e00ccba7d3 perf(ui): select only loading state for enqueueBatch mutation 2025-06-30 23:42:53 +10:00
psychedelicious
fb883d63aa refactor(ui): dedicated enqueue funcs for each tab 2025-06-30 23:42:53 +10:00
psychedelicious
b113c57fc4 refactor(ui): use redux-provided hooks for accessing store 2025-06-30 23:42:53 +10:00
psychedelicious
7636007349 fix(ui): useAppStore uses correct types 2025-06-30 23:42:53 +10:00
psychedelicious
fda86ae981 fix(app): incorrect node mappings when preparing collect nodes
The previous logic had a subtle python bug related to variable scope in
nested generators.

Python generators are lazily evaluated - the expressions are stored and
only evaluated when needed (e.g. calling next() or list() on them)

The old logic used a variable `s`, which was continually overwritten as
the generator expressions were created. As a result, the final mappings
all use the _final_ value for `s`.

Following the consequences of this down the line, we find that collect
nodes can end up with multiple edges from exactly one of their ancestor
nodes, instead of one edge from each ancestor. Notably, it's only the
source _node_id_ that is affected - the source _fields_ have the correct
values.

So the invalid edges will point to a real node and a real field, but the
field exists on a different node.

---

This can result in a number of cryptic problems - including an error about
incompatible field types:

```
InvalidEdgeError: Field types are incompatible
(31758fd5-14a8-4de7-a840-b73ec1a1b94f.value ->
3459c793-41a2-4d82-9204-7df2d6d099ba.item)
```

Here are the conditions that lead to this error:
- The collect node has at least two incoming connections.
- The two incoming connections come from nodes of different types.
- The nodes both output a value of the same type, but the name of the
output field differs between them.

---

This commit uses non-generator logic to build up the mappings, avoiding
the issue entirely. As a bonus, it is much easier to read.
2025-06-30 23:39:28 +10:00
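A minimal sketch of the lazy-evaluation pitfall described in the commit above, using invented node names rather than the actual graph-preparation code:

```python
# Hypothetical illustration, not the real mapping-building code.
# Generator expressions capture the variable `s` itself, not its value at
# creation time, so every generator here is evaluated later with the final
# value of `s`.
sources = ["node_a", "node_b", "node_c"]

lazy_mappings = []
for s in sources:
    # Stored, not evaluated: `s` is looked up only when the generator runs.
    lazy_mappings.append(f"{s}.value" for _ in range(1))

print([next(gen) for gen in lazy_mappings])
# ['node_c.value', 'node_c.value', 'node_c.value'] - every mapping uses the last `s`

eager_mappings = []
for s in sources:
    # A list comprehension is evaluated immediately, capturing the current `s`.
    eager_mappings.append([f"{s}.value" for _ in range(1)])

print([m[0] for m in eager_mappings])
# ['node_a.value', 'node_b.value', 'node_c.value'] - one mapping per ancestor
```

This mirrors why the commit switches to non-generator logic: building the mappings eagerly removes the dependence on the final value of the loop variable.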
psychedelicious
c02be4bdf4 refactor(app): lean on pydantic to get field types in edge validation logic
Previously we used python's own type introspection utilities to determine
input and output field types. We can use pydantic to get the field types
in a clearer, more direct way.

This improvement also exposed an awkward behaviour in this utility,
where it would return None when a field doesn't exist. I've added a
comment in the code describing the issue, but changing it would require
some significant changes and I don't want to risk breaking anything.
2025-06-30 23:39:28 +10:00
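A hedged sketch of the pydantic-based field-type lookup the commit above describes; the model and helper names here are invented and the repo's real utility may differ:

```python
# Illustrative only - an invented model, not the app's actual invocation types.
# With pydantic v2, a field's declared type can be read directly from the
# model's `model_fields` mapping instead of using typing introspection helpers.
from typing import Any

from pydantic import BaseModel


class ExampleNode(BaseModel):
    value: int
    prompt: str | None = None


def get_field_type(model_cls: type[BaseModel], field_name: str) -> Any:
    # Returns None when the field does not exist, mirroring the awkward
    # behaviour the commit message calls out.
    field_info = model_cls.model_fields.get(field_name)
    return field_info.annotation if field_info is not None else None


print(get_field_type(ExampleNode, "value"))    # <class 'int'>
print(get_field_type(ExampleNode, "missing"))  # None
```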
psychedelicious
ed7772d993 tests(app): add more tests for complex iterate/collect graph topologies 2025-06-30 23:39:28 +10:00
psychedelicious
baae998b5b tests(app): add failing test for collector edge case
squash

squash
2025-06-30 23:39:28 +10:00
DustyShoe
4077ffe595 Fixed a typo 2025-06-30 15:44:23 +10:00
psychedelicious
c1937b1379 chore: ruff 2025-06-30 12:56:51 +10:00
psychedelicious
5c66dfed8e fix(app): remove errant comment from prev impl 2025-06-30 12:56:51 +10:00
psychedelicious
126dcc96c0 feat(ui): clean up logging and comments in runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
cb9c7b4a28 feat(ui): simplify runGraph logic for error handling 2025-06-30 12:56:51 +10:00
psychedelicious
e8c4f49a14 feat(ui): add .wrap() method to WrappedError 2025-06-30 12:56:51 +10:00
psychedelicious
30fffae637 feat(ui): runGraph settlement callbacks can simply return or throw 2025-06-30 12:56:51 +10:00
psychedelicious
4558a292b6 tests(ui): update runGraph tests for separate options 2025-06-30 12:56:51 +10:00
psychedelicious
825d17441c feat(ui): separate options arg for runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
9b16504af9 docs(ui): improved runGraph docstring 2025-06-30 12:56:51 +10:00
psychedelicious
46c92fadff feat(ui): use system logger for runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
c0467b82ac tests(ui): update runGraph tests for new error state 2025-06-30 12:56:51 +10:00
psychedelicious
6dafa67286 feat(ui): improved logging for runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
eb406aa07e feat(ui): mark runGraph error properties public readonly 2025-06-30 12:56:51 +10:00
psychedelicious
d9422ffebd tests(ui): add tests for enriched cancel/timeout errors 2025-06-30 12:56:51 +10:00
psychedelicious
d5c033be4d feat(ui): enrich cancel/timeout errors when queue item cancel fails 2025-06-30 12:56:51 +10:00
psychedelicious
4662cd6f15 fix(ui): await cancelation of queue item before returning 2025-06-30 12:56:51 +10:00
psychedelicious
a740a22613 feat(ui): runGraph uses settle for all promise handling, better comments 2025-06-30 12:56:51 +10:00
psychedelicious
bf4016b4bc feat(ui): add getNodes method to Graph 2025-06-30 12:56:51 +10:00
psychedelicious
6fa7c8c2ee feat(ui): better exception naming and docstrings in runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
ea40f582da tweak(ui): naming, code style 2025-06-30 12:56:51 +10:00
psychedelicious
01caf56251 feat(ui): clearer naming in WrappedError 2025-06-30 12:56:51 +10:00
psychedelicious
42d577e65a tests(ui): check for error instance instead of message 2025-06-30 12:56:51 +10:00
psychedelicious
38d80c9ce5 fix(ui): clear cleanupFunctions when finished calling them 2025-06-30 12:56:51 +10:00
psychedelicious
6acaa8abbf refactor(ui): use deferred promise as workaround to antipattern of async promise executor 2025-06-30 12:56:51 +10:00
psychedelicious
4b84e34599 refactor(ui): better race condition handling in runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
bbd21b1eb2 feat(ui): rename isSettled -> isFinished 2025-06-30 12:56:51 +10:00
psychedelicious
4fa83a6228 feat(ui): better error handling for runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
051876dcff feat(ui): ensure promise always marked as settled, better comments 2025-06-30 12:56:51 +10:00
psychedelicious
8dc6d0b5ae feat(ui): use runGraph in canvas 2025-06-30 12:56:51 +10:00
psychedelicious
40e9624954 tests(ui): edge cases in runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
ae27c83dc4 feat(ui): log when cancelation fails 2025-06-30 12:56:51 +10:00
psychedelicious
161059551b fix(ui): handle errors during cleanup 2025-06-30 12:56:51 +10:00
psychedelicious
c196f8a5d5 tests(ui): add tests for runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
2c6d22664e feat(ui): use DI to make runGraph testable 2025-06-30 12:56:51 +10:00
psychedelicious
b9ce5389ef fix(ui): clean up signal 2025-06-30 12:56:51 +10:00
psychedelicious
d1cbf56695 feat(ui): iterate on runGraph 2025-06-30 12:56:51 +10:00
psychedelicious
e379ac12c3 feat(ui): abstraction to make a graph await-able 2025-06-30 12:56:51 +10:00
psychedelicious
aa10373292 feat(ui): loosen typings for Result 2025-06-30 12:56:51 +10:00
psychedelicious
780f3692a0 chore(ui): typegen 2025-06-30 12:56:51 +10:00
psychedelicious
3604dcfdd1 feat(api): return list of enqueued item ids when enqueuing 2025-06-30 12:56:51 +10:00
Jonathan
2b1cffde5e typegen 2025-06-30 11:28:02 +10:00
Jonathan
83d642ed15 Update flux_denoise.py
Fixed version to 4.0.0
2025-06-30 11:28:02 +10:00
Jonathan
455c73235e Update flux_denoise.py
Updated version, removed WithBoard and WithMetadata
2025-06-30 11:28:02 +10:00
psychedelicious
8efef8da41 feat(ui): workflows styling tweaks 2025-06-30 11:17:29 +10:00
psychedelicious
060a9e57b9 fix(ui): prevent NaN from getting into konva internals 2025-06-30 10:43:11 +10:00
skunkworxdark
099d75ca1e use "\u2581" instead of the character itself for clarity 2025-06-30 10:40:31 +10:00
skunkworxdark
bbb5d68146 Update flux_text_encoder.py
Added tokenizer logging to flux
2025-06-30 10:40:31 +10:00
Heathen711
96523ca01f fix(docker) Add cloned dockerbuild 2025-06-29 22:07:11 +00:00
Heathen711
c10a6fdab1 fix(docker) rocm 2.4.6 based image 2025-06-29 22:02:40 +00:00
psychedelicious
9066dc1839 tidy(nodes): remove extraneous comments & add useful ones 2025-06-27 18:27:46 +10:00
psychedelicious
075345bffd feat(app): add flux kontext dev to starter models 2025-06-27 18:27:46 +10:00
psychedelicious
74d1239c87 chore(ui): typegen 2025-06-27 18:27:46 +10:00
Kent Keirsey
51e1c56636 ruff 2025-06-27 18:27:46 +10:00
Kent Keirsey
ca1df60e54 Explain the Magic 2025-06-27 18:27:46 +10:00
Cursor Agent
7549c1250d Add FLUX Kontext conditioning support for reference images
Co-authored-by: kent <kent@invoke.ai>

Fix Kontext sequence length handling in Flux denoise invocation

Co-authored-by: kent <kent@invoke.ai>

Fix Kontext step callback to handle combined token sequences

Co-authored-by: kent <kent@invoke.ai>

fix ruff

Fix Flux Kontext
2025-06-27 18:27:46 +10:00
psychedelicious
df8751b5a1 fix(ui): remove extraneous rect in stagingareamodule 2025-06-27 15:45:53 +10:00
psychedelicious
651b80b997 fix(ui): remove extraneous syncPlaceholderSize method and calls 2025-06-27 15:45:53 +10:00
psychedelicious
5d236ae4e7 fix(ui): canvas staging waiting for image placeholder sizing and layout 2025-06-27 15:45:53 +10:00
psychedelicious
e5dc606f5e fix(ui): get accurate theme tokens 2025-06-27 15:45:53 +10:00
Kent Keirsey
dc6b8e13bd prettier 2025-06-27 15:45:53 +10:00
Cursor Agent
c1b34e1f11 Standardize UI spacing and constants across canvas and image components
Co-authored-by: kent <kent@invoke.ai>
2025-06-27 15:45:53 +10:00
Cursor Agent
89f1684072 Improve placeholder styling with badge and refined text positioning
Co-authored-by: kent <kent@invoke.ai>
2025-06-27 15:45:53 +10:00
Kent Keirsey
14fbee17a3 Rule of 3rds Composition Guide (#8130)
* Add Rule of 4 composition guide to canvas settings and rendering

Co-authored-by: kent <kent@invoke.ai>

* Rename Rule of 4 Guide to Rule of Thirds in canvas composition guide

Co-authored-by: kent <kent@invoke.ai>

* Updates to comp guide and naming

* Fix reference

* Update translation keys and organize settings.

* revert to previous canvas manager for conflict

* Re-add composition guide.

* Fix lint

* prettier

* feat(ui): improve markup in canvas settings popover

* feat(ui): use brand colors for canvas rule of thirds guide

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2025-06-27 15:05:34 +10:00
psychedelicious
5dbc32e06e feat(ui): minor restyle of style preset list 2025-06-27 14:40:35 +10:00
psychedelicious
23baf61e51 fix(ui): remove extraneous slice migration for style presets 2025-06-27 14:40:35 +10:00
Kent Keirsey
5e55f6074b prettier 2025-06-27 14:40:35 +10:00
Kent Keirsey
f7c555e501 Change to Toggle Tooltip 2025-06-27 14:40:35 +10:00
Cursor Agent
6aa605e811 Add toggle for showing/hiding style preset prompt previews
Co-authored-by: kent <kent@invoke.ai>
2025-06-27 14:40:35 +10:00
psychedelicious
f51014e108 feat(ui): make launchpad button its own component 2025-06-27 14:37:30 +10:00
psychedelicious
9862ba9210 feat(ui): improved starter model buttons & tooltips 2025-06-27 14:37:30 +10:00
psychedelicious
920aea08cc tidy(ui): remove unused translation strings 2025-06-27 14:37:30 +10:00
psychedelicious
39e584297e feat(ui): fix missing translations 2025-06-27 14:37:30 +10:00
psychedelicious
62a14bb935 feat(ui): use enriched starter model metadata 2025-06-27 14:37:30 +10:00
psychedelicious
d7ae2cdf75 chore(ui): typegen 2025-06-27 14:37:30 +10:00
psychedelicious
6172c859ac feat(api): enrich starter model bundle metadata 2025-06-27 14:37:30 +10:00
psychedelicious
b26fb1f617 feat(ui): simplify markup for install models launchpad form 2025-06-27 14:37:30 +10:00
psychedelicious
05167dfd7a feat(ui): use existing design language for install model bundle buttons 2025-06-27 14:37:30 +10:00
psychedelicious
c090ea7387 feat(ui): use existing design language for install model launchpad buttons 2025-06-27 14:37:30 +10:00
psychedelicious
7ba6c67049 feat(ui): named install models tabs 2025-06-27 14:37:30 +10:00
psychedelicious
3de186061d chore(ui): lint 2025-06-27 14:37:30 +10:00
Kent Keirsey
a716381733 Model Launchpad prettier 2025-06-27 14:37:30 +10:00
Kent Keirsey
fb5df06835 Updating to include translations and import fixes 2025-06-27 14:37:30 +10:00
Kent Keirsey
33c597c224 fix lint 2025-06-27 14:37:30 +10:00
Kent Keirsey
19d882d038 Address comments 2025-06-27 14:37:30 +10:00
Kent Keirsey
ee4bc49bd4 Prettier. 2025-06-27 14:37:30 +10:00
Kent Keirsey
188cf37f48 fix lint 2025-06-27 14:37:30 +10:00
Kent Keirsey
15a0a7134c fix circ dependency 2025-06-27 14:37:30 +10:00
Kent Keirsey
22cea0de8b Remove scrap 2025-06-27 14:37:30 +10:00
Kent Keirsey
cd21816d12 Model Launchpad 2025-06-27 14:37:30 +10:00
psychedelicious
605b912ba4 fix(ui): remove noop hook 2025-06-27 11:37:47 +10:00
psychedelicious
52e31112f9 chore(ui): lint 2025-06-27 11:37:47 +10:00
Kent Keirsey
a4c9346cd7 lint 2025-06-27 11:37:47 +10:00
Kent Keirsey
a1647e4c6e Address comments 2025-06-27 11:37:47 +10:00
Kent Keirsey
8c9ca088a7 update tooltip 2025-06-27 11:37:47 +10:00
Cursor Agent
7a7a2e147c Add toggle for non-raster layers with hotkey and UI button 2025-06-27 11:37:47 +10:00
psychedelicious
adf4cc750a fix(ui): Fix LoRA picker to default to current base model architecture (#8135)
Enhance LoRA picker to default filter by current base model architecture

## Summary
Fixes new LoRA picker to auto select the architecture filter for the
current model group

## Related Issues / Discussions
N/A

## QA Instructions

Open LoRA menu with any model group selected. The right models should be
filtered.

## Merge Plan
Merge when ready.

## Checklist

- [X] _The PR has a short but descriptive title, suitable for a
changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2025-06-27 11:21:39 +10:00
psychedelicious
9f1ea9d1c7 fix(ui): use existing GroupStatusMap type 2025-06-27 11:19:24 +10:00
Cursor Agent
571d286506 Enhance LoRA picker to default to current base model architecture
Co-authored-by: kent <kent@invoke.ai>

Enhance LoRA picker to default filter by current base model architecture

Co-authored-by: kent <kent@invoke.ai>
2025-06-26 20:43:43 -04:00
Mary Hipp
1320a2c5f8 add option to override text for no options available 2025-06-26 18:09:57 -04:00
Mary Hipp
26a9b3131d convert LoRA picker to use new model picker component 2025-06-26 18:09:57 -04:00
psychedelicious
d48140b35d fix(ui): regional guidance ref image not selecting 2025-06-26 10:05:25 -04:00
719 changed files with 33858 additions and 20789 deletions


@@ -21,6 +21,20 @@ body:
- label: I have searched the existing issues
required: true
- type: dropdown
id: install_method
attributes:
label: Install method
description: How did you install Invoke?
multiple: false
options:
- "Invoke's Launcher"
- 'Stability Matrix'
- 'Pinokio'
- 'Manual'
validations:
required: true
- type: markdown
attributes:
value: __Describe your environment__
@@ -76,8 +90,8 @@ body:
attributes:
label: Version number
description: |
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: ex. 3.6.1
The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: ex. v6.0.2
validations:
required: true
@@ -85,17 +99,17 @@ body:
id: browser-version
attributes:
label: Browser
description: Your web browser and version.
description: Your web browser and version, if you do not use the Launcher's provided GUI.
placeholder: ex. Firefox 123.0b3
validations:
required: true
required: false
- type: textarea
id: python-deps
attributes:
label: Python dependencies
label: System Information
description: |
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
validations:
required: false


@@ -3,15 +3,15 @@ description: Installs frontend dependencies with pnpm, with caching
runs:
using: 'composite'
steps:
- name: setup node 18
- name: setup node 20
uses: actions/setup-node@v4
with:
node-version: '18'
node-version: '20'
- name: setup pnpm
uses: pnpm/action-setup@v4
with:
version: 8.15.6
version: 10
run_install: false
- name: get pnpm store directory


@@ -39,6 +39,18 @@ jobs:
- name: checkout
uses: actions/checkout@v4
- name: Free up more disk space on the runner
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
run: |
echo "----- Free space before cleanup"
df -h
sudo rm -rf /usr/share/dotnet
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo swapoff /mnt/swapfile
sudo rm -rf /mnt/swapfile
echo "----- Free space after cleanup"
df -h
- name: check for changed files
if: ${{ inputs.always_run != true }}
id: changed-files

2
.gitignore vendored

@@ -190,3 +190,5 @@ installer/update.bat
installer/update.sh
installer/InvokeAI-Installer/
.aider*
.claude/


@@ -22,6 +22,10 @@
## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
# GPU_DRIVER=cuda #| rocm
## If you are using ROCM, you will need to ensure that the render group within the container and the host system use the same group ID.
## To obtain the group ID of the render group on the host system, run `getent group render` and grab the number.
# RENDER_GROUP_ID=
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
# CONTAINER_UID=1000


@@ -5,8 +5,7 @@
FROM docker.io/node:22-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
RUN corepack enable
RUN corepack use pnpm@10.x && corepack enable
WORKDIR /build
COPY invokeai/frontend/web/ ./
@@ -44,7 +43,6 @@ ENV \
UV_MANAGED_PYTHON=1 \
UV_LINK_MODE=copy \
UV_PROJECT_ENVIRONMENT=/opt/venv \
UV_INDEX="https://download.pytorch.org/whl/cu124" \
INVOKEAI_ROOT=/invokeai \
INVOKEAI_HOST=0.0.0.0 \
INVOKEAI_PORT=9090 \
@@ -75,19 +73,17 @@ RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=uv.lock,target=uv.lock \
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
--mount=type=bind,source=invokeai/version,target=invokeai/version \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
fi && \
uv sync --frozen
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"
ulimit -n 30000 && \
uv sync --extra $GPU_DRIVER --frozen
# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids" && groupadd render
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
@@ -106,8 +102,6 @@ COPY invokeai ${INVOKEAI_SRC}/invokeai
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then UV_INDEX="https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then UV_INDEX="https://download.pytorch.org/whl/rocm6.2"; \
fi && \
uv pip install -e .
ulimit -n 30000 && \
uv pip install -e .[$GPU_DRIVER]

136
docker/Dockerfile-rocm-full Normal file

@@ -0,0 +1,136 @@
# syntax=docker/dockerfile:1.4
#### Web UI ------------------------------------
FROM docker.io/node:22-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
RUN corepack enable
WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/pnpm/store \
pnpm install --frozen-lockfile
RUN npx vite build
## Backend ---------------------------------------
FROM library/ubuntu:24.04
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt \
--mount=type=cache,target=/var/lib/apt \
apt update && apt install -y --no-install-recommends \
ca-certificates \
git \
gosu \
libglib2.0-0 \
libgl1 \
libglx-mesa0 \
build-essential \
libopencv-dev \
libstdc++-10-dev \
wget
ENV \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
VIRTUAL_ENV=/opt/venv \
INVOKEAI_SRC=/opt/invokeai \
PYTHON_VERSION=3.12 \
UV_PYTHON=3.12 \
UV_COMPILE_BYTECODE=1 \
UV_MANAGED_PYTHON=1 \
UV_LINK_MODE=copy \
UV_PROJECT_ENVIRONMENT=/opt/venv \
INVOKEAI_ROOT=/invokeai \
INVOKEAI_HOST=0.0.0.0 \
INVOKEAI_PORT=9090 \
PATH="/opt/venv/bin:$PATH" \
CONTAINER_UID=${CONTAINER_UID:-1000} \
CONTAINER_GID=${CONTAINER_GID:-1000}
ARG GPU_DRIVER=cuda
# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/
# Install python & allow non-root user to use it by traversing the /root dir without read permissions
RUN --mount=type=cache,target=/root/.cache/uv \
uv python install ${PYTHON_VERSION} && \
# chmod --recursive a+rX /root/.local/share/uv/python
chmod 711 /root
WORKDIR ${INVOKEAI_SRC}
# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
# this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
--mount=type=bind,source=invokeai/version,target=invokeai/version \
ulimit -n 30000 && \
uv sync --extra $GPU_DRIVER --frozen
RUN --mount=type=cache,target=/var/cache/apt \
--mount=type=cache,target=/var/lib/apt \
if [ "$GPU_DRIVER" = "rocm" ]; then \
wget -O /tmp/amdgpu-install.deb \
https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
apt install -y /tmp/amdgpu-install.deb && \
apt update && \
amdgpu-install --usecase=rocm -y && \
apt-get autoclean && \
apt clean && \
rm -rf /tmp/* /var/tmp/* && \
usermod -a -G render ubuntu && \
usermod -a -G video ubuntu && \
echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
ldconfig && \
update-alternatives --auto rocm; \
fi
## Heathen711: Leaving this for review input, will remove before merge
# RUN --mount=type=cache,target=/var/cache/apt \
# --mount=type=cache,target=/var/lib/apt \
# if [ "$GPU_DRIVER" = "rocm" ]; then \
# groupadd render && \
# usermod -a -G render ubuntu && \
# usermod -a -G video ubuntu; \
# fi
## Link amdgpu.ids for ROCm builds
## contributed by https://github.com/Rubonnek
# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
# ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web"]
# --link requires buldkit w/ dockerfile syntax 1.4, does not work with podman
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
# add sources last to minimize image changes on code changes
COPY invokeai ${INVOKEAI_SRC}/invokeai
# this should not increase image size because we've already installed dependencies
# in a previous layer
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
--mount=type=bind,source=uv.lock,target=uv.lock \
ulimit -n 30000 && \
uv pip install -e .[$GPU_DRIVER]


@@ -47,8 +47,9 @@ services:
invokeai-rocm:
<<: *invokeai
devices:
- /dev/kfd:/dev/kfd
- /dev/dri:/dev/dri
environment:
- AMD_VISIBLE_DEVICES=all
- RENDER_GROUP_ID=${RENDER_GROUP_ID}
runtime: amd
profiles:
- rocm


@@ -21,6 +21,17 @@ _=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
# ensure the UID is correct
usermod -u ${USER_ID} ${USER} 1>/dev/null
## ROCM specific configuration
# render group within the container must match the host render group
# otherwise the container will not be able to access the host GPU.
if [[ -v "RENDER_GROUP_ID" ]] && [[ ! -z "${RENDER_GROUP_ID}" ]]; then
# ensure the render group exists
groupmod -g ${RENDER_GROUP_ID} render
usermod -a -G render ${USER}
usermod -a -G video ${USER}
fi
### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat.
# but it is useful to have the full SSH server e.g. on Runpod.


@@ -13,7 +13,7 @@ run() {
# parse .env file for build args
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
profile="$(awk -F '=' '/GPU_DRIVER=/ {print $2}' .env)"
# default to 'cuda' profile
[[ -z "$profile" ]] && profile="cuda"
@@ -30,7 +30,7 @@ run() {
printf "%s\n" "starting service $service_name"
docker compose --profile "$profile" up -d "$service_name"
docker compose logs -f
docker compose --profile "$profile" logs -f
}
run


@@ -41,7 +41,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
With the modifications made, the install command should look something like this:
```sh
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu126 --reinstall
uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
```
6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
@@ -50,11 +50,11 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.
7. Install the frontend dev toolchain:
7. Install the frontend dev toolchain, paying attention to versions:
- [`nodejs`](https://nodejs.org/) (v20+)
- [`nodejs`](https://nodejs.org/) (tested on LTS, v22)
- [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
- [`pnpm`](https://pnpm.io/installation) (tested on v10)
8. Do a production build of the frontend:


@@ -297,7 +297,7 @@ Migration logic is in [migrations.ts].
<!-- links -->
[pydantic]: https://github.com/pydantic/pydantic 'pydantic'
[zod]: https://github.com/colinhacks/zod 'zod/v4'
[zod]: https://github.com/colinhacks/zod 'zod'
[openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types'
[reactflow]: https://github.com/xyflow/xyflow 'reactflow'
[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions


@@ -69,34 +69,34 @@ The following commands vary depending on the version of Invoke being installed a
- If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
- If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
7. Determine the torch backend to use for installation, if any. This is necessary to get the right version of torch installed. This is acheived by using [UV's built in torch support.](https://docs.astral.sh/uv/guides/integration/pytorch/#automatic-backend-selection)
=== "Invoke v5.12 and later"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- **In all other cases, do not use an index.**
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu128`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.3`.
- **In all other cases, do not use a torch backend.**
=== "Invoke v5.10.0 to v5.11.0"
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
- If you are on Windows or Linux with an Nvidia GPU, use `--torch-backend=cu126`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.2.4`.
- **In all other cases, do not use an index.**
=== "Invoke v5.0.0 to v5.9.1"
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm6.1`.
- **In all other cases, do not use an index.**
=== "Invoke v4"
- If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
- If you are on Windows with an Nvidia GPU, use `--torch-backend=cu124`.
- If you are on Linux with no GPU, use `--torch-backend=cpu`.
- If you are on Linux with an AMD GPU, use `--torch-backend=rocm5.2`.
- **In all other cases, do not use an index.**
8. Install the `invokeai` package. Substitute the package specifier and version.
@@ -105,10 +105,10 @@ The following commands vary depending on the version of Invoke being installed a
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --force-reinstall
```
If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
If you determined you needed to use a torch backend in the previous step, you'll need to set the backend like this:
```sh
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.12 --python-preference only-managed --torch-backend=<VERSION> --force-reinstall
```
9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:

View File

@@ -35,7 +35,7 @@ More detail on system requirements can be found [here](./requirements.md).
## Step 2: Download
Download the most launcher for your operating system:
Download the most recent launcher for your operating system:
- [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
- [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)

View File

@@ -10,6 +10,7 @@ from invokeai.app.services.board_images.board_images_default import BoardImagesS
from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
from invokeai.app.services.boards.boards_default import BoardService
from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
from invokeai.app.services.client_state_persistence.client_state_persistence_sqlite import ClientStatePersistenceSqlite
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.download.download_default import DownloadQueueService
from invokeai.app.services.events.events_fastapievents import FastAPIEventService
@@ -151,6 +152,7 @@ class ApiDependencies:
style_preset_records = SqliteStylePresetRecordsStorage(db=db)
style_preset_image_files = StylePresetImageFileStorageDisk(style_presets_folder / "images")
workflow_thumbnails = WorkflowThumbnailFileStorageDisk(workflow_thumbnails_folder)
client_state_persistence = ClientStatePersistenceSqlite(db=db)
services = InvocationServices(
board_image_records=board_image_records,
@@ -181,6 +183,7 @@ class ApiDependencies:
style_preset_records=style_preset_records,
style_preset_image_files=style_preset_image_files,
workflow_thumbnails=workflow_thumbnails,
client_state_persistence=client_state_persistence,
)
ApiDependencies.invoker = Invoker(services)

View File

@@ -0,0 +1,58 @@
from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter
from invokeai.app.api.dependencies import ApiDependencies
from invokeai.backend.util.logging import logging
client_state_router = APIRouter(prefix="/v1/client_state", tags=["client_state"])
@client_state_router.get(
"/{queue_id}/get_by_key",
operation_id="get_client_state_by_key",
response_model=str | None,
)
async def get_client_state_by_key(
queue_id: str = Path(description="The queue id to perform this operation on"),
key: str = Query(..., description="Key to get"),
) -> str | None:
"""Gets the client state"""
try:
return ApiDependencies.invoker.services.client_state_persistence.get_by_key(queue_id, key)
except Exception as e:
logging.error(f"Error getting client state: {e}")
raise HTTPException(status_code=500, detail="Error setting client state")
@client_state_router.post(
"/{queue_id}/set_by_key",
operation_id="set_client_state",
response_model=str,
)
async def set_client_state(
queue_id: str = Path(description="The queue id to perform this operation on"),
key: str = Query(..., description="Key to set"),
value: str = Body(..., description="Stringified value to set"),
) -> str:
"""Sets the client state"""
try:
return ApiDependencies.invoker.services.client_state_persistence.set_by_key(queue_id, key, value)
except Exception as e:
logging.error(f"Error setting client state: {e}")
raise HTTPException(status_code=500, detail="Error setting client state")
@client_state_router.post(
"/{queue_id}/delete",
operation_id="delete_client_state",
responses={204: {"description": "Client state deleted"}},
)
async def delete_client_state(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> None:
"""Deletes the client state"""
try:
ApiDependencies.invoker.services.client_state_persistence.delete(queue_id)
except Exception as e:
logging.error(f"Error deleting client state: {e}")
raise HTTPException(status_code=500, detail="Error deleting client state")
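A minimal sketch of how a client might exercise these new endpoints over HTTP. It assumes a local server at `http://localhost:9090` and the `default` queue id; the key and value are hypothetical examples.

```python
# Sketch only: assumes a running Invoke server at localhost:9090 and the
# "default" queue id. "canvas_settings" and its value are made-up examples.
import requests

BASE = "http://localhost:9090/api/v1/client_state/default"

# Persist a stringified value under a key.
requests.post(
    f"{BASE}/set_by_key",
    params={"key": "canvas_settings"},
    json='{"brushSize": 50}',  # the request body is the stringified value
)

# Read it back (returns the string, or None if the key is unset).
value = requests.get(f"{BASE}/get_by_key", params={"key": "canvas_settings"}).json()
print(value)

# Clear all client state.
requests.post(f"{BASE}/delete")
```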

View File

@@ -72,7 +72,7 @@ async def upload_image(
resize_to: Optional[str] = Body(
default=None,
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
example='"[1024,1024]"',
examples=['"[1024,1024]"'],
),
metadata: Optional[str] = Body(
default=None,

View File

@@ -41,6 +41,7 @@ from invokeai.backend.model_manager.starter_models import (
STARTER_BUNDLES,
STARTER_MODELS,
StarterModel,
StarterModelBundle,
StarterModelWithoutDependencies,
)
@@ -291,7 +292,7 @@ async def get_hugging_face_models(
)
async def update_model_record(
key: Annotated[str, Path(description="Unique key of model")],
changes: Annotated[ModelRecordChanges, Body(description="Model config", example=example_model_input)],
changes: Annotated[ModelRecordChanges, Body(description="Model config", examples=[example_model_input])],
) -> AnyModelConfig:
"""Update a model's config."""
logger = ApiDependencies.invoker.services.logger
@@ -449,7 +450,7 @@ async def install_model(
access_token: Optional[str] = Query(description="access token for the remote resource", default=None),
config: ModelRecordChanges = Body(
description="Object containing fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
example={"name": "string", "description": "string"},
examples=[{"name": "string", "description": "string"}],
),
) -> ModelInstallJob:
"""Install a model using a string identifier.
@@ -799,7 +800,7 @@ async def convert_model(
class StarterModelResponse(BaseModel):
starter_models: list[StarterModel]
starter_bundles: dict[str, list[StarterModel]]
starter_bundles: dict[str, StarterModelBundle]
def get_is_installed(
@@ -833,7 +834,7 @@ async def get_starter_models() -> StarterModelResponse:
model.dependencies = missing_deps
for bundle in starter_bundles.values():
for model in bundle:
for model in bundle.models:
model.is_installed = get_is_installed(model, installed_models)
# Remove already-installed dependencies
missing_deps: list[StarterModelWithoutDependencies] = []

View File

@@ -1,6 +1,6 @@
from typing import Optional
from fastapi import Body, Path, Query
from fastapi import Body, HTTPException, Path, Query
from fastapi.routing import APIRouter
from pydantic import BaseModel, Field
@@ -22,6 +22,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
RetryItemsResult,
SessionQueueCountsByDestination,
SessionQueueItem,
SessionQueueItemNotFoundError,
SessionQueueStatus,
)
from invokeai.app.services.shared.pagination import CursorPaginatedResults
@@ -59,10 +60,12 @@ async def enqueue_batch(
),
) -> EnqueueBatchResult:
"""Processes a batch and enqueues the output graphs for execution."""
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
queue_id=queue_id, batch=batch, prepend=prepend
)
try:
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
queue_id=queue_id, batch=batch, prepend=prepend
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while enqueuing batch: {e}")
@session_queue_router.get(
@@ -82,14 +85,17 @@ async def list_queue_items(
) -> CursorPaginatedResults[SessionQueueItem]:
"""Gets cursor-paginated queue items"""
return ApiDependencies.invoker.services.session_queue.list_queue_items(
queue_id=queue_id,
limit=limit,
status=status,
cursor=cursor,
priority=priority,
destination=destination,
)
try:
return ApiDependencies.invoker.services.session_queue.list_queue_items(
queue_id=queue_id,
limit=limit,
status=status,
cursor=cursor,
priority=priority,
destination=destination,
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all items: {e}")
@session_queue_router.get(
@@ -104,11 +110,13 @@ async def list_all_queue_items(
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
) -> list[SessionQueueItem]:
"""Gets all queue items"""
return ApiDependencies.invoker.services.session_queue.list_all_queue_items(
queue_id=queue_id,
destination=destination,
)
try:
return ApiDependencies.invoker.services.session_queue.list_all_queue_items(
queue_id=queue_id,
destination=destination,
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue items: {e}")
@session_queue_router.put(
@@ -120,7 +128,10 @@ async def resume(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> SessionProcessorStatus:
"""Resumes session processor"""
return ApiDependencies.invoker.services.session_processor.resume()
try:
return ApiDependencies.invoker.services.session_processor.resume()
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while resuming queue: {e}")
@session_queue_router.put(
@@ -132,7 +143,10 @@ async def Pause(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> SessionProcessorStatus:
"""Pauses session processor"""
return ApiDependencies.invoker.services.session_processor.pause()
try:
return ApiDependencies.invoker.services.session_processor.pause()
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while pausing queue: {e}")
@session_queue_router.put(
@@ -144,7 +158,10 @@ async def cancel_all_except_current(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> CancelAllExceptCurrentResult:
"""Immediately cancels all queue items except in-processing items"""
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
try:
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling all except current: {e}")
@session_queue_router.put(
@@ -156,7 +173,10 @@ async def delete_all_except_current(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> DeleteAllExceptCurrentResult:
"""Immediately deletes all queue items except in-processing items"""
return ApiDependencies.invoker.services.session_queue.delete_all_except_current(queue_id=queue_id)
try:
return ApiDependencies.invoker.services.session_queue.delete_all_except_current(queue_id=queue_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting all except current: {e}")
@session_queue_router.put(
@@ -169,7 +189,12 @@ async def cancel_by_batch_ids(
batch_ids: list[str] = Body(description="The list of batch_ids to cancel all queue items for", embed=True),
) -> CancelByBatchIDsResult:
"""Immediately cancels all queue items from the given batch ids"""
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(queue_id=queue_id, batch_ids=batch_ids)
try:
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(
queue_id=queue_id, batch_ids=batch_ids
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by batch id: {e}")
@session_queue_router.put(
@@ -182,9 +207,12 @@ async def cancel_by_destination(
destination: str = Query(description="The destination to cancel all queue items for"),
) -> CancelByDestinationResult:
"""Immediately cancels all queue items with the given origin"""
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
queue_id=queue_id, destination=destination
)
try:
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
queue_id=queue_id, destination=destination
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by destination: {e}")
@session_queue_router.put(
@@ -197,7 +225,10 @@ async def retry_items_by_id(
item_ids: list[int] = Body(description="The queue item ids to retry"),
) -> RetryItemsResult:
"""Immediately cancels all queue items with the given origin"""
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
try:
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while retrying queue items: {e}")
@session_queue_router.put(
@@ -211,11 +242,14 @@ async def clear(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> ClearResult:
"""Clears the queue entirely, immediately canceling the currently-executing session"""
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
if queue_item is not None:
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
return clear_result
try:
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
if queue_item is not None:
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
return clear_result
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while clearing queue: {e}")
@session_queue_router.put(
@@ -229,7 +263,10 @@ async def prune(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> PruneResult:
"""Prunes all completed or errored queue items"""
return ApiDependencies.invoker.services.session_queue.prune(queue_id)
try:
return ApiDependencies.invoker.services.session_queue.prune(queue_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while pruning queue: {e}")
@session_queue_router.get(
@@ -243,7 +280,10 @@ async def get_current_queue_item(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> Optional[SessionQueueItem]:
"""Gets the currently execution queue item"""
return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
try:
return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting current queue item: {e}")
@session_queue_router.get(
@@ -257,7 +297,10 @@ async def get_next_queue_item(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> Optional[SessionQueueItem]:
"""Gets the next queue item, without executing it"""
return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
try:
return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting next queue item: {e}")
@session_queue_router.get(
@@ -271,9 +314,12 @@ async def get_queue_status(
queue_id: str = Path(description="The queue id to perform this operation on"),
) -> SessionQueueAndProcessorStatus:
"""Gets the status of the session queue"""
queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
processor = ApiDependencies.invoker.services.session_processor.get_status()
return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
try:
queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
processor = ApiDependencies.invoker.services.session_processor.get_status()
return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting queue status: {e}")
@session_queue_router.get(
@@ -288,7 +334,10 @@ async def get_batch_status(
batch_id: str = Path(description="The batch to get the status of"),
) -> BatchStatus:
"""Gets the status of the session queue"""
return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
try:
return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while getting batch status: {e}")
@session_queue_router.get(
@@ -304,7 +353,12 @@ async def get_queue_item(
item_id: int = Path(description="The queue item to get"),
) -> SessionQueueItem:
"""Gets a queue item"""
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
try:
return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
except SessionQueueItemNotFoundError:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while fetching queue item: {e}")
@session_queue_router.delete(
@@ -316,7 +370,10 @@ async def delete_queue_item(
item_id: int = Path(description="The queue item to delete"),
) -> None:
"""Deletes a queue item"""
ApiDependencies.invoker.services.session_queue.delete_queue_item(item_id)
try:
ApiDependencies.invoker.services.session_queue.delete_queue_item(item_id)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting queue item: {e}")
@session_queue_router.put(
@@ -331,8 +388,12 @@ async def cancel_queue_item(
item_id: int = Path(description="The queue item to cancel"),
) -> SessionQueueItem:
"""Deletes a queue item"""
return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
try:
return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
except SessionQueueItemNotFoundError:
raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling queue item: {e}")
@session_queue_router.get(
@@ -345,9 +406,12 @@ async def counts_by_destination(
destination: str = Query(description="The destination to query"),
) -> SessionQueueCountsByDestination:
"""Gets the counts of queue items by destination"""
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
queue_id=queue_id, destination=destination
)
try:
return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
queue_id=queue_id, destination=destination
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while fetching counts by destination: {e}")
@session_queue_router.delete(
@@ -360,6 +424,9 @@ async def delete_by_destination(
destination: str = Path(description="The destination to query"),
) -> DeleteByDestinationResult:
"""Deletes all items with the given destination"""
return ApiDependencies.invoker.services.session_queue.delete_by_destination(
queue_id=queue_id, destination=destination
)
try:
return ApiDependencies.invoker.services.session_queue.delete_by_destination(
queue_id=queue_id, destination=destination
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting by destination: {e}")

View File

@@ -19,6 +19,7 @@ from invokeai.app.api.routers import (
app_info,
board_images,
boards,
client_state,
download_queue,
images,
model_manager,
@@ -131,6 +132,7 @@ app.include_router(app_info.app_router, prefix="/api")
app.include_router(session_queue.session_queue_router, prefix="/api")
app.include_router(workflows.workflows_router, prefix="/api")
app.include_router(style_presets.style_presets_router, prefix="/api")
app.include_router(client_state.client_state_router, prefix="/api")
app.openapi = get_openapi_func(app)
@@ -155,6 +157,12 @@ def overridden_redoc() -> HTMLResponse:
web_root_path = Path(list(web_dir.__path__)[0])
if app_config.unsafe_disable_picklescan:
logger.warning(
"The unsafe_disable_picklescan option is enabled. This disables malware scanning while installing and"
"loading models, which may allow malicious code to be executed. Use at your own risk."
)
try:
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
except RuntimeError:

View File

@@ -215,6 +215,7 @@ class FieldDescriptions:
flux_redux_conditioning = "FLUX Redux conditioning tensor"
vllm_model = "The VLLM model to use"
flux_fill_conditioning = "FLUX Fill conditioning tensor"
flux_kontext_conditioning = "FLUX Kontext conditioning (reference image)"
class ImageField(BaseModel):
@@ -291,6 +292,12 @@ class FluxFillConditioningField(BaseModel):
mask: TensorField = Field(description="The FLUX Fill inpaint mask.")
class FluxKontextConditioningField(BaseModel):
"""A conditioning field for FLUX Kontext (reference image)."""
image: ImageField = Field(description="The Kontext reference image.")
class SD3ConditioningField(BaseModel):
"""A conditioning tensor primitive value"""

View File

@@ -16,13 +16,12 @@ from invokeai.app.invocations.fields import (
FieldDescriptions,
FluxConditioningField,
FluxFillConditioningField,
FluxKontextConditioningField,
FluxReduxConditioningField,
ImageField,
Input,
InputField,
LatentsField,
WithBoard,
WithMetadata,
)
from invokeai.app.invocations.flux_controlnet import FluxControlNetField
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
@@ -34,6 +33,7 @@ from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXCo
from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux
from invokeai.backend.flux.denoise import denoise
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
from invokeai.backend.flux.extensions.kontext_extension import KontextExtension
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
@@ -63,9 +63,9 @@ from invokeai.backend.util.devices import TorchDevice
title="FLUX Denoise",
tags=["image", "flux"],
category="image",
version="3.3.0",
version="4.1.0",
)
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
class FluxDenoiseInvocation(BaseInvocation):
"""Run denoising process with a FLUX transformer model."""
# If latents is provided, this means we are doing image-to-image.
@@ -145,11 +145,20 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
description=FieldDescriptions.vae,
input=Input.Connection,
)
# This node accepts images for features like FLUX Fill, ControlNet, and Kontext, but needs to operate on them in
# latent space. We'll run the VAE to encode them in this node instead of requiring the user to run the VAE in
# upstream nodes.
ip_adapter: IPAdapterField | list[IPAdapterField] | None = InputField(
description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection
)
kontext_conditioning: FluxKontextConditioningField | list[FluxKontextConditioningField] | None = InputField(
default=None,
description="FLUX Kontext conditioning (reference image).",
input=Input.Connection,
)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = self._run_diffusion(context)
@@ -376,6 +385,29 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
dtype=inference_dtype,
)
kontext_extension = None
if self.kontext_conditioning:
if not self.controlnet_vae:
raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
kontext_extension = KontextExtension(
context=context,
kontext_conditioning=self.kontext_conditioning
if isinstance(self.kontext_conditioning, list)
else [self.kontext_conditioning],
vae_field=self.controlnet_vae,
device=TorchDevice.choose_torch_device(),
dtype=inference_dtype,
)
# Prepare Kontext conditioning if provided
img_cond_seq = None
img_cond_seq_ids = None
if kontext_extension is not None:
# Ensure batch sizes match
kontext_extension.ensure_batch_size(x.shape[0])
img_cond_seq, img_cond_seq_ids = kontext_extension.kontext_latents, kontext_extension.kontext_ids
x = denoise(
model=transformer,
img=x,
@@ -391,6 +423,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
pos_ip_adapter_extensions=pos_ip_adapter_extensions,
neg_ip_adapter_extensions=neg_ip_adapter_extensions,
img_cond=img_cond,
img_cond_seq=img_cond_seq,
img_cond_seq_ids=img_cond_seq_ids,
)
x = unpack(x.float(), self.height, self.width)
@@ -865,7 +899,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
def step_callback(state: PipelineIntermediateState) -> None:
state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
# The denoise function now handles Kontext conditioning correctly,
# so we don't need to slice the latents here
latents = state.latents.float()
state.latents = unpack(latents, self.height, self.width).squeeze()
context.util.flux_step_callback(state)
return step_callback

View File

@@ -0,0 +1,40 @@
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import (
FieldDescriptions,
FluxKontextConditioningField,
InputField,
OutputField,
)
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.services.shared.invocation_context import InvocationContext
@invocation_output("flux_kontext_output")
class FluxKontextOutput(BaseInvocationOutput):
"""The conditioning output of a FLUX Kontext invocation."""
kontext_cond: FluxKontextConditioningField = OutputField(
description=FieldDescriptions.flux_kontext_conditioning, title="Kontext Conditioning"
)
@invocation(
"flux_kontext",
title="Kontext Conditioning - FLUX",
tags=["conditioning", "kontext", "flux"],
category="conditioning",
version="1.0.0",
)
class FluxKontextInvocation(BaseInvocation):
"""Prepares a reference image for FLUX Kontext conditioning."""
image: ImageField = InputField(description="The Kontext reference image.")
def invoke(self, context: InvocationContext) -> FluxKontextOutput:
"""Packages the provided image into a Kontext conditioning field."""
return FluxKontextOutput(kontext_cond=FluxKontextConditioningField(image=self.image))
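For illustration, the new conditioning field is a thin wrapper around an image reference. A hypothetical construction (the image name is a made-up example) might look like:

```python
# Illustration only: FluxKontextConditioningField wraps a single reference image.
# The image_name below is a hypothetical example value.
from invokeai.app.invocations.fields import FluxKontextConditioningField, ImageField

cond = FluxKontextConditioningField(image=ImageField(image_name="ref_image.png"))
print(cond.image.image_name)  # -> ref_image.png
```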

View File

@@ -1,5 +1,5 @@
from contextlib import ExitStack
from typing import Iterator, Literal, Optional, Tuple
from typing import Iterator, Literal, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast
@@ -111,6 +111,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
if context.config.get().log_tokenization:
self._log_t5_tokenization(context, t5_tokenizer)
context.util.signal_progress("Running T5 encoder")
prompt_embeds = t5_encoder(prompt)
@@ -151,6 +154,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
if context.config.get().log_tokenization:
self._log_clip_tokenization(context, clip_tokenizer)
context.util.signal_progress("Running CLIP encoder")
pooled_prompt_embeds = clip_encoder(prompt)
@@ -170,3 +176,88 @@ class FluxTextEncoderInvocation(BaseInvocation):
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info
def _log_t5_tokenization(
self,
context: InvocationContext,
tokenizer: Union[T5Tokenizer, T5TokenizerFast],
) -> None:
"""Logs the tokenization of a prompt for a T5-based model like FLUX."""
# Tokenize the prompt using the same parameters as the model's text encoder.
# T5 tokenizers add an EOS token (</s>) and then pad to max_length.
tokenized_output = tokenizer(
self.prompt,
padding="max_length",
max_length=self.t5_max_seq_len,
truncation=True,
add_special_tokens=True, # This is important for T5 to add the EOS token.
return_tensors="pt",
)
input_ids = tokenized_output.input_ids[0]
tokens = tokenizer.convert_ids_to_tokens(input_ids)
# The T5 tokenizer uses a space-like character '▁' (U+2581) to denote spaces.
# We'll replace it with a regular space for readability.
tokens = [t.replace("\u2581", " ") for t in tokens]
tokenized_str = ""
used_tokens = 0
for token in tokens:
if token == tokenizer.eos_token:
tokenized_str += f"\x1b[0;31m{token}\x1b[0m" # Red for EOS
used_tokens += 1
elif token == tokenizer.pad_token:
# tokenized_str += f"\x1b[0;34m{token}\x1b[0m" # Blue for PAD
continue
else:
color = (used_tokens % 6) + 1 # Cycle through 6 colors
tokenized_str += f"\x1b[0;3{color}m{token}\x1b[0m"
used_tokens += 1
context.logger.info(f">> [T5 TOKENLOG] Tokens ({used_tokens}/{self.t5_max_seq_len}):")
context.logger.info(f"{tokenized_str}\x1b[0m")
def _log_clip_tokenization(
self,
context: InvocationContext,
tokenizer: CLIPTokenizer,
) -> None:
"""Logs the tokenization of a prompt for a CLIP-based model."""
max_length = tokenizer.model_max_length
tokenized_output = tokenizer(
self.prompt,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
input_ids = tokenized_output.input_ids[0]
attention_mask = tokenized_output.attention_mask[0]
tokens = tokenizer.convert_ids_to_tokens(input_ids)
# The CLIP tokenizer uses '</w>' to denote spaces.
# We'll replace it with a regular space for readability.
tokens = [t.replace("</w>", " ") for t in tokens]
tokenized_str = ""
used_tokens = 0
for i, token in enumerate(tokens):
if attention_mask[i] == 0:
# Do not log padding tokens.
continue
if token == tokenizer.bos_token:
tokenized_str += f"\x1b[0;32m{token}\x1b[0m" # Green for BOS
elif token == tokenizer.eos_token:
tokenized_str += f"\x1b[0;31m{token}\x1b[0m" # Red for EOS
else:
color = (used_tokens % 6) + 1 # Cycle through 6 colors
tokenized_str += f"\x1b[0;3{color}m{token}\x1b[0m"
used_tokens += 1
context.logger.info(f">> [CLIP TOKENLOG] Tokens ({used_tokens}/{max_length}):")
context.logger.info(f"{tokenized_str}\x1b[0m")

View File

@@ -1347,3 +1347,96 @@ class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoar
image_dto = context.images.save(image=target_image)
return ImageOutput.build(image_dto)
@invocation(
"flux_kontext_image_prep",
title="FLUX Kontext Image Prep",
tags=["image", "concatenate", "flux", "kontext"],
category="image",
version="1.0.0",
)
class FluxKontextConcatenateImagesInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Prepares an image or images for use with FLUX Kontext. The first/single image is resized to the nearest
preferred Kontext resolution. All other images are concatenated horizontally, maintaining their aspect ratio."""
images: list[ImageField] = InputField(
description="The images to concatenate",
min_length=1,
max_length=10,
)
use_preferred_resolution: bool = InputField(
default=True, description="Use FLUX preferred resolutions for the first image"
)
def invoke(self, context: InvocationContext) -> ImageOutput:
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS
# Step 1: Load all images
pil_images = []
for image_field in self.images:
image = context.images.get_pil(image_field.image_name, mode="RGBA")
pil_images.append(image)
# Step 2: Determine target resolution for the first image
first_image = pil_images[0]
width, height = first_image.size
if self.use_preferred_resolution:
aspect_ratio = width / height
# Find the closest preferred resolution for the first image
_, target_width, target_height = min(
((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
)
# Apply BFL's scaling formula
scaled_height = 2 * int(target_height / 16)
final_height = 8 * scaled_height # This will be consistent for all images
scaled_width = 2 * int(target_width / 16)
first_width = 8 * scaled_width
else:
# Use original dimensions of first image, ensuring divisibility by 16
final_height = 16 * (height // 16)
first_width = 16 * (width // 16)
# Ensure minimum dimensions
if final_height < 16:
final_height = 16
if first_width < 16:
first_width = 16
# Step 3: Process and resize all images with consistent height
processed_images = []
total_width = 0
for i, image in enumerate(pil_images):
if i == 0:
# First image uses the calculated dimensions
final_width = first_width
else:
# Subsequent images maintain aspect ratio with the same height
img_aspect_ratio = image.width / image.height
# Calculate width that maintains aspect ratio at the target height
calculated_width = int(final_height * img_aspect_ratio)
# Ensure width is divisible by 16 for proper VAE encoding
final_width = 16 * (calculated_width // 16)
# Ensure minimum width
if final_width < 16:
final_width = 16
# Resize image to calculated dimensions
resized_image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)
processed_images.append(resized_image)
total_width += final_width
# Step 4: Concatenate images horizontally
concatenated_image = Image.new("RGB", (total_width, final_height))
x_offset = 0
for img in processed_images:
concatenated_image.paste(img, (x_offset, 0))
x_offset += img.width
# Save the concatenated image
image_dto = context.images.save(image=concatenated_image)
return ImageOutput.build(image_dto)
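To make the resolution selection concrete, here is a standalone sketch of the same math: pick the candidate whose aspect ratio is closest to the input image's, then snap both sides down to multiples of 16. The candidate list is a hypothetical subset of `PREFERED_KONTEXT_RESOLUTIONS`, not the full table.

```python
# Standalone sketch of the resolution selection used above. The candidate list
# is a hypothetical subset of PREFERED_KONTEXT_RESOLUTIONS for illustration.
CANDIDATES = [(672, 1568), (880, 1184), (1024, 1024), (1184, 880), (1568, 672)]

def pick_kontext_size(width: int, height: int) -> tuple[int, int]:
    aspect_ratio = width / height
    # Choose the candidate whose aspect ratio is closest to the input's.
    _, target_w, target_h = min(
        ((abs(aspect_ratio - w / h), w, h) for w, h in CANDIDATES), key=lambda x: x[0]
    )
    # Snap each side down to a multiple of 16 (8 * 2 * (x // 16) == 16 * (x // 16)).
    return 16 * (target_w // 16), 16 * (target_h // 16)

print(pick_kontext_size(1920, 1080))  # 16:9 input -> (1184, 880)
```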

View File

@@ -430,6 +430,15 @@ class FluxConditioningOutput(BaseInvocationOutput):
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
@invocation_output("flux_conditioning_collection_output")
class FluxConditioningCollectionOutput(BaseInvocationOutput):
"""Base class for nodes that output a collection of conditioning tensors"""
collection: list[FluxConditioningField] = OutputField(
description="The output conditioning tensors",
)
@invocation_output("sd3_conditioning_output")
class SD3ConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single SD3 conditioning tensor"""

View File

@@ -14,15 +14,14 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def add_image_to_board(
self,
board_id: str,
image_name: str,
) -> None:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
INSERT INTO board_images (board_id, image_name)
@@ -31,17 +30,12 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
""",
(board_id, image_name, board_id),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def remove_image_from_board(
self,
image_name: str,
) -> None:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
DELETE FROM board_images
@@ -49,10 +43,6 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
""",
(image_name,),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def get_images_for_board(
self,
@@ -60,27 +50,26 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
offset: int = 0,
limit: int = 10,
) -> OffsetPaginatedResults[ImageRecord]:
# TODO: this isn't paginated yet?
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT images.*
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE board_images.board_id = ?
ORDER BY board_images.updated_at DESC;
""",
(board_id,),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT images.*
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE board_images.board_id = ?
ORDER BY board_images.updated_at DESC;
""",
(board_id,),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images WHERE 1=1;
"""
)
count = cast(int, cursor.fetchone()[0])
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images WHERE 1=1;
"""
)
count = cast(int, cursor.fetchone()[0])
return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
@@ -90,56 +79,55 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
categories: list[ImageCategory] | None,
is_intermediate: bool | None,
) -> list[str]:
params: list[str | bool] = []
with self._db.transaction() as cursor:
params: list[str | bool] = []
# Base query is a join between images and board_images
stmt = """
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
# Base query is a join between images and board_images
stmt = """
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
# Handle board_id filter
if board_id == "none":
stmt += """--sql
AND board_images.board_id IS NULL
"""
else:
stmt += """--sql
AND board_images.board_id = ?
"""
params.append(board_id)
# Handle board_id filter
if board_id == "none":
stmt += """--sql
AND board_images.board_id IS NULL
"""
else:
stmt += """--sql
AND board_images.board_id = ?
"""
params.append(board_id)
# Add the category filter
if categories is not None:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
stmt += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
# Add the category filter
if categories is not None:
# Convert the enum values to unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
stmt += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
# Unpack the included categories into the query params
for c in category_strings:
params.append(c)
# Unpack the included categories into the query params
for c in category_strings:
params.append(c)
# Add the is_intermediate filter
if is_intermediate is not None:
stmt += """--sql
AND images.is_intermediate = ?
"""
params.append(is_intermediate)
# Add the is_intermediate filter
if is_intermediate is not None:
stmt += """--sql
AND images.is_intermediate = ?
"""
params.append(is_intermediate)
# Put a ring on it
stmt += ";"
# Put a ring on it
stmt += ";"
# Execute the query
cursor = self._conn.cursor()
cursor.execute(stmt, params)
cursor.execute(stmt, params)
result = cast(list[sqlite3.Row], cursor.fetchall())
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [r[0] for r in result]
return image_names
@@ -147,31 +135,31 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
self,
image_name: str,
) -> Optional[str]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT board_id
FROM board_images
WHERE image_name = ?;
""",
(image_name,),
)
result = cursor.fetchone()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT board_id
FROM board_images
WHERE image_name = ?;
""",
(image_name,),
)
result = cursor.fetchone()
if result is None:
return None
return cast(str, result[0])
def get_image_count_for_board(self, board_id: str) -> int:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT COUNT(*)
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE images.is_intermediate = FALSE
AND board_images.board_id = ?;
""",
(board_id,),
)
count = cast(int, cursor.fetchone()[0])
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT COUNT(*)
FROM board_images
INNER JOIN images ON board_images.image_name = images.image_name
WHERE images.is_intermediate = FALSE
AND board_images.board_id = ?;
""",
(board_id,),
)
count = cast(int, cursor.fetchone()[0])
return count
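The refactor above, and the storage classes that follow, rely on a `transaction()` context manager on `SqliteDatabase` that this diff does not show. A minimal sketch of what such a helper might look like, assuming the database object wraps a `sqlite3.Connection` (an assumption, not the actual implementation):

```python
# Sketch only: an assumed shape for the transaction helper, not the real
# SqliteDatabase.transaction(). Commits on success, rolls back on any exception.
import sqlite3
from contextlib import contextmanager
from typing import Iterator

@contextmanager
def transaction(conn: sqlite3.Connection) -> Iterator[sqlite3.Cursor]:
    cursor = conn.cursor()
    try:
        yield cursor
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        cursor.close()
```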

View File

@@ -20,61 +20,57 @@ from invokeai.app.util.misc import uuid_string
class SqliteBoardRecordStorage(BoardRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def delete(self, board_id: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
DELETE FROM boards
WHERE board_id = ?;
""",
(board_id,),
)
self._conn.commit()
except Exception as e:
self._conn.rollback()
raise BoardRecordDeleteException from e
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
DELETE FROM boards
WHERE board_id = ?;
""",
(board_id,),
)
except Exception as e:
raise BoardRecordDeleteException from e
def save(
self,
board_name: str,
) -> BoardRecord:
try:
board_id = uuid_string()
cursor = self._conn.cursor()
cursor.execute(
"""--sql
INSERT OR IGNORE INTO boards (board_id, board_name)
VALUES (?, ?);
""",
(board_id, board_name),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordSaveException from e
with self._db.transaction() as cursor:
try:
board_id = uuid_string()
cursor.execute(
"""--sql
INSERT OR IGNORE INTO boards (board_id, board_name)
VALUES (?, ?);
""",
(board_id, board_name),
)
except sqlite3.Error as e:
raise BoardRecordSaveException from e
return self.get(board_id)
def get(
self,
board_id: str,
) -> BoardRecord:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM boards
WHERE board_id = ?;
""",
(board_id,),
)
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
SELECT *
FROM boards
WHERE board_id = ?;
""",
(board_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
except sqlite3.Error as e:
raise BoardRecordNotFoundException from e
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
except sqlite3.Error as e:
raise BoardRecordNotFoundException from e
if result is None:
raise BoardRecordNotFoundException
return BoardRecord(**dict(result))
@@ -84,45 +80,43 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
board_id: str,
changes: BoardChanges,
) -> BoardRecord:
try:
cursor = self._conn.cursor()
# Change the name of a board
if changes.board_name is not None:
cursor.execute(
"""--sql
UPDATE boards
SET board_name = ?
WHERE board_id = ?;
""",
(changes.board_name, board_id),
)
with self._db.transaction() as cursor:
try:
# Change the name of a board
if changes.board_name is not None:
cursor.execute(
"""--sql
UPDATE boards
SET board_name = ?
WHERE board_id = ?;
""",
(changes.board_name, board_id),
)
# Change the cover image of a board
if changes.cover_image_name is not None:
cursor.execute(
"""--sql
UPDATE boards
SET cover_image_name = ?
WHERE board_id = ?;
""",
(changes.cover_image_name, board_id),
)
# Change the cover image of a board
if changes.cover_image_name is not None:
cursor.execute(
"""--sql
UPDATE boards
SET cover_image_name = ?
WHERE board_id = ?;
""",
(changes.cover_image_name, board_id),
)
# Change the archived status of a board
if changes.archived is not None:
cursor.execute(
"""--sql
UPDATE boards
SET archived = ?
WHERE board_id = ?;
""",
(changes.archived, board_id),
)
# Change the archived status of a board
if changes.archived is not None:
cursor.execute(
"""--sql
UPDATE boards
SET archived = ?
WHERE board_id = ?;
""",
(changes.archived, board_id),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise BoardRecordSaveException from e
except sqlite3.Error as e:
raise BoardRecordSaveException from e
return self.get(board_id)
def get_many(
@@ -133,78 +127,77 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
limit: int = 10,
include_archived: bool = False,
) -> OffsetPaginatedResults[BoardRecord]:
cursor = self._conn.cursor()
# Build base query
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY {order_by} {direction}
LIMIT ? OFFSET ?;
"""
# Determine archived filter condition
archived_filter = "" if include_archived else "WHERE archived = 0"
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
# Execute query to fetch boards
cursor.execute(final_query, (limit, offset))
result = cast(list[sqlite3.Row], cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
# Determine count query
if include_archived:
count_query = """
SELECT COUNT(*)
FROM boards;
"""
else:
count_query = """
SELECT COUNT(*)
with self._db.transaction() as cursor:
# Build base query
base_query = """
SELECT *
FROM boards
WHERE archived = 0;
{archived_filter}
ORDER BY {order_by} {direction}
LIMIT ? OFFSET ?;
"""
# Execute count query
cursor.execute(count_query)
# Determine archived filter condition
archived_filter = "" if include_archived else "WHERE archived = 0"
count = cast(int, cursor.fetchone()[0])
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
# Execute query to fetch boards
cursor.execute(final_query, (limit, offset))
result = cast(list[sqlite3.Row], cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
# Determine count query
if include_archived:
count_query = """
SELECT COUNT(*)
FROM boards;
"""
else:
count_query = """
SELECT COUNT(*)
FROM boards
WHERE archived = 0;
"""
# Execute count query
cursor.execute(count_query)
count = cast(int, cursor.fetchone()[0])
return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)
def get_all(
self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
) -> list[BoardRecord]:
cursor = self._conn.cursor()
if order_by == BoardRecordOrderBy.Name:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY LOWER(board_name) {direction}
"""
else:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY {order_by} {direction}
"""
with self._db.transaction() as cursor:
if order_by == BoardRecordOrderBy.Name:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY LOWER(board_name) {direction}
"""
else:
base_query = """
SELECT *
FROM boards
{archived_filter}
ORDER BY {order_by} {direction}
"""
archived_filter = "" if include_archived else "WHERE archived = 0"
archived_filter = "" if include_archived else "WHERE archived = 0"
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
final_query = base_query.format(
archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
)
cursor.execute(final_query)
cursor.execute(final_query)
result = cast(list[sqlite3.Row], cursor.fetchall())
result = cast(list[sqlite3.Row], cursor.fetchall())
boards = [deserialize_board_record(dict(r)) for r in result]
return boards

View File

@@ -0,0 +1,42 @@
from abc import ABC, abstractmethod
class ClientStatePersistenceABC(ABC):
"""
Base class for client persistence implementations.
This class defines the interface for persisting client data.
"""
@abstractmethod
def set_by_key(self, queue_id: str, key: str, value: str) -> str:
"""
Set a key-value pair for the client.
Args:
key (str): The key to set.
value (str): The value to set for the key.
Returns:
str: The value that was set.
"""
pass
@abstractmethod
def get_by_key(self, queue_id: str, key: str) -> str | None:
"""
Get the value for a specific key of the client.
Args:
key (str): The key to retrieve the value for.
Returns:
str | None: The value associated with the key, or None if the key does not exist.
"""
pass
@abstractmethod
def delete(self, queue_id: str) -> None:
"""
Delete all client state.
"""
pass
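For reference, a minimal in-memory implementation of this interface might look like the sketch below. It is illustration only and not part of the codebase; like the SQLite implementation that follows, it ignores `queue_id` and keeps a single shared state.

```python
# Illustration only: an in-memory implementation of the interface above.
from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC

class ClientStatePersistenceMemory(ClientStatePersistenceABC):
    def __init__(self) -> None:
        super().__init__()
        self._state: dict[str, str] = {}

    def set_by_key(self, queue_id: str, key: str, value: str) -> str:
        self._state[key] = value
        return value

    def get_by_key(self, queue_id: str, key: str) -> str | None:
        return self._state.get(key)

    def delete(self, queue_id: str) -> None:
        self._state.clear()
```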

View File

@@ -0,0 +1,65 @@
import json
from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class ClientStatePersistenceSqlite(ClientStatePersistenceABC):
"""
SQLite-backed implementation of client state persistence.
All state is stored as a single JSON blob in the client_state table.
"""
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._db = db
self._default_row_id = 1
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
def _get(self) -> dict[str, str] | None:
with self._db.transaction() as cursor:
cursor.execute(
f"""
SELECT data FROM client_state
WHERE id = {self._default_row_id}
"""
)
row = cursor.fetchone()
if row is None:
return None
return json.loads(row[0])
def set_by_key(self, queue_id: str, key: str, value: str) -> str:
state = self._get() or {}
state.update({key: value})
with self._db.transaction() as cursor:
cursor.execute(
f"""
INSERT INTO client_state (id, data)
VALUES ({self._default_row_id}, ?)
ON CONFLICT(id) DO UPDATE
SET data = excluded.data;
""",
(json.dumps(state),),
)
return value
def get_by_key(self, queue_id: str, key: str) -> str | None:
state = self._get()
if state is None:
return None
return state.get(key, None)
def delete(self, queue_id: str) -> None:
with self._db.transaction() as cursor:
cursor.execute(
f"""
DELETE FROM client_state
WHERE id = {self._default_row_id}
"""
)

View File

@@ -107,6 +107,7 @@ class InvokeAIAppConfig(BaseSettings):
hashing_algorithm: Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
unsafe_disable_picklescan: UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.
"""
_root: Optional[Path] = PrivateAttr(default=None)
@@ -196,6 +197,7 @@ class InvokeAIAppConfig(BaseSettings):
hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
unsafe_disable_picklescan: bool = Field(default=False, description="UNSAFE. Disable the picklescan security check during model installation. Recommended only for development and testing purposes. This will allow arbitrary code execution during model installation, so should never be used in production.")
# fmt: on
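As a quick illustration of the new flag (a sketch, assuming the settings class accepts keyword overrides in the usual pydantic way):

```python
# Sketch: the flag defaults to False; enabling it disables the picklescan
# security check during model installation, as described above.
from invokeai.app.services.config.config_default import InvokeAIAppConfig

config = InvokeAIAppConfig()
assert config.unsafe_disable_picklescan is False  # safe default

unsafe_config = InvokeAIAppConfig(unsafe_disable_picklescan=True)
assert unsafe_config.unsafe_disable_picklescan is True
```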

View File

@@ -8,6 +8,7 @@ import time
import traceback
from pathlib import Path
from queue import Empty, PriorityQueue
from shutil import disk_usage
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set
import requests
@@ -335,6 +336,14 @@ class DownloadQueueService(DownloadQueueServiceBase):
assert job.download_path
free_space = disk_usage(job.download_path.parent).free
GB = 2**30
self._logger.debug(f"Download is {job.total_bytes / GB:.2f} GB of {free_space / GB:.2f} GB free.")
if free_space < job.total_bytes:
raise RuntimeError(
f"Free disk space {free_space / GB:.2f} GB is not enough for download of {job.total_bytes / GB:.2f} GB."
)
# Don't clobber an existing file. See commit 82c2c85202f88c6d24ff84710f297cfc6ae174af
# for code that instead resumes an interrupted download.
if job.download_path.exists():

View File

@@ -24,22 +24,22 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteImageRecordStorage(ImageRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def get(self, image_name: str) -> ImageRecord:
try:
cursor = self._conn.cursor()
cursor.execute(
f"""--sql
SELECT {IMAGE_DTO_COLS} FROM images
WHERE image_name = ?;
""",
(image_name,),
)
with self._db.transaction() as cursor:
try:
cursor.execute(
f"""--sql
SELECT {IMAGE_DTO_COLS} FROM images
WHERE image_name = ?;
""",
(image_name,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
except sqlite3.Error as e:
raise ImageRecordNotFoundException from e
result = cast(Optional[sqlite3.Row], cursor.fetchone())
except sqlite3.Error as e:
raise ImageRecordNotFoundException from e
if not result:
raise ImageRecordNotFoundException
@@ -47,17 +47,20 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
return deserialize_image_record(dict(result))
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT metadata FROM images
WHERE image_name = ?;
""",
(image_name,),
)
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
SELECT metadata FROM images
WHERE image_name = ?;
""",
(image_name,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
result = cast(Optional[sqlite3.Row], cursor.fetchone())
except sqlite3.Error as e:
raise ImageRecordNotFoundException from e
if not result:
raise ImageRecordNotFoundException
@@ -65,64 +68,60 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
as_dict = dict(result)
metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None
except sqlite3.Error as e:
raise ImageRecordNotFoundException from e
def update(
self,
image_name: str,
changes: ImageRecordChanges,
) -> None:
try:
cursor = self._conn.cursor()
# Change the category of the image
if changes.image_category is not None:
cursor.execute(
"""--sql
UPDATE images
SET image_category = ?
WHERE image_name = ?;
""",
(changes.image_category, image_name),
)
with self._db.transaction() as cursor:
try:
# Change the category of the image
if changes.image_category is not None:
cursor.execute(
"""--sql
UPDATE images
SET image_category = ?
WHERE image_name = ?;
""",
(changes.image_category, image_name),
)
# Change the session associated with the image
if changes.session_id is not None:
cursor.execute(
"""--sql
UPDATE images
SET session_id = ?
WHERE image_name = ?;
""",
(changes.session_id, image_name),
)
# Change the session associated with the image
if changes.session_id is not None:
cursor.execute(
"""--sql
UPDATE images
SET session_id = ?
WHERE image_name = ?;
""",
(changes.session_id, image_name),
)
# Change the image's `is_intermediate`` flag
if changes.is_intermediate is not None:
cursor.execute(
"""--sql
UPDATE images
SET is_intermediate = ?
WHERE image_name = ?;
""",
(changes.is_intermediate, image_name),
)
# Change the image's `is_intermediate`` flag
if changes.is_intermediate is not None:
cursor.execute(
"""--sql
UPDATE images
SET is_intermediate = ?
WHERE image_name = ?;
""",
(changes.is_intermediate, image_name),
)
# Change the image's `starred`` state
if changes.starred is not None:
cursor.execute(
"""--sql
UPDATE images
SET starred = ?
WHERE image_name = ?;
""",
(changes.starred, image_name),
)
# Change the image's `starred` state
if changes.starred is not None:
cursor.execute(
"""--sql
UPDATE images
SET starred = ?
WHERE image_name = ?;
""",
(changes.starred, image_name),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordSaveException from e
except sqlite3.Error as e:
raise ImageRecordSaveException from e
def get_many(
self,
@@ -136,170 +135,162 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> OffsetPaginatedResults[ImageRecord]:
cursor = self._conn.cursor()
# Manually build two queries - one for the count, one for the records
count_query = """--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
if categories is not None:
# Convert the enum values to a unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
with self._db.transaction() as cursor:
# Manually build two queries - one for the count, one for the records
count_query = """--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
# Unpack the included categories into the query params
for c in category_strings:
query_params.append(c)
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
"""
query_params.append(is_intermediate)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
# board_id of "none" is reserved for images without a board
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
# Search term condition
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
if categories is not None:
# Convert the enum values to a unique list of strings
category_strings = [c.value for c in set(categories)]
# Create the correct length of placeholders
placeholders = ",".join("?" * len(category_strings))
if starred_first:
query_pagination = f"""--sql
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
else:
query_pagination = f"""--sql
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
# Final images query with pagination
images_query += query_conditions + query_pagination + ";"
# Add all the parameters
images_params = query_params.copy()
# Add the pagination parameters
images_params.extend([limit, offset])
# Unpack the included categories into the query params
for c in category_strings:
query_params.append(c)
# Build the list of images, deserializing each row
cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
count_params = query_params.copy()
cursor.execute(count_query, count_params)
count = cast(int, cursor.fetchone()[0])
query_params.append(is_intermediate)
# board_id of "none" is reserved for images without a board
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
# Search term condition
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
if starred_first:
query_pagination = f"""--sql
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
else:
query_pagination = f"""--sql
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
"""
# Final images query with pagination
images_query += query_conditions + query_pagination + ";"
# Add all the parameters
images_params = query_params.copy()
# Add the pagination parameters
images_params.extend([limit, offset])
# Build the list of images, deserializing each row
cursor.execute(images_query, images_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
images = [deserialize_image_record(dict(r)) for r in result]
# Set up and execute the count query, without pagination
count_query += query_conditions + ";"
count_params = query_params.copy()
cursor.execute(count_query, count_params)
count = cast(int, cursor.fetchone()[0])
return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
def delete(self, image_name: str) -> None:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
DELETE FROM images
WHERE image_name = ?;
""",
(image_name,),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
DELETE FROM images
WHERE image_name = ?;
""",
(image_name,),
)
except sqlite3.Error as e:
raise ImageRecordDeleteException from e
def delete_many(self, image_names: list[str]) -> None:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
try:
placeholders = ",".join("?" for _ in image_names)
placeholders = ",".join("?" for _ in image_names)
# Construct the SQLite query with the placeholders
query = f"DELETE FROM images WHERE image_name IN ({placeholders})"
# Construct the SQLite query with the placeholders
query = f"DELETE FROM images WHERE image_name IN ({placeholders})"
# Execute the query with the list of IDs as parameters
cursor.execute(query, image_names)
# Execute the query with the list of IDs as parameters
cursor.execute(query, image_names)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
except sqlite3.Error as e:
raise ImageRecordDeleteException from e
def get_intermediates_count(self) -> int:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images
WHERE is_intermediate = TRUE;
"""
)
count = cast(int, cursor.fetchone()[0])
self._conn.commit()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT COUNT(*) FROM images
WHERE is_intermediate = TRUE;
"""
)
count = cast(int, cursor.fetchone()[0])
return count
def delete_intermediates(self) -> list[str]:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT image_name FROM images
WHERE is_intermediate = TRUE;
"""
)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [r[0] for r in result]
cursor.execute(
"""--sql
DELETE FROM images
WHERE is_intermediate = TRUE;
"""
)
self._conn.commit()
return image_names
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordDeleteException from e
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
SELECT image_name FROM images
WHERE is_intermediate = TRUE;
"""
)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [r[0] for r in result]
cursor.execute(
"""--sql
DELETE FROM images
WHERE is_intermediate = TRUE;
"""
)
except sqlite3.Error as e:
raise ImageRecordDeleteException from e
return image_names
def save(
self,
@@ -315,73 +306,71 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
node_id: Optional[str] = None,
metadata: Optional[str] = None,
) -> datetime:
try:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
INSERT OR IGNORE INTO images (
image_name,
image_origin,
image_category,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
""",
(
image_name,
image_origin.value,
image_category.value,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow,
),
)
self._conn.commit()
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
INSERT OR IGNORE INTO images (
image_name,
image_origin,
image_category,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
""",
(
image_name,
image_origin.value,
image_category.value,
width,
height,
node_id,
session_id,
metadata,
is_intermediate,
starred,
has_workflow,
),
)
cursor.execute(
"""--sql
SELECT created_at
FROM images
WHERE image_name = ?;
""",
(image_name,),
)
cursor.execute(
"""--sql
SELECT created_at
FROM images
WHERE image_name = ?;
""",
(image_name,),
)
created_at = datetime.fromisoformat(cursor.fetchone()[0])
created_at = datetime.fromisoformat(cursor.fetchone()[0])
return created_at
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordSaveException from e
except sqlite3.Error as e:
raise ImageRecordSaveException from e
return created_at
def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT images.*
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",
(board_id,),
)
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT images.*
FROM images
JOIN board_images ON images.image_name = board_images.image_name
WHERE board_images.board_id = ?
AND images.is_intermediate = FALSE
ORDER BY images.starred DESC, images.created_at DESC
LIMIT 1;
""",
(board_id,),
)
result = cast(Optional[sqlite3.Row], cursor.fetchone())
result = cast(Optional[sqlite3.Row], cursor.fetchone())
if result is None:
return None
@@ -398,85 +387,84 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
board_id: Optional[str] = None,
search_term: Optional[str] = None,
) -> ImageNamesResult:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
# Build query conditions (reused for both starred count and image names queries)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
# Build query conditions (reused for both starred count and image names queries)
query_conditions = ""
query_params: list[Union[int, str, bool]] = []
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
if image_origin is not None:
query_conditions += """--sql
AND images.image_origin = ?
"""
query_params.append(image_origin.value)
if categories is not None:
category_strings = [c.value for c in set(categories)]
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
for c in category_strings:
query_params.append(c)
if categories is not None:
category_strings = [c.value for c in set(categories)]
placeholders = ",".join("?" * len(category_strings))
query_conditions += f"""--sql
AND images.image_category IN ( {placeholders} )
"""
for c in category_strings:
query_params.append(c)
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
query_params.append(is_intermediate)
if is_intermediate is not None:
query_conditions += """--sql
AND images.is_intermediate = ?
"""
query_params.append(is_intermediate)
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
if board_id == "none":
query_conditions += """--sql
AND board_images.board_id IS NULL
"""
elif board_id is not None:
query_conditions += """--sql
AND board_images.board_id = ?
"""
query_params.append(board_id)
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
if search_term:
query_conditions += """--sql
AND (
images.metadata LIKE ?
OR images.created_at LIKE ?
)
"""
query_params.append(f"%{search_term.lower()}%")
query_params.append(f"%{search_term.lower()}%")
# Get starred count if starred_first is enabled
starred_count = 0
if starred_first:
starred_count_query = f"""--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE images.starred = TRUE AND (1=1{query_conditions})
"""
cursor.execute(starred_count_query, query_params)
starred_count = cast(int, cursor.fetchone()[0])
# Get starred count if starred_first is enabled
starred_count = 0
if starred_first:
starred_count_query = f"""--sql
SELECT COUNT(*)
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE images.starred = TRUE AND (1=1{query_conditions})
"""
cursor.execute(starred_count_query, query_params)
starred_count = cast(int, cursor.fetchone()[0])
# Get all image names with proper ordering
if starred_first:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.starred DESC, images.created_at {order_dir.value}
"""
else:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.created_at {order_dir.value}
"""
# Get all image names with proper ordering
if starred_first:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.starred DESC, images.created_at {order_dir.value}
"""
else:
names_query = f"""--sql
SELECT images.image_name
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1{query_conditions}
ORDER BY images.created_at {order_dir.value}
"""
cursor.execute(names_query, query_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
cursor.execute(names_query, query_params)
result = cast(list[sqlite3.Row], cursor.fetchall())
image_names = [row[0] for row in result]
return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names))

View File

@@ -17,6 +17,7 @@ if TYPE_CHECKING:
from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase
from invokeai.app.services.boards.boards_base import BoardServiceABC
from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
from invokeai.app.services.client_state_persistence.client_state_persistence_base import ClientStatePersistenceABC
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueServiceBase
from invokeai.app.services.events.events_base import EventServiceBase
@@ -73,6 +74,7 @@ class InvocationServices:
style_preset_records: "StylePresetRecordsStorageBase",
style_preset_image_files: "StylePresetImageFileStorageBase",
workflow_thumbnails: "WorkflowThumbnailServiceBase",
client_state_persistence: "ClientStatePersistenceABC",
):
self.board_images = board_images
self.board_image_records = board_image_records
@@ -102,3 +104,4 @@ class InvocationServices:
self.style_preset_records = style_preset_records
self.style_preset_image_files = style_preset_image_files
self.workflow_thumbnails = workflow_thumbnails
self.client_state_persistence = client_state_persistence

View File

@@ -7,7 +7,7 @@ import threading
import time
from pathlib import Path
from queue import Empty, Queue
from shutil import copyfile, copytree, move, rmtree
from shutil import move, rmtree
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
@@ -51,6 +51,7 @@ from invokeai.backend.model_manager.metadata import (
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
from invokeai.backend.model_manager.util.lora_metadata_extractor import apply_lora_metadata
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice
@@ -185,13 +186,14 @@ class ModelInstallService(ModelInstallServiceBase):
info: AnyModelConfig = self._probe(Path(model_path), config) # type: ignore
if preferred_name := config.name:
preferred_name = Path(preferred_name).with_suffix(model_path.suffix)
# Careful! Don't use pathlib.Path(...).with_suffix - it will strip everything after the first dot.
preferred_name = f"{preferred_name}{model_path.suffix}"
dest_path = (
self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
)
try:
new_path = self._copy_model(model_path, dest_path)
new_path = self._move_model(model_path, dest_path)
except FileExistsError as excp:
raise DuplicateModelException(
f"A model named {model_path.name} is already installed at {dest_path.as_posix()}"
@@ -616,16 +618,6 @@ class ModelInstallService(ModelInstallServiceBase):
self.record_store.update_model(key, ModelRecordChanges(path=model.path))
return model
def _copy_model(self, old_path: Path, new_path: Path) -> Path:
if old_path == new_path:
return old_path
new_path.parent.mkdir(parents=True, exist_ok=True)
if old_path.is_dir():
copytree(old_path, new_path)
else:
copyfile(old_path, new_path)
return new_path
def _move_model(self, old_path: Path, new_path: Path) -> Path:
if old_path == new_path:
return old_path
@@ -667,6 +659,10 @@ class ModelInstallService(ModelInstallServiceBase):
info = info or self._probe(model_path, config)
# Apply LoRA metadata if applicable
model_images_path = self.app_config.models_path / "model_images"
apply_lora_metadata(info, model_path.resolve(), model_images_path)
model_path = model_path.resolve()
# Models in the Invoke-managed models dir should use relative paths.

View File

@@ -87,9 +87,21 @@ class ModelLoadService(ModelLoadServiceBase):
def torch_load_file(checkpoint: Path) -> AnyModel:
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
if self._app_config.unsafe_disable_picklescan:
self._logger.warning(
f"Model at {checkpoint} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
if scan_result.scan_err:
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
if self._app_config.unsafe_disable_picklescan:
self._logger.warning(
f"Error scanning model at {checkpoint} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise Exception(f"Error scanning model at {checkpoint} for malware. Aborting load.")
result = torch_load(checkpoint, map_location="cpu")
return result
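
The hunk above makes the same warn-or-raise decision twice, once for infected files and once for scan errors. As an illustration only - the helper name and signature below are assumptions, not part of the diff - the shared logic amounts to:

import logging

def check_scan_result(scan_result, checkpoint, unsafe_disable_picklescan: bool, logger: logging.Logger) -> None:
    # Collect the problems picklescan reported for this checkpoint.
    problems = []
    if scan_result.infected_files != 0:
        problems.append(f"The model at {checkpoint} is potentially infected by malware.")
    if scan_result.scan_err:
        problems.append(f"Error scanning model at {checkpoint} for malware.")
    for problem in problems:
        if unsafe_disable_picklescan:
            # With the unsafe override enabled, downgrade the failure to a warning.
            logger.warning(f"{problem} Picklescan is disabled - proceeding with caution.")
        else:
            raise Exception(f"{problem} Aborting load.")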

View File

@@ -78,11 +78,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
self._db = db
self._logger = logger
@property
def db(self) -> SqliteDatabase:
"""Return the underlying database."""
return self._db
def add_model(self, config: AnyModelConfig) -> AnyModelConfig:
"""
Add a model to the database.
@@ -93,38 +88,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Can raise DuplicateModelException and InvalidModelConfigException exceptions.
"""
try:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
INSERT INTO models (
id,
config
)
VALUES (?,?);
""",
(
config.key,
config.model_dump_json(),
),
)
self._db.conn.commit()
with self._db.transaction() as cursor:
try:
cursor.execute(
"""--sql
INSERT INTO models (
id,
config
)
VALUES (?,?);
""",
(
config.key,
config.model_dump_json(),
),
)
except sqlite3.IntegrityError as e:
self._db.conn.rollback()
if "UNIQUE constraint failed" in str(e):
if "models.path" in str(e):
msg = f"A model with path '{config.path}' is already installed"
elif "models.name" in str(e):
msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
except sqlite3.IntegrityError as e:
if "UNIQUE constraint failed" in str(e):
if "models.path" in str(e):
msg = f"A model with path '{config.path}' is already installed"
elif "models.name" in str(e):
msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
else:
msg = f"A model with key '{config.key}' is already installed"
raise DuplicateModelException(msg) from e
else:
msg = f"A model with key '{config.key}' is already installed"
raise DuplicateModelException(msg) from e
else:
raise e
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
raise e
return self.get_model(config.key)
@@ -136,8 +126,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Can raise an UnknownModelException
"""
try:
cursor = self._db.conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
DELETE FROM models
@@ -147,22 +136,17 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig:
record = self.get_model(key)
with self._db.transaction() as cursor:
record = self.get_model(key)
# Model configs use pydantic's `validate_assignment`, so each change is validated by pydantic.
for field_name in changes.model_fields_set:
setattr(record, field_name, getattr(changes, field_name))
# Model configs use pydantic's `validate_assignment`, so each change is validated by pydantic.
for field_name in changes.model_fields_set:
setattr(record, field_name, getattr(changes, field_name))
json_serialized = record.model_dump_json()
json_serialized = record.model_dump_json()
try:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
UPDATE models
@@ -174,10 +158,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
)
if cursor.rowcount == 0:
raise UnknownModelException("model not found")
self._db.conn.commit()
except sqlite3.Error as e:
self._db.conn.rollback()
raise e
return self.get_model(key)
@@ -189,30 +169,30 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
Exceptions: UnknownModelException
"""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE id=?;
""",
(key,),
)
rows = cursor.fetchone()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE id=?;
""",
(key,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
return model
def get_model_by_hash(self, hash: str) -> AnyModelConfig:
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
rows = cursor.fetchone()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
rows = cursor.fetchone()
if not rows:
raise UnknownModelException("model not found")
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
@@ -224,15 +204,15 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
:param key: Unique key for the model to be deleted
"""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
select count(*) FROM models
WHERE id=?;
""",
(key,),
)
count = cursor.fetchone()[0]
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
select count(*) FROM models
WHERE id=?;
""",
(key,),
)
count = cursor.fetchone()[0]
return count > 0
def search_by_attr(
@@ -255,43 +235,42 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
If none of the optional filters are passed, will return all
models in the database.
"""
with self._db.transaction() as cursor:
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}
where_clause: list[str] = []
bindings: list[str] = []
if model_name:
where_clause.append("name=?")
bindings.append(model_name)
if base_model:
where_clause.append("base=?")
bindings.append(base_model)
if model_type:
where_clause.append("type=?")
bindings.append(model_type)
if model_format:
where_clause.append("format=?")
bindings.append(model_format)
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
where_clause: list[str] = []
bindings: list[str] = []
if model_name:
where_clause.append("name=?")
bindings.append(model_name)
if base_model:
where_clause.append("base=?")
bindings.append(base_model)
if model_type:
where_clause.append("type=?")
bindings.append(model_type)
if model_format:
where_clause.append("format=?")
bindings.append(model_format)
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
cursor = self._db.conn.cursor()
cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
""",
tuple(bindings),
)
result = cursor.fetchall()
cursor.execute(
f"""--sql
SELECT config, strftime('%s',updated_at)
FROM models
{where}
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
""",
tuple(bindings),
)
result = cursor.fetchall()
# Parse the model configs.
results: list[AnyModelConfig] = []
@@ -313,69 +292,68 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
"""Return models with the indicated path."""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE path=?;
""",
(str(path),),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
return results
def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
"""Return models with the indicated hash."""
cursor = self._db.conn.cursor()
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT config, strftime('%s',updated_at) FROM models
WHERE hash=?;
""",
(hash,),
)
results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
return results
def list_models(
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
) -> PaginatedResults[ModelSummary]:
"""Return a paginated summary listing of each model in the database."""
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}
with self._db.transaction() as cursor:
assert isinstance(order_by, ModelRecordOrderBy)
ordering = {
ModelRecordOrderBy.Default: "type, base, name, format",
ModelRecordOrderBy.Type: "type",
ModelRecordOrderBy.Base: "base",
ModelRecordOrderBy.Name: "name",
ModelRecordOrderBy.Format: "format",
}
cursor = self._db.conn.cursor()
# Lock so that the database isn't updated while we're doing the two queries.
# query1: get the total number of model configs
cursor.execute(
"""--sql
select count(*) from models;
""",
(),
)
total = int(cursor.fetchone()[0])
# Lock so that the database isn't updated while we're doing the two queries.
# query1: get the total number of model configs
cursor.execute(
"""--sql
select count(*) from models;
""",
(),
)
total = int(cursor.fetchone()[0])
# query2: fetch key fields
cursor.execute(
f"""--sql
SELECT config
FROM models
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
LIMIT ?
OFFSET ?;
""",
(
per_page,
page * per_page,
),
)
rows = cursor.fetchall()
# query2: fetch key fields
cursor.execute(
f"""--sql
SELECT config
FROM models
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
LIMIT ?
OFFSET ?;
""",
(
per_page,
page * per_page,
),
)
rows = cursor.fetchall()
items = [ModelSummary.model_validate(dict(x)) for x in rows]
return PaginatedResults(page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items)

View File

@@ -1,5 +1,3 @@
import sqlite3
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
ModelRelationshipRecordStorageBase,
)
@@ -9,58 +7,49 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
class SqliteModelRelationshipRecordStorage(ModelRelationshipRecordStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
if model_key_1 == model_key_2:
raise ValueError("Cannot relate a model to itself.")
a, b = sorted([model_key_1, model_key_2])
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
if model_key_1 == model_key_2:
raise ValueError("Cannot relate a model to itself.")
a, b = sorted([model_key_1, model_key_2])
cursor.execute(
"INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
(a, b),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
a, b = sorted([model_key_1, model_key_2])
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
a, b = sorted([model_key_1, model_key_2])
cursor.execute(
"DELETE FROM model_relationships WHERE model_key_1 = ? AND model_key_2 = ?",
(a, b),
)
self._conn.commit()
except sqlite3.Error as e:
self._conn.rollback()
raise e
def get_related_model_keys(self, model_key: str) -> list[str]:
cursor = self._conn.cursor()
cursor.execute(
"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
""",
(model_key, model_key),
)
return [row[0] for row in cursor.fetchall()]
with self._db.transaction() as cursor:
cursor.execute(
"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
""",
(model_key, model_key),
)
result = [row[0] for row in cursor.fetchall()]
return result
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
cursor = self._conn.cursor()
key_list = ",".join("?" for _ in model_keys)
cursor.execute(
f"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
""",
model_keys + model_keys,
)
return [row[0] for row in cursor.fetchall()]
with self._db.transaction() as cursor:
key_list = ",".join("?" for _ in model_keys)
cursor.execute(
f"""
SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
UNION
SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
""",
model_keys + model_keys,
)
result = [row[0] for row in cursor.fetchall()]
return result

View File

@@ -332,6 +332,7 @@ class EnqueueBatchResult(BaseModel):
requested: int = Field(description="The total number of queue items requested to be enqueued")
batch: Batch = Field(description="The batch that was enqueued")
priority: int = Field(description="The priority of the enqueued batch")
item_ids: list[int] = Field(description="The IDs of the queue items that were enqueued")
class RetryItemsResult(BaseModel):

View File

@@ -50,15 +50,14 @@ class SqliteSessionQueue(SessionQueueBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def _set_in_progress_to_canceled(self) -> None:
"""
Sets all in_progress queue items to canceled. Run on app startup, not associated with any queue.
This is necessary because the invoker may have been killed while processing a queue item.
"""
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
UPDATE session_queue
@@ -66,99 +65,104 @@ class SqliteSessionQueue(SessionQueueBase):
WHERE status = 'in_progress';
"""
)
except Exception:
self._conn.rollback()
raise
def _get_current_queue_size(self, queue_id: str) -> int:
"""Gets the current number of pending queue items"""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
""",
(queue_id,),
)
return cast(int, cursor.fetchone()[0])
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
""",
(queue_id,),
)
count = cast(int, cursor.fetchone()[0])
return count
def _get_highest_priority(self, queue_id: str) -> int:
"""Gets the highest priority value in the queue"""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT MAX(priority)
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
""",
(queue_id,),
)
return cast(Union[int, None], cursor.fetchone()[0]) or 0
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT MAX(priority)
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
""",
(queue_id,),
)
priority = cast(Union[int, None], cursor.fetchone()[0]) or 0
return priority
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
try:
# TODO: how does this work in a multi-user scenario?
current_queue_size = self._get_current_queue_size(queue_id)
max_queue_size = self.__invoker.services.configuration.max_queue_size
max_new_queue_items = max_queue_size - current_queue_size
current_queue_size = self._get_current_queue_size(queue_id)
max_queue_size = self.__invoker.services.configuration.max_queue_size
max_new_queue_items = max_queue_size - current_queue_size
priority = 0
if prepend:
priority = self._get_highest_priority(queue_id) + 1
priority = 0
if prepend:
priority = self._get_highest_priority(queue_id) + 1
requested_count = await asyncio.to_thread(
calc_session_count,
batch=batch,
)
values_to_insert = await asyncio.to_thread(
prepare_values_to_insert,
queue_id=queue_id,
batch=batch,
priority=priority,
max_new_queue_items=max_new_queue_items,
)
enqueued_count = len(values_to_insert)
requested_count = await asyncio.to_thread(
calc_session_count,
batch=batch,
)
values_to_insert = await asyncio.to_thread(
prepare_values_to_insert,
queue_id=queue_id,
batch=batch,
priority=priority,
max_new_queue_items=max_new_queue_items,
)
enqueued_count = len(values_to_insert)
with self._conn:
cursor = self._conn.cursor()
cursor.executemany(
"""--sql
with self._db.transaction() as cursor:
cursor.executemany(
"""--sql
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
values_to_insert,
)
except Exception:
raise
values_to_insert,
)
cursor.execute(
"""--sql
SELECT item_id
FROM session_queue
WHERE batch_id = ?
ORDER BY item_id DESC;
""",
(batch.batch_id,),
)
item_ids = [row[0] for row in cursor.fetchall()]
enqueue_result = EnqueueBatchResult(
queue_id=queue_id,
requested=requested_count,
enqueued=enqueued_count,
batch=batch,
priority=priority,
item_ids=item_ids,
)
self.__invoker.services.events.emit_batch_enqueued(enqueue_result)
return enqueue_result
def dequeue(self) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE status = 'pending'
ORDER BY
priority DESC,
item_id ASC
LIMIT 1
"""
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE status = 'pending'
ORDER BY
priority DESC,
item_id ASC
LIMIT 1
"""
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
if result is None:
return None
queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
@@ -166,40 +170,40 @@ class SqliteSessionQueue(SessionQueueBase):
return queue_item
def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
ORDER BY
priority DESC,
created_at ASC
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'pending'
ORDER BY
priority DESC,
created_at ASC
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
if result is None:
return None
return SessionQueueItem.queue_item_from_dict(dict(result))
def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'in_progress'
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT *
FROM session_queue
WHERE
queue_id = ?
AND status = 'in_progress'
LIMIT 1
""",
(queue_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
if result is None:
return None
return SessionQueueItem.queue_item_from_dict(dict(result))
@@ -212,8 +216,7 @@ class SqliteSessionQueue(SessionQueueBase):
error_message: Optional[str] = None,
error_traceback: Optional[str] = None,
) -> SessionQueueItem:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT status FROM session_queue WHERE item_id = ?
@@ -221,12 +224,15 @@ class SqliteSessionQueue(SessionQueueBase):
(item_id,),
)
row = cursor.fetchone()
if row is None:
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
current_status = row[0]
# Only update if not already finished (completed, failed or canceled)
if current_status in ("completed", "failed", "canceled"):
return self.get_queue_item(item_id)
if row is None:
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
current_status = row[0]
# Only update if not already finished (completed, failed or canceled)
if current_status in ("completed", "failed", "canceled"):
return self.get_queue_item(item_id)
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
UPDATE session_queue
@@ -235,10 +241,7 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(status, error_type, error_message, error_traceback, item_id),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
queue_item = self.get_queue_item(item_id)
batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
@@ -246,35 +249,34 @@ class SqliteSessionQueue(SessionQueueBase):
return queue_item
def is_empty(self, queue_id: str) -> IsEmptyResult:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
is_empty = cast(int, cursor.fetchone()[0]) == 0
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
is_empty = cast(int, cursor.fetchone()[0]) == 0
return IsEmptyResult(is_empty=is_empty)
def is_full(self, queue_id: str) -> IsFullResult:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
max_queue_size = self.__invoker.services.configuration.max_queue_size
is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT count(*)
FROM session_queue
WHERE queue_id = ?
""",
(queue_id,),
)
max_queue_size = self.__invoker.services.configuration.max_queue_size
is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
return IsFullResult(is_full=is_full)
def clear(self, queue_id: str) -> ClearResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT COUNT(*)
@@ -292,24 +294,19 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
self.__invoker.services.events.emit_queue_cleared(queue_id)
return ClearResult(deleted=count)
def prune(self, queue_id: str) -> PruneResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
where = """--sql
WHERE
queue_id = ?
AND (
queue_id = ?
AND (
status = 'completed'
OR status = 'failed'
OR status = 'canceled'
)
)
"""
cursor.execute(
f"""--sql
@@ -328,10 +325,6 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return PruneResult(deleted=count)
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
@@ -344,8 +337,7 @@ class SqliteSessionQueue(SessionQueueBase):
self.cancel_queue_item(item_id)
except SessionQueueItemNotFoundError:
pass
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
DELETE
@@ -354,10 +346,6 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(item_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
def complete_queue_item(self, item_id: int) -> SessionQueueItem:
queue_item = self._set_queue_item_status(item_id=item_id, status="completed")
@@ -380,8 +368,7 @@ class SqliteSessionQueue(SessionQueueBase):
return queue_item
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
current_queue_item = self.get_current(queue_id)
placeholders = ", ".join(["?" for _ in batch_ids])
where = f"""--sql
@@ -391,6 +378,8 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'canceled'
AND status != 'completed'
AND status != 'failed'
-- We will cancel the current item separately below - skip it here
AND status != 'in_progress'
"""
params = [queue_id] + batch_ids
cursor.execute(
@@ -410,17 +399,14 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
self._conn.commit()
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self._conn.rollback()
raise
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
return CancelByBatchIDsResult(canceled=count)
def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
current_queue_item = self.get_current(queue_id)
where = """--sql
WHERE
@@ -429,6 +415,8 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'canceled'
AND status != 'completed'
AND status != 'failed'
-- We will cancel the current item separately below - skip it here
AND status != 'in_progress'
"""
params = (queue_id, destination)
cursor.execute(
@@ -448,17 +436,12 @@ class SqliteSessionQueue(SessionQueueBase):
""",
params,
)
self._conn.commit()
if current_queue_item is not None and current_queue_item.destination == destination:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
except Exception:
self._conn.rollback()
raise
if current_queue_item is not None and current_queue_item.destination == destination:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
return CancelByDestinationResult(canceled=count)
def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
current_queue_item = self.get_current(queue_id)
if current_queue_item is not None and current_queue_item.destination == destination:
self.cancel_queue_item(current_queue_item.item_id)
@@ -484,15 +467,10 @@ class SqliteSessionQueue(SessionQueueBase):
""",
params,
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return DeleteByDestinationResult(deleted=count)
def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
where = """--sql
WHERE
queue_id == ?
@@ -515,15 +493,10 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return DeleteAllExceptCurrentResult(deleted=count)
def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
current_queue_item = self.get_current(queue_id)
where = """--sql
WHERE
@@ -531,6 +504,8 @@ class SqliteSessionQueue(SessionQueueBase):
AND status != 'canceled'
AND status != 'completed'
AND status != 'failed'
-- We will cancel the current item separately below - skip it here
AND status != 'in_progress'
"""
params = [queue_id]
cursor.execute(
@@ -550,21 +525,13 @@ class SqliteSessionQueue(SessionQueueBase):
""",
tuple(params),
)
self._conn.commit()
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
queue_status = self.get_queue_status(queue_id=queue_id)
self.__invoker.services.events.emit_queue_item_status_changed(
current_queue_item, batch_status, queue_status
)
except Exception:
self._conn.rollback()
raise
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
self._set_queue_item_status(current_queue_item.item_id, "canceled")
return CancelByQueueIDResult(canceled=count)
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
where = """--sql
WHERE
queue_id == ?
@@ -587,30 +554,25 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(queue_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return CancelAllExceptCurrentResult(canceled=count)
def get_queue_item(self, item_id: int) -> SessionQueueItem:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT * FROM session_queue
WHERE
item_id = ?
""",
(item_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT * FROM session_queue
WHERE
item_id = ?
""",
(item_id,),
)
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
if result is None:
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
return SessionQueueItem.queue_item_from_dict(dict(result))
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
# during execution.
@@ -623,10 +585,6 @@ class SqliteSessionQueue(SessionQueueBase):
""",
(session_json, item_id),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return self.get_queue_item(item_id)
def list_queue_items(
@@ -638,42 +596,42 @@ class SqliteSessionQueue(SessionQueueBase):
status: Optional[QUEUE_ITEM_STATUS] = None,
destination: Optional[str] = None,
) -> CursorPaginatedResults[SessionQueueItem]:
cursor_ = self._conn.cursor()
item_id = cursor
query = """--sql
SELECT *
FROM session_queue
WHERE queue_id = ?
"""
params: list[Union[str, int]] = [queue_id]
if status is not None:
query += """--sql
AND status = ?
"""
params.append(status)
if destination is not None:
query += """---sql
AND destination = ?
with self._db.transaction() as cursor_:
item_id = cursor
query = """--sql
SELECT *
FROM session_queue
WHERE queue_id = ?
"""
params.append(destination)
params: list[Union[str, int]] = [queue_id]
if item_id is not None:
query += """--sql
AND (priority < ?) OR (priority = ? AND item_id > ?)
if status is not None:
query += """--sql
AND status = ?
"""
params.append(status)
if destination is not None:
query += """---sql
AND destination = ?
"""
params.extend([priority, priority, item_id])
params.append(destination)
query += """--sql
ORDER BY
priority DESC,
item_id ASC
LIMIT ?
"""
params.append(limit + 1)
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
if item_id is not None:
query += """--sql
AND (priority < ?) OR (priority = ? AND item_id > ?)
"""
params.extend([priority, priority, item_id])
query += """--sql
ORDER BY
priority DESC,
item_id ASC
LIMIT ?
"""
params.append(limit + 1)
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
has_more = False
if len(items) > limit:
@@ -688,46 +646,46 @@ class SqliteSessionQueue(SessionQueueBase):
destination: Optional[str] = None,
) -> list[SessionQueueItem]:
"""Gets all queue items that match the given parameters"""
cursor_ = self._conn.cursor()
query = """--sql
SELECT *
FROM session_queue
WHERE queue_id = ?
"""
params: list[Union[str, int]] = [queue_id]
if destination is not None:
query += """---sql
AND destination = ?
with self._db.transaction() as cursor:
query = """--sql
SELECT *
FROM session_queue
WHERE queue_id = ?
"""
params.append(destination)
params: list[Union[str, int]] = [queue_id]
query += """--sql
ORDER BY
priority DESC,
item_id ASC
;
"""
cursor_.execute(query, params)
results = cast(list[sqlite3.Row], cursor_.fetchall())
if destination is not None:
query += """---sql
AND destination = ?
"""
params.append(destination)
query += """--sql
ORDER BY
priority DESC,
item_id ASC
;
"""
cursor.execute(query, params)
results = cast(list[sqlite3.Row], cursor.fetchall())
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
return items
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
GROUP BY status
""",
(queue_id,),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
GROUP BY status
""",
(queue_id,),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
current_item = self.get_current(queue_id=queue_id)
total = sum(row[1] for row in counts_result)
total = sum(row[1] or 0 for row in counts_result)
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
return SessionQueueStatus(
queue_id=queue_id,
@@ -743,20 +701,20 @@ class SqliteSessionQueue(SessionQueueBase):
)
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
FROM session_queue
WHERE
queue_id = ?
AND batch_id = ?
GROUP BY status
""",
(queue_id, batch_id),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] for row in result)
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT status, count(*), origin, destination
FROM session_queue
WHERE
queue_id = ?
AND batch_id = ?
GROUP BY status
""",
(queue_id, batch_id),
)
result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] or 0 for row in result)
counts: dict[str, int] = {row[0]: row[1] for row in result}
origin = result[0]["origin"] if result else None
destination = result[0]["destination"] if result else None
@@ -775,20 +733,20 @@ class SqliteSessionQueue(SessionQueueBase):
)
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
AND destination = ?
GROUP BY status
""",
(queue_id, destination),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT status, count(*)
FROM session_queue
WHERE queue_id = ?
AND destination = ?
GROUP BY status
""",
(queue_id, destination),
)
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
total = sum(row[1] for row in counts_result)
total = sum(row[1] or 0 for row in counts_result)
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
return SessionQueueCountsByDestination(
@@ -804,8 +762,7 @@ class SqliteSessionQueue(SessionQueueBase):
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
"""Retries the given queue items"""
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
values_to_insert: list[ValueToInsertTuple] = []
retried_item_ids: list[int] = []
@@ -856,10 +813,6 @@ class SqliteSessionQueue(SessionQueueBase):
values_to_insert,
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
retry_result = RetryItemsResult(
queue_id=queue_id,
retried_item_ids=retried_item_ids,

View File

@@ -2,7 +2,7 @@
import copy
import itertools
from typing import Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
from typing import Any, Optional, TypeVar, Union, get_args, get_origin
import networkx as nx
from pydantic import (
@@ -58,17 +58,32 @@ class Edge(BaseModel):
def get_output_field_type(node: BaseInvocation, field: str) -> Any:
node_type = type(node)
node_outputs = get_type_hints(node_type.get_output_annotation())
node_output_field = node_outputs.get(field) or None
return node_output_field
# TODO(psyche): This is awkward - if field_info is None, it means the field is not defined in the output, which
# really should raise. The consumers of this utility expect it to never raise, and return None instead. Fixing this
# would require some fairly significant changes and I don't want to risk breaking anything.
try:
invocation_class = type(node)
invocation_output_class = invocation_class.get_output_annotation()
field_info = invocation_output_class.model_fields.get(field)
assert field_info is not None, f"Output field '{field}' not found in {invocation_output_class.get_type()}"
output_field_type = field_info.annotation
return output_field_type
except Exception:
return None
def get_input_field_type(node: BaseInvocation, field: str) -> Any:
node_type = type(node)
node_inputs = get_type_hints(node_type)
node_input_field = node_inputs.get(field) or None
return node_input_field
# TODO(psyche): This is awkward - if field_info is None, it means the field is not defined on the invocation, which
# really should raise. The consumers of this utility expect it to never raise, and return None instead. Fixing this
# would require some fairly significant changes and I don't want to risk breaking anything.
try:
invocation_class = type(node)
field_info = invocation_class.model_fields.get(field)
assert field_info is not None, f"Input field '{field}' not found in {invocation_class.get_type()}"
input_field_type = field_info.annotation
return input_field_type
except Exception:
return None
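For context, the rewritten helpers read field types off pydantic's model_fields instead of get_type_hints. A minimal standalone sketch of that lookup, using a hypothetical ExampleOutput model (not a class from this codebase):

from pydantic import BaseModel

class ExampleOutput(BaseModel):
    width: int
    height: int

# model_fields maps field names to FieldInfo objects; .annotation is the declared type.
field_info = ExampleOutput.model_fields.get("width")
assert field_info is not None
print(field_info.annotation)  # <class 'int'>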
def is_union_subtype(t1, t2):
@@ -992,10 +1007,11 @@ class GraphExecutionState(BaseModel):
new_node_ids = []
if isinstance(next_node, CollectInvocation):
# Collapse all iterator input mappings and create a single execution node for the collect invocation
all_iteration_mappings = list(
itertools.chain(*(((s, p) for p in self.source_prepared_mapping[s]) for s in next_node_parents))
)
# all_iteration_mappings = list(set(itertools.chain(*prepared_parent_mappings)))
all_iteration_mappings = []
for source_node_id in next_node_parents:
prepared_nodes = self.source_prepared_mapping[source_node_id]
all_iteration_mappings.extend([(source_node_id, p) for p in prepared_nodes])
create_results = self._create_execution_node(next_node_id, all_iteration_mappings)
if create_results is not None:
new_node_ids.extend(create_results)

View File

@@ -1,4 +1,7 @@
import sqlite3
import threading
from collections.abc import Generator
from contextlib import contextmanager
from logging import Logger
from pathlib import Path
@@ -26,46 +29,65 @@ class SqliteDatabase:
def __init__(self, db_path: Path | None, logger: Logger, verbose: bool = False) -> None:
"""Initializes the database. This is used internally by the class constructor."""
self.logger = logger
self.db_path = db_path
self.verbose = verbose
self._logger = logger
self._db_path = db_path
self._verbose = verbose
self._lock = threading.RLock()
if not self.db_path:
if not self._db_path:
logger.info("Initializing in-memory database")
else:
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.logger.info(f"Initializing database at {self.db_path}")
self._db_path.parent.mkdir(parents=True, exist_ok=True)
self._logger.info(f"Initializing database at {self._db_path}")
self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
self.conn.row_factory = sqlite3.Row
self._conn = sqlite3.connect(database=self._db_path or sqlite_memory, check_same_thread=False)
self._conn.row_factory = sqlite3.Row
if self.verbose:
self.conn.set_trace_callback(self.logger.debug)
if self._verbose:
self._conn.set_trace_callback(self._logger.debug)
# Enable foreign key constraints
self.conn.execute("PRAGMA foreign_keys = ON;")
self._conn.execute("PRAGMA foreign_keys = ON;")
# Enable Write-Ahead Logging (WAL) mode for better concurrency
self.conn.execute("PRAGMA journal_mode = WAL;")
self._conn.execute("PRAGMA journal_mode = WAL;")
# Set a busy timeout to prevent database lockups during writes
self.conn.execute("PRAGMA busy_timeout = 5000;") # 5 seconds
self._conn.execute("PRAGMA busy_timeout = 5000;") # 5 seconds
def clean(self) -> None:
"""
Cleans the database by running the VACUUM command, reporting on the freed space.
"""
# No need to clean in-memory database
if not self.db_path:
if not self._db_path:
return
try:
initial_db_size = Path(self.db_path).stat().st_size
self.conn.execute("VACUUM;")
self.conn.commit()
final_db_size = Path(self.db_path).stat().st_size
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
if freed_space_in_mb > 0:
self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
with self._conn as conn:
initial_db_size = Path(self._db_path).stat().st_size
conn.execute("VACUUM;")
conn.commit()
final_db_size = Path(self._db_path).stat().st_size
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
if freed_space_in_mb > 0:
self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
except Exception as e:
self.logger.error(f"Error cleaning database: {e}")
self._logger.error(f"Error cleaning database: {e}")
raise
@contextmanager
def transaction(self) -> Generator[sqlite3.Cursor, None, None]:
"""
Thread-safe context manager for DB work.
Acquires the RLock, yields a Cursor, then commits or rolls back.
"""
with self._lock:
cursor = self._conn.cursor()
try:
yield cursor
self._conn.commit()
except:
self._conn.rollback()
raise
finally:
cursor.close()
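For reference, a minimal usage sketch of the new transaction() helper, given a SqliteDatabase instance db; the items table here is purely illustrative:

# Hypothetical consumer of SqliteDatabase.transaction().
with db.transaction() as cursor:
    cursor.execute("CREATE TABLE IF NOT EXISTS items (name TEXT NOT NULL);")
    cursor.execute("INSERT INTO items (name) VALUES (?);", ("example",))
    count = cursor.execute("SELECT COUNT(*) FROM items;").fetchone()[0]
# Normal exit commits; any exception rolls the transaction back and re-raises.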

View File

@@ -23,6 +23,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_21 import build_migration_21
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -63,6 +64,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_18())
migrator.register_migration(build_migration_19(app_config=config))
migrator.register_migration(build_migration_20())
migrator.register_migration(build_migration_21())
migrator.run_migrations()
return db

View File

@@ -0,0 +1,40 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration21Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
cursor.execute(
"""
CREATE TABLE client_state (
id INTEGER PRIMARY KEY CHECK(id = 1),
data TEXT NOT NULL, -- Frontend will handle the shape of this data
updated_at DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP)
);
"""
)
cursor.execute(
"""
CREATE TRIGGER tg_client_state_updated_at
AFTER UPDATE ON client_state
FOR EACH ROW
BEGIN
UPDATE client_state
SET updated_at = CURRENT_TIMESTAMP
WHERE id = OLD.id;
END;
"""
)
def build_migration_21() -> Migration:
"""Builds the migration object for migrating from version 20 to version 21. This includes:
- Creating the `client_state` table.
- Adding a trigger to update the `updated_at` field on updates.
"""
return Migration(
from_version=20,
to_version=21,
callback=Migration21Callback(),
)
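As an aside, a standalone sketch of how the new client_state table behaves; the schema is copied from the migration above and the JSON payload is hypothetical:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE client_state ("
    " id INTEGER PRIMARY KEY CHECK(id = 1),"
    " data TEXT NOT NULL,"
    " updated_at DATETIME NOT NULL DEFAULT (CURRENT_TIMESTAMP));"
)
# The CHECK(id = 1) constraint makes this a single-row table, so an UPSERT is the natural
# write pattern for the frontend's opaque state blob. In the migrated database, the trigger
# added above refreshes updated_at on every update.
conn.execute(
    "INSERT INTO client_state (id, data) VALUES (1, ?)"
    " ON CONFLICT (id) DO UPDATE SET data = excluded.data;",
    ('{"theme": "dark"}',),
)
conn.commit()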

View File

@@ -32,7 +32,7 @@ class SqliteMigrator:
def __init__(self, db: SqliteDatabase) -> None:
self._db = db
self._logger = db.logger
self._logger = db._logger
self._migration_set = MigrationSet()
self._backup_path: Optional[Path] = None
@@ -45,7 +45,7 @@ class SqliteMigrator:
"""Migrates the database to the latest version."""
# This throws if there is a problem.
self._migration_set.validate_migration_chain()
cursor = self._db.conn.cursor()
cursor = self._db._conn.cursor()
self._create_migrations_table(cursor=cursor)
if self._migration_set.count == 0:
@@ -59,13 +59,13 @@ class SqliteMigrator:
self._logger.info("Database update needed")
# Make a backup of the db if it needs to be updated and is a file db
if self._db.db_path is not None:
if self._db._db_path is not None:
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
self._backup_path = self._db._db_path.parent / f"{self._db._db_path.stem}_backup_{timestamp}.db"
self._logger.info(f"Backing up database to {str(self._backup_path)}")
# Use SQLite to do the backup
with closing(sqlite3.connect(self._backup_path)) as backup_conn:
self._db.conn.backup(backup_conn)
self._db._conn.backup(backup_conn)
else:
self._logger.info("Using in-memory database, no backup needed")
@@ -81,7 +81,7 @@ class SqliteMigrator:
try:
# Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
# exception is raised.
with self._db.conn as conn:
with self._db._conn as conn:
cursor = conn.cursor()
if self._get_current_version(cursor) != migration.from_version:
raise MigrationError(

View File

@@ -17,7 +17,7 @@ from invokeai.app.util.misc import uuid_string
class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
@@ -25,24 +25,23 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
def get(self, style_preset_id: str) -> StylePresetRecordDTO:
"""Gets a style preset by ID."""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT *
FROM style_presets
WHERE id = ?;
""",
(style_preset_id,),
)
row = cursor.fetchone()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT *
FROM style_presets
WHERE id = ?;
""",
(style_preset_id,),
)
row = cursor.fetchone()
if row is None:
raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
return StylePresetRecordDTO.from_dict(dict(row))
def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
style_preset_id = uuid_string()
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
INSERT OR IGNORE INTO style_presets (
@@ -60,16 +59,11 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
style_preset.type,
),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return self.get(style_preset_id)
def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
style_preset_ids = []
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
for style_preset in style_presets:
style_preset_id = uuid_string()
style_preset_ids.append(style_preset_id)
@@ -90,16 +84,11 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
style_preset.type,
),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return None
def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
# Change the name of a style preset
if changes.name is not None:
cursor.execute(
@@ -122,15 +111,10 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
(changes.preset_data.model_dump_json(), style_preset_id),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return self.get(style_preset_id)
def delete(self, style_preset_id: str) -> None:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
DELETE from style_presets
@@ -138,51 +122,41 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
""",
(style_preset_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return None
def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
main_query = """
SELECT
*
FROM style_presets
"""
with self._db.transaction() as cursor:
main_query = """
SELECT
*
FROM style_presets
"""
if type is not None:
main_query += "WHERE type = ? "
if type is not None:
main_query += "WHERE type = ? "
main_query += "ORDER BY LOWER(name) ASC"
main_query += "ORDER BY LOWER(name) ASC"
cursor = self._conn.cursor()
if type is not None:
cursor.execute(main_query, (type,))
else:
cursor.execute(main_query)
if type is not None:
cursor.execute(main_query, (type,))
else:
cursor.execute(main_query)
rows = cursor.fetchall()
rows = cursor.fetchall()
style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]
return style_presets
def _sync_default_style_presets(self) -> None:
"""Syncs default style presets to the database. Internal use only."""
# First delete all existing default style presets
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
# First delete all existing default style presets
cursor.execute(
"""--sql
DELETE FROM style_presets
WHERE type = "default";
"""
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
# Next, parse and create the default style presets
with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
presets = json.load(file)

View File

@@ -25,7 +25,7 @@ SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%f"
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()
self._conn = db.conn
self._db = db
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
@@ -33,16 +33,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
def get(self, workflow_id: str) -> WorkflowRecordDTO:
"""Gets a workflow by ID. Updates the opened_at column."""
cursor = self._conn.cursor()
cursor.execute(
"""--sql
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
FROM workflow_library
WHERE workflow_id = ?;
""",
(workflow_id,),
)
row = cursor.fetchone()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
FROM workflow_library
WHERE workflow_id = ?;
""",
(workflow_id,),
)
row = cursor.fetchone()
if row is None:
raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
return WorkflowRecordDTO.from_dict(dict(row))
@@ -51,9 +51,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
if workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be created via this method")
try:
with self._db.transaction() as cursor:
workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
cursor = self._conn.cursor()
cursor.execute(
"""--sql
INSERT OR IGNORE INTO workflow_library (
@@ -64,18 +63,13 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(workflow_with_id.id, workflow_with_id.model_dump_json()),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return self.get(workflow_with_id.id)
def update(self, workflow: Workflow) -> WorkflowRecordDTO:
if workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be updated")
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
UPDATE workflow_library
@@ -84,18 +78,13 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(workflow.model_dump_json(), workflow.id),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return self.get(workflow.id)
def delete(self, workflow_id: str) -> None:
if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
raise ValueError("Default workflows cannot be deleted")
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
"""--sql
DELETE from workflow_library
@@ -103,10 +92,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(workflow_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
return None
def get_many(
@@ -121,108 +106,108 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
has_been_opened: Optional[bool] = None,
is_published: Optional[bool] = None,
) -> PaginatedResults[WorkflowRecordListItemDTO]:
# sanitize!
assert order_by in WorkflowRecordOrderBy
assert direction in SQLiteDirection
with self._db.transaction() as cursor:
# sanitize!
assert order_by in WorkflowRecordOrderBy
assert direction in SQLiteDirection
# We will construct the query dynamically based on the query params
# We will construct the query dynamically based on the query params
# The main query to get the workflows / counts
main_query = """
SELECT
workflow_id,
category,
name,
description,
created_at,
updated_at,
opened_at,
tags
FROM workflow_library
"""
count_query = "SELECT COUNT(*) FROM workflow_library"
# The main query to get the workflows / counts
main_query = """
SELECT
workflow_id,
category,
name,
description,
created_at,
updated_at,
opened_at,
tags
FROM workflow_library
"""
count_query = "SELECT COUNT(*) FROM workflow_library"
# Start with an empty list of conditions and params
conditions: list[str] = []
params: list[str | int] = []
# Start with an empty list of conditions and params
conditions: list[str] = []
params: list[str | int] = []
if categories:
# Categories is a list of WorkflowCategory enum values, and a single string in the DB
if categories:
# Categories is a list of WorkflowCategory enum values, and a single string in the DB
# Ensure all categories are valid (is this necessary?)
assert all(c in WorkflowCategory for c in categories)
# Ensure all categories are valid (is this necessary?)
assert all(c in WorkflowCategory for c in categories)
# Construct a placeholder string for the number of categories
placeholders = ", ".join("?" for _ in categories)
# Construct a placeholder string for the number of categories
placeholders = ", ".join("?" for _ in categories)
# Construct the condition string & params
category_condition = f"category IN ({placeholders})"
category_params = [category.value for category in categories]
# Construct the condition string & params
category_condition = f"category IN ({placeholders})"
category_params = [category.value for category in categories]
conditions.append(category_condition)
params.extend(category_params)
conditions.append(category_condition)
params.extend(category_params)
if tags:
# Tags is a list of strings, and a single string in the DB
# The string in the DB has no guaranteed format
if tags:
# Tags is a list of strings, and a single string in the DB
# The string in the DB has no guaranteed format
# Construct a list of conditions for each tag
tags_conditions = ["tags LIKE ?" for _ in tags]
tags_conditions_joined = " OR ".join(tags_conditions)
tags_condition = f"({tags_conditions_joined})"
# Construct a list of conditions for each tag
tags_conditions = ["tags LIKE ?" for _ in tags]
tags_conditions_joined = " OR ".join(tags_conditions)
tags_condition = f"({tags_conditions_joined})"
# And the params for the tags, case-insensitive
tags_params = [f"%{t.strip()}%" for t in tags]
# And the params for the tags, case-insensitive
tags_params = [f"%{t.strip()}%" for t in tags]
conditions.append(tags_condition)
params.extend(tags_params)
conditions.append(tags_condition)
params.extend(tags_params)
if has_been_opened:
conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
conditions.append("opened_at IS NULL")
if has_been_opened:
conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
conditions.append("opened_at IS NULL")
# Ignore whitespace in the query
stripped_query = query.strip() if query else None
if stripped_query:
# Construct a wildcard query for the name, description, and tags
wildcard_query = "%" + stripped_query + "%"
query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"
# Ignore whitespace in the query
stripped_query = query.strip() if query else None
if stripped_query:
# Construct a wildcard query for the name, description, and tags
wildcard_query = "%" + stripped_query + "%"
query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"
conditions.append(query_condition)
params.extend([wildcard_query, wildcard_query, wildcard_query])
conditions.append(query_condition)
params.extend([wildcard_query, wildcard_query, wildcard_query])
if conditions:
# If there are conditions, add a WHERE clause and then join the conditions
main_query += " WHERE "
count_query += " WHERE "
if conditions:
# If there are conditions, add a WHERE clause and then join the conditions
main_query += " WHERE "
count_query += " WHERE "
all_conditions = " AND ".join(conditions)
main_query += all_conditions
count_query += all_conditions
all_conditions = " AND ".join(conditions)
main_query += all_conditions
count_query += all_conditions
# After this point, the query and params differ for the main query and the count query
main_params = params.copy()
count_params = params.copy()
# After this point, the query and params differ for the main query and the count query
main_params = params.copy()
count_params = params.copy()
# Main query also gets ORDER BY and LIMIT/OFFSET
main_query += f" ORDER BY {order_by.value} {direction.value}"
# Main query also gets ORDER BY and LIMIT/OFFSET
main_query += f" ORDER BY {order_by.value} {direction.value}"
if per_page:
main_query += " LIMIT ? OFFSET ?"
main_params.extend([per_page, page * per_page])
if per_page:
main_query += " LIMIT ? OFFSET ?"
main_params.extend([per_page, page * per_page])
# Put a ring on it
main_query += ";"
count_query += ";"
# Put a ring on it
main_query += ";"
count_query += ";"
cursor = self._conn.cursor()
cursor.execute(main_query, main_params)
rows = cursor.fetchall()
workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
cursor.execute(main_query, main_params)
rows = cursor.fetchall()
workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
cursor.execute(count_query, count_params)
total = cursor.fetchone()[0]
cursor.execute(count_query, count_params)
total = cursor.fetchone()[0]
if per_page:
pages = total // per_page + (total % per_page > 0)
@@ -247,46 +232,46 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
if not tags:
return {}
cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories and selected tags
base_conditions: list[str] = []
base_params: list[str | int] = []
with self._db.transaction() as cursor:
result: dict[str, int] = {}
# Base conditions for categories and selected tags
base_conditions: list[str] = []
base_params: list[str | int] = []
# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
# For each tag to count, run a separate query
for tag in tags:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
# For each tag to count, run a separate query
for tag in tags:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
# Add this specific tag condition
conditions.append("tags LIKE ?")
params.append(f"%{tag.strip()}%")
# Add this specific tag condition
conditions.append("tags LIKE ?")
params.append(f"%{tag.strip()}%")
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[tag] = count
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[tag] = count
return result
@@ -296,52 +281,51 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
has_been_opened: Optional[bool] = None,
is_published: Optional[bool] = None,
) -> dict[str, int]:
cursor = self._conn.cursor()
result: dict[str, int] = {}
# Base conditions for categories
base_conditions: list[str] = []
base_params: list[str | int] = []
with self._db.transaction() as cursor:
result: dict[str, int] = {}
# Base conditions for categories
base_conditions: list[str] = []
base_params: list[str | int] = []
# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
# Add category conditions
if categories:
assert all(c in WorkflowCategory for c in categories)
placeholders = ", ".join("?" for _ in categories)
base_conditions.append(f"category IN ({placeholders})")
base_params.extend([category.value for category in categories])
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
if has_been_opened:
base_conditions.append("opened_at IS NOT NULL")
elif has_been_opened is False:
base_conditions.append("opened_at IS NULL")
# For each category to count, run a separate query
for category in categories:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
# For each category to count, run a separate query
for category in categories:
# Start with the base conditions
conditions = base_conditions.copy()
params = base_params.copy()
# Add this specific category condition
conditions.append("category = ?")
params.append(category.value)
# Add this specific category condition
conditions.append("category = ?")
params.append(category.value)
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
# Construct the full query
stmt = """--sql
SELECT COUNT(*)
FROM workflow_library
"""
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
if conditions:
stmt += " WHERE " + " AND ".join(conditions)
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[category.value] = count
cursor.execute(stmt, params)
count = cursor.fetchone()[0]
result[category.value] = count
return result
def update_opened_at(self, workflow_id: str) -> None:
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
cursor.execute(
f"""--sql
UPDATE workflow_library
@@ -350,10 +334,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(workflow_id,),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
def _sync_default_workflows(self) -> None:
"""Syncs default workflows to the database. Internal use only."""
@@ -368,8 +348,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
meaningless, as they are overwritten every time the server starts.
"""
try:
cursor = self._conn.cursor()
with self._db.transaction() as cursor:
workflows_from_file: list[Workflow] = []
workflows_to_update: list[Workflow] = []
workflows_to_add: list[Workflow] = []
@@ -449,8 +428,3 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
""",
(w.model_dump_json(), w.id),
)
self._conn.commit()
except Exception:
self._conn.rollback()
raise

View File

@@ -123,7 +123,11 @@ def calc_percentage(intermediate_state: PipelineIntermediateState) -> float:
if total_steps == 0:
return 0.0
if order == 2:
return floor(step / 2) / floor(total_steps / 2)
# Prevent division by zero when total_steps is 1 or 2
denominator = floor(total_steps / 2)
if denominator == 0:
return 0.0
return floor(step / 2) / denominator
# order == 1
return step / total_steps
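To see why the guard matters: with a second-order scheduler and total_steps equal to 1, floor(total_steps / 2) is 0 and the old expression divided by zero. A standalone sketch of the fixed branch (not the actual helper's signature):

from math import floor

def order2_percentage(step: int, total_steps: int) -> float:
    # Mirrors the fixed branch: second-order schedulers take two model calls per step.
    denominator = floor(total_steps / 2)
    if denominator == 0:
        return 0.0
    return floor(step / 2) / denominator

print(order2_percentage(1, 1))    # 0.0 instead of ZeroDivisionError
print(order2_percentage(10, 20))  # 0.5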

View File

@@ -30,8 +30,11 @@ def denoise(
controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension],
pos_ip_adapter_extensions: list[XLabsIPAdapterExtension],
neg_ip_adapter_extensions: list[XLabsIPAdapterExtension],
# extra img tokens
# extra img tokens (channel-wise)
img_cond: torch.Tensor | None,
# extra img tokens (sequence-wise) - for Kontext conditioning
img_cond_seq: torch.Tensor | None = None,
img_cond_seq_ids: torch.Tensor | None = None,
):
# step 0 is the initial state
total_steps = len(timesteps) - 1
@@ -46,6 +49,10 @@ def denoise(
)
# guidance_vec is ignored for schnell.
guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
# Store original sequence length for slicing predictions
original_seq_len = img.shape[1]
for step_index, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
@@ -71,10 +78,26 @@ def denoise(
# controlnet_residuals datastructure is efficient in that it likely contains multiple references to the same
# tensors. Calculating the sum materializes each tensor into its own instance.
merged_controlnet_residuals = sum_controlnet_flux_outputs(controlnet_residuals)
pred_img = torch.cat((img, img_cond), dim=-1) if img_cond is not None else img
# Prepare input for model - concatenate fresh each step
img_input = img
img_input_ids = img_ids
# Add channel-wise conditioning (for ControlNet, FLUX Fill, etc.)
if img_cond is not None:
img_input = torch.cat((img_input, img_cond), dim=-1)
# Add sequence-wise conditioning (for Kontext)
if img_cond_seq is not None:
assert img_cond_seq_ids is not None, (
"You need to provide either both or neither of the sequence conditioning"
)
img_input = torch.cat((img_input, img_cond_seq), dim=1)
img_input_ids = torch.cat((img_input_ids, img_cond_seq_ids), dim=1)
pred = model(
img=pred_img,
img_ids=img_ids,
img=img_input,
img_ids=img_input_ids,
txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
@@ -88,6 +111,10 @@ def denoise(
regional_prompting_extension=pos_regional_prompting_extension,
)
# Slice prediction to only include the main image tokens
if img_cond_seq is not None:
pred = pred[:, :original_seq_len]
step_cfg_scale = cfg_scale[step_index]
# If step_cfg_scale is 1.0, then we don't need to run the negative prediction.
@@ -98,9 +125,26 @@ def denoise(
if neg_regional_prompting_extension is None:
raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
# For negative prediction with Kontext, we need to include the reference images
# to maintain consistency between positive and negative passes. Without this,
# CFG would create artifacts as the attention mechanism would see different
# spatial structures in each pass
neg_img_input = img
neg_img_input_ids = img_ids
# Add channel-wise conditioning for negative pass if present
if img_cond is not None:
neg_img_input = torch.cat((neg_img_input, img_cond), dim=-1)
# Add sequence-wise conditioning (Kontext) for negative pass
# This ensures reference images are processed consistently
if img_cond_seq is not None:
neg_img_input = torch.cat((neg_img_input, img_cond_seq), dim=1)
neg_img_input_ids = torch.cat((neg_img_input_ids, img_cond_seq_ids), dim=1)
neg_pred = model(
img=img,
img_ids=img_ids,
img=neg_img_input,
img_ids=neg_img_input_ids,
txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
@@ -113,6 +157,10 @@ def denoise(
ip_adapter_extensions=neg_ip_adapter_extensions,
regional_prompting_extension=neg_regional_prompting_extension,
)
# Slice negative prediction to match main image tokens
if img_cond_seq is not None:
neg_pred = neg_pred[:, :original_seq_len]
pred = neg_pred + step_cfg_scale * (pred - neg_pred)
preview_img = img - t_curr * pred
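The Kontext handling above reduces to a concatenate-then-slice pattern along the token (sequence) dimension. A standalone sketch with made-up shapes:

import torch

# Hypothetical shapes: 16 main image tokens, 8 reference (Kontext) tokens, 64 channels.
img = torch.randn(1, 16, 64)
img_cond_seq = torch.randn(1, 8, 64)

original_seq_len = img.shape[1]
model_input = torch.cat((img, img_cond_seq), dim=1)  # (1, 24, 64) is what the transformer sees
pred = model_input * 0.5                             # stand-in for the model call
pred = pred[:, :original_seq_len]                    # keep only the main image tokens
assert pred.shape == img.shape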

View File

@@ -0,0 +1,203 @@
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from einops import repeat
from invokeai.app.invocations.fields import FluxKontextConditioningField
from invokeai.app.invocations.model import VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.sampling_utils import pack
from invokeai.backend.util.devices import TorchDevice
def generate_img_ids_with_offset(
latent_height: int,
latent_width: int,
batch_size: int,
device: torch.device,
dtype: torch.dtype,
idx_offset: int = 0,
h_offset: int = 0,
w_offset: int = 0,
) -> torch.Tensor:
"""Generate tensor of image position ids with optional index and spatial offsets.
Args:
latent_height (int): Height of image in latent space (after packing, this becomes h//2).
latent_width (int): Width of image in latent space (after packing, this becomes w//2).
batch_size (int): Number of images in the batch.
device (torch.device): Device to create tensors on.
dtype (torch.dtype): Data type for the tensors.
idx_offset (int): Offset to add to the first dimension of the image ids (default: 0).
h_offset (int): Spatial offset for height/y-coordinates in latent space (default: 0).
w_offset (int): Spatial offset for width/x-coordinates in latent space (default: 0).
Returns:
torch.Tensor: Image position ids with shape [batch_size, (latent_height//2 * latent_width//2), 3].
"""
if device.type == "mps":
orig_dtype = dtype
dtype = torch.float16
# After packing, the spatial dimensions are halved due to the 2x2 patch structure
packed_height = latent_height // 2
packed_width = latent_width // 2
# Convert spatial offsets from latent space to packed space
packed_h_offset = h_offset // 2
packed_w_offset = w_offset // 2
# Create base tensor for position IDs with shape [packed_height, packed_width, 3]
# The 3 channels represent: [batch_offset, y_position, x_position]
img_ids = torch.zeros(packed_height, packed_width, 3, device=device, dtype=dtype)
# Set the batch offset for all positions
img_ids[..., 0] = idx_offset
# Create y-coordinate indices (vertical positions) with spatial offset
y_indices = torch.arange(packed_height, device=device, dtype=dtype) + packed_h_offset
# Broadcast y_indices to match the spatial dimensions [packed_height, 1]
img_ids[..., 1] = y_indices[:, None]
# Create x-coordinate indices (horizontal positions) with spatial offset
x_indices = torch.arange(packed_width, device=device, dtype=dtype) + packed_w_offset
# Broadcast x_indices to match the spatial dimensions [1, packed_width]
img_ids[..., 2] = x_indices[None, :]
# Expand to include batch dimension: [batch_size, (packed_height * packed_width), 3]
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)
if device.type == "mps":
img_ids = img_ids.to(orig_dtype)
return img_ids
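An illustrative call to the helper above, placing a hypothetical 64x64-latent reference image to the right of an existing 64-wide canvas (all sizes are assumptions for the example):

import torch

ids = generate_img_ids_with_offset(
    latent_height=64,
    latent_width=64,
    batch_size=1,
    device=torch.device("cpu"),
    dtype=torch.float32,
    idx_offset=1,  # reference images use index 1; the main image uses 0
    w_offset=64,   # shift x-coordinates so the tokens don't overlap the main image
)
print(ids.shape)  # torch.Size([1, 1024, 3]) -> (64 // 2) * (64 // 2) packed tokens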
class KontextExtension:
"""Applies FLUX Kontext (reference image) conditioning."""
def __init__(
self,
kontext_conditioning: list[FluxKontextConditioningField],
context: InvocationContext,
vae_field: VAEField,
device: torch.device,
dtype: torch.dtype,
):
"""
Initializes the KontextExtension, pre-processing the reference images
into latents and positional IDs.
"""
self._context = context
self._device = device
self._dtype = dtype
self._vae_field = vae_field
self.kontext_conditioning = kontext_conditioning
# Pre-process and cache the kontext latents and ids upon initialization.
self.kontext_latents, self.kontext_ids = self._prepare_kontext()
def _prepare_kontext(self) -> tuple[torch.Tensor, torch.Tensor]:
"""Encodes the reference images and prepares their concatenated latents and IDs with spatial tiling."""
all_latents = []
all_ids = []
# Track cumulative dimensions for spatial tiling
# These track the running extent of the virtual canvas in latent space
h = 0 # Running height extent
w = 0 # Running width extent
vae_info = self._context.models.load(self._vae_field.vae)
for idx, kontext_field in enumerate(self.kontext_conditioning):
image = self._context.images.get_pil(kontext_field.image.image_name)
# Convert to RGB
image = image.convert("RGB")
# Convert to tensor using torchvision transforms for consistency
transformation = T.Compose(
[
T.ToTensor(), # Converts PIL image to tensor and scales to [0, 1]
]
)
image_tensor = transformation(image)
# Convert from [0, 1] to [-1, 1] range expected by VAE
image_tensor = image_tensor * 2.0 - 1.0
image_tensor = image_tensor.unsqueeze(0) # Add batch dimension
image_tensor = image_tensor.to(self._device)
# Continue with VAE encoding
# Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
vae_dtype = next(iter(vae.parameters())).dtype
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
# Use sample=False to get the distribution mean without noise
kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
# Extract tensor dimensions
batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
# Pad latents to be compatible with patch_size=2
# This ensures dimensions are even for the pack() function
pad_h = (2 - latent_height % 2) % 2
pad_w = (2 - latent_width % 2) % 2
if pad_h > 0 or pad_w > 0:
kontext_latents_unpacked = F.pad(kontext_latents_unpacked, (0, pad_w, 0, pad_h), mode="circular")
# Update dimensions after padding
_, _, latent_height, latent_width = kontext_latents_unpacked.shape
# Pack the latents
kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
# Determine spatial offsets for this reference image
# - Compare the potential new canvas dimensions if we add the image vertically vs horizontally
# - Choose the placement that results in a more square-like canvas
h_offset = 0
w_offset = 0
if idx > 0: # First image starts at (0, 0)
# Check which placement would result in better canvas dimensions
# If adding to height would make the canvas taller than wide, tile horizontally
# Otherwise, tile vertically
if latent_height + h > latent_width + w:
# Tile horizontally (to the right of existing images)
w_offset = w
else:
# Tile vertically (below existing images)
h_offset = h
# Generate IDs with both index offset and spatial offsets
kontext_ids = generate_img_ids_with_offset(
latent_height=latent_height,
latent_width=latent_width,
batch_size=batch_size,
device=self._device,
dtype=self._dtype,
idx_offset=1, # All reference images use index=1 (matching ComfyUI implementation)
h_offset=h_offset,
w_offset=w_offset,
)
# Update cumulative dimensions
# Track the maximum extent of the virtual canvas after placing this image
h = max(h, latent_height + h_offset)
w = max(w, latent_width + w_offset)
all_latents.append(kontext_latents_packed)
all_ids.append(kontext_ids)
# Concatenate all latents and IDs along the sequence dimension
concatenated_latents = torch.cat(all_latents, dim=1) # Concatenate along sequence dimension
concatenated_ids = torch.cat(all_ids, dim=1) # Concatenate along sequence dimension
return concatenated_latents, concatenated_ids
def ensure_batch_size(self, target_batch_size: int) -> None:
"""Ensures the kontext latents and IDs match the target batch size by repeating if necessary."""
if self.kontext_latents.shape[0] != target_batch_size:
self.kontext_latents = self.kontext_latents.repeat(target_batch_size, 1, 1)
self.kontext_ids = self.kontext_ids.repeat(target_batch_size, 1, 1)
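For clarity, a standalone sketch of the placement heuristic used in _prepare_kontext: extend the virtual canvas in whichever direction keeps it closer to square. All extents below are hypothetical latent sizes.

def choose_offsets(latent_height: int, latent_width: int, h: int, w: int) -> tuple[int, int]:
    # Returns (h_offset, w_offset) for the next reference image.
    if h == 0 and w == 0:  # the first reference image sits at the origin
        return 0, 0
    if latent_height + h > latent_width + w:
        return 0, w        # tile horizontally (to the right of existing images)
    return h, 0            # tile vertically (below existing images)

print(choose_offsets(64, 64, 0, 0))    # (0, 0)
print(choose_offsets(64, 64, 64, 64))  # (64, 0) -> second image goes below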

View File

@@ -174,11 +174,13 @@ def generate_img_ids(h: int, w: int, batch_size: int, device: torch.device, dtyp
dtype = torch.float16
img_ids = torch.zeros(h // 2, w // 2, 3, device=device, dtype=dtype)
# Set batch offset to 0 for main image tokens
img_ids[..., 0] = 0
img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=device, dtype=dtype)[:, None]
img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=device, dtype=dtype)[None, :]
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)
if device.type == "mps":
img_ids.to(orig_dtype)
img_ids = img_ids.to(orig_dtype)
return img_ids

View File

@@ -18,6 +18,29 @@ class ModelSpec:
repo_ae: str | None
# Preferred resolutions for Kontext models to avoid tiling artifacts
# These are the specific resolutions the model was trained on
PREFERED_KONTEXT_RESOLUTIONS = [
(672, 1568),
(688, 1504),
(720, 1456),
(752, 1392),
(800, 1328),
(832, 1248),
(880, 1184),
(944, 1104),
(1024, 1024),
(1104, 944),
(1184, 880),
(1248, 832),
(1328, 800),
(1392, 752),
(1456, 720),
(1504, 688),
(1568, 672),
]
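As an illustration of how these resolutions might be consumed, a hedged sketch that snaps a requested size to the entry with the closest aspect ratio. It assumes the tuples are (width, height) pairs and is not necessarily the selection logic used elsewhere in the app.

def nearest_kontext_resolution(width: int, height: int) -> tuple[int, int]:
    # Pick the trained resolution whose aspect ratio is closest to the requested one.
    target_ratio = width / height
    return min(PREFERED_KONTEXT_RESOLUTIONS, key=lambda wh: abs(wh[0] / wh[1] - target_ratio))

print(nearest_kontext_resolution(1920, 1080))  # (1392, 752) under the (width, height) assumption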
max_seq_lengths: Dict[str, Literal[256, 512]] = {
"flux-dev": 512,
"flux-dev-fill": 512,

View File

@@ -187,7 +187,7 @@ class ModelConfigBase(ABC, BaseModel):
else:
return config_cls.from_model_on_disk(mod, **overrides)
raise InvalidModelConfigException("No valid config found")
raise InvalidModelConfigException("Unable to determine model type")
@classmethod
def get_tag(cls) -> Tag:

View File

@@ -9,6 +9,7 @@ import spandrel
import torch
import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.misc import uuid_string
from invokeai.backend.flux.controlnet.state_dict_utils import (
is_state_dict_instantx_controlnet,
@@ -493,9 +494,21 @@ class ModelProbe(object):
# scan model
scan_result = pscan.scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {model_name} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"The model {model_name} is potentially infected by malware. Aborting import.")
if scan_result.scan_err:
raise Exception(f"Error scanning model {model_name} for malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {model_name} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {model_name} for malware. Aborting import.")
# Probing utilities

View File

@@ -7,7 +7,14 @@ from typing import Optional
import accelerate
import torch
from safetensors.torch import load_file
from transformers import AutoConfig, AutoModelForTextEncoding, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
from transformers import (
AutoConfig,
AutoModelForTextEncoding,
CLIPTextModel,
CLIPTokenizer,
T5EncoderModel,
T5TokenizerFast,
)
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXControlNetFlux
@@ -139,7 +146,7 @@ class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
)
match submodel_type:
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
te2_model_path = Path(config.path) / "text_encoder_2"
model_config = AutoConfig.from_pretrained(te2_model_path)
@@ -183,7 +190,7 @@ class T5EncoderCheckpointModel(ModelLoader):
match submodel_type:
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
return T5EncoderModel.from_pretrained(
Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True

View File

@@ -6,13 +6,17 @@ import torch
from picklescan.scanner import scan_file_path
from safetensors import safe_open
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.util.silence_warnings import SilenceWarnings
StateDict: TypeAlias = dict[str | int, Any] # When are the keys int?
logger = InvokeAILogger.get_logger()
class ModelOnDisk:
"""A utility class representing a model stored on disk."""
@@ -79,8 +83,24 @@ class ModelOnDisk:
with SilenceWarnings():
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
scan_result = scan_file_path(path)
if scan_result.infected_files != 0 or scan_result.scan_err:
raise RuntimeError(f"The model {path.stem} is potentially infected by malware. Aborting import.")
if scan_result.infected_files != 0:
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {path.stem} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(
f"The model {path.stem} is potentially infected by malware. Aborting import."
)
if scan_result.scan_err:
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {path.stem} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {path.stem} for malware. Aborting import.")
checkpoint = torch.load(path, map_location="cpu")
assert isinstance(checkpoint, dict)
elif path.suffix.endswith(".gguf"):

View File

@@ -23,7 +23,7 @@ class StarterModel(StarterModelWithoutDependencies):
dependencies: Optional[list[StarterModelWithoutDependencies]] = None
class StarterModelBundles(BaseModel):
class StarterModelBundle(BaseModel):
name: str
models: list[StarterModel]
@@ -109,7 +109,7 @@ flux_vae = StarterModel(
# region: Main
flux_schnell_quantized = StarterModel(
name="FLUX Schnell (Quantized)",
name="FLUX.1 schnell (quantized)",
base=BaseModelType.Flux,
source="InvokeAI/flux_schnell::transformer/bnb_nf4/flux1-schnell-bnb_nf4.safetensors",
description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
@@ -117,7 +117,7 @@ flux_schnell_quantized = StarterModel(
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_dev_quantized = StarterModel(
name="FLUX Dev (Quantized)",
name="FLUX.1 dev (quantized)",
base=BaseModelType.Flux,
source="InvokeAI/flux_dev::transformer/bnb_nf4/flux1-dev-bnb_nf4.safetensors",
description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
@@ -125,7 +125,7 @@ flux_dev_quantized = StarterModel(
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_schnell = StarterModel(
name="FLUX Schnell",
name="FLUX.1 schnell",
base=BaseModelType.Flux,
source="InvokeAI/flux_schnell::transformer/base/flux1-schnell.safetensors",
description="FLUX schnell transformer in bfloat16. Total size with dependencies: ~33GB",
@@ -133,13 +133,45 @@ flux_schnell = StarterModel(
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
)
flux_dev = StarterModel(
name="FLUX Dev",
name="FLUX.1 dev",
base=BaseModelType.Flux,
source="InvokeAI/flux_dev::transformer/base/flux1-dev.safetensors",
description="FLUX dev transformer in bfloat16. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
)
flux_kontext = StarterModel(
name="FLUX.1 Kontext dev",
base=BaseModelType.Flux,
source="https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev/resolve/main/flux1-kontext-dev.safetensors",
description="FLUX.1 Kontext dev transformer in bfloat16. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
)
flux_kontext_quantized = StarterModel(
name="FLUX.1 Kontext dev (quantized)",
base=BaseModelType.Flux,
source="https://huggingface.co/unsloth/FLUX.1-Kontext-dev-GGUF/resolve/main/flux1-kontext-dev-Q4_K_M.gguf",
description="FLUX.1 Kontext dev quantized (q4_k_m). Total size with dependencies: ~14GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_krea = StarterModel(
name="FLUX.1 Krea dev",
base=BaseModelType.Flux,
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev/resolve/main/flux1-krea-dev.safetensors",
description="FLUX.1 Krea dev. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
flux_krea_quantized = StarterModel(
name="FLUX.1 Krea dev (quantized)",
base=BaseModelType.Flux,
source="https://huggingface.co/InvokeAI/FLUX.1-Krea-dev-GGUF/resolve/main/flux1-krea-dev-Q4_K_M.gguf",
description="FLUX.1 Krea dev quantized (q4_k_m). Total size with dependencies: ~14GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
)
sd35_medium = StarterModel(
name="SD3.5 Medium",
base=BaseModelType.StableDiffusion3,
@@ -564,13 +596,14 @@ t2i_sketch_sdxl = StarterModel(
)
# endregion
# region SpandrelImageToImage
realesrgan_anime = StarterModel(
name="RealESRGAN_x4plus_anime_6B",
animesharp_v4_rcan = StarterModel(
name="2x-AnimeSharpV4_RCAN",
base=BaseModelType.Any,
source="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
description="A Real-ESRGAN 4x upscaling model (optimized for anime images).",
source="https://github.com/Kim2091/Kim2091-Models/releases/download/2x-AnimeSharpV4/2x-AnimeSharpV4_RCAN.safetensors",
description="A 2x upscaling model (optimized for anime images).",
type=ModelType.SpandrelImageToImage,
)
realesrgan_x4 = StarterModel(
name="RealESRGAN_x4plus",
base=BaseModelType.Any,
@@ -656,6 +689,7 @@ flux_fill = StarterModel(
# List of starter models, displayed on the frontend.
# The order/sort of this list is not changed by the frontend - set it how you want it here.
STARTER_MODELS: list[StarterModel] = [
flux_kontext_quantized,
flux_schnell_quantized,
flux_dev_quantized,
flux_schnell,
@@ -715,7 +749,7 @@ STARTER_MODELS: list[StarterModel] = [
t2i_lineart_sdxl,
t2i_sketch_sdxl,
realesrgan_x4,
realesrgan_anime,
animesharp_v4_rcan,
realesrgan_x2,
swinir,
t5_base_encoder,
@@ -726,6 +760,8 @@ STARTER_MODELS: list[StarterModel] = [
llava_onevision,
flux_fill,
cogview4,
flux_krea,
flux_krea_quantized,
]
sd1_bundle: list[StarterModel] = [
@@ -776,12 +812,14 @@ flux_bundle: list[StarterModel] = [
flux_depth_control_lora,
flux_redux,
flux_fill,
flux_kontext_quantized,
flux_krea_quantized,
]
STARTER_BUNDLES: dict[str, list[StarterModel]] = {
BaseModelType.StableDiffusion1: sd1_bundle,
BaseModelType.StableDiffusionXL: sdxl_bundle,
BaseModelType.Flux: flux_bundle,
STARTER_BUNDLES: dict[str, StarterModelBundle] = {
BaseModelType.StableDiffusion1: StarterModelBundle(name="Stable Diffusion 1.5", models=sd1_bundle),
BaseModelType.StableDiffusionXL: StarterModelBundle(name="SDXL", models=sdxl_bundle),
BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle),
}
assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
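A brief, illustrative consumer of the reshaped STARTER_BUNDLES mapping: each value is now a StarterModelBundle with a display name and its models, rather than a bare list (assumes the module's names are importable):

bundle = STARTER_BUNDLES[BaseModelType.Flux]
print(bundle.name)                        # "FLUX.1 dev"
print([m.name for m in bundle.models][:2])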

View File

@@ -0,0 +1,145 @@
"""Utility functions for extracting metadata from LoRA model files."""
import json
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Set, Tuple
from PIL import Image
from invokeai.app.util.thumbnails import make_thumbnail
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
logger = logging.getLogger(__name__)
def extract_lora_metadata(
model_path: Path, model_key: str, model_images_path: Path
) -> Tuple[Optional[str], Optional[Set[str]]]:
"""
Extract metadata for a LoRA model from associated JSON and image files.
Args:
model_path: Path to the LoRA model file
model_key: Unique key for the model
model_images_path: Path to the model images directory
Returns:
Tuple of (description, trigger_phrases)
"""
model_stem = model_path.stem
model_dir = model_path.parent
# Find and process preview image
_process_preview_image(model_stem, model_dir, model_key, model_images_path)
# Extract metadata from JSON
description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)
return description, trigger_phrases
def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
"""Find and process a preview image for the model, saving it to the model images store."""
image_extensions = [".png", ".jpg", ".jpeg", ".webp"]
for ext in image_extensions:
image_path = model_dir / f"{model_stem}{ext}"
if image_path.exists():
try:
# Open the image
with Image.open(image_path) as img:
# Create thumbnail and save to model images directory
thumbnail = make_thumbnail(img, 256)
thumbnail_path = model_images_path / f"{model_key}.webp"
thumbnail.save(thumbnail_path, format="webp")
logger.info(f"Processed preview image {image_path.name} for model {model_key}")
return True
except Exception as e:
logger.warning(f"Failed to process preview image {image_path.name}: {e}")
return False
return False
def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
"""Extract metadata from a JSON file with the same name as the model."""
json_path = model_dir / f"{model_stem}.json"
if not json_path.exists():
return None, None
try:
with open(json_path, "r", encoding="utf-8") as f:
metadata = json.load(f)
# Extract description
description = _build_description(metadata)
# Extract trigger phrases
trigger_phrases = _extract_trigger_phrases(metadata)
if description or trigger_phrases:
logger.info(f"Applied metadata from {json_path.name}")
return description, trigger_phrases
except (json.JSONDecodeError, IOError, Exception) as e:
logger.warning(f"Failed to read metadata from {json_path}: {e}")
return None, None
def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
"""Build a description from metadata fields."""
description_parts = []
if description := metadata.get("description"):
description_parts.append(str(description).strip())
if notes := metadata.get("notes"):
description_parts.append(str(notes).strip())
return " | ".join(description_parts) if description_parts else None
def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
"""Extract trigger phrases from metadata."""
if not (activation_text := metadata.get("activation text")):
return None
activation_text = str(activation_text).strip()
if not activation_text:
return None
# Split on commas and clean up each phrase
phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]
return set(phrases) if phrases else None
def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
"""
Apply extracted metadata to a LoRA model configuration.
Args:
info: The model configuration to update
model_path: Path to the LoRA model file
model_images_path: Path to the model images directory
"""
# Only process LoRA models
if info.type != ModelType.LoRA:
return
# Extract and apply metadata
description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)
# We don't set cover_image path in the config anymore since images are stored
# separately in the model images store by model key
if description:
info.description = description
if trigger_phrases:
info.trigger_phrases = trigger_phrases
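An illustrative call to the new utilities; the paths and model key below are hypothetical. The functions look for "<model stem>.json" and a same-named preview image next to the LoRA file.

from pathlib import Path

description, trigger_phrases = extract_lora_metadata(
    model_path=Path("/models/loras/pixel-style.safetensors"),  # hypothetical LoRA path
    model_key="abc123",                                         # hypothetical model key
    model_images_path=Path("/data/model_images"),               # hypothetical images dir
)
print(description)      # e.g. "Pixel art style | Use at weight 0.8", or None if no JSON
print(trigger_phrases)  # e.g. {"pixel art", "pixelstyle"}, or None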

View File

@@ -8,8 +8,12 @@ import picklescan.scanner as pscan
import safetensors
import torch
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.model_manager.taxonomy import ClipVariantType
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger()
def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
@@ -59,9 +63,21 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str,
if scan:
scan_result = pscan.scan_file_path(path)
if scan_result.infected_files != 0:
raise Exception(f"The model at {path} is potentially infected by malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"The model {path} is potentially infected by malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"The model {path} is potentially infected by malware. Aborting import.")
if scan_result.scan_err:
raise Exception(f"Error scanning model at {path} for malware. Aborting import.")
if get_config().unsafe_disable_picklescan:
logger.warning(
f"Error scanning the model at {path} for malware, but picklescan is disabled. "
"Proceeding with caution."
)
else:
raise RuntimeError(f"Error scanning the model at {path} for malware. Aborting import.")
checkpoint = torch.load(path, map_location=torch.device("meta"))
return checkpoint

View File

@@ -1,10 +0,0 @@
dist/
static/
.husky/
node_modules/
patches/
stats.html
index.html
.yarn/
*.scss
src/services/api/schema.ts

View File

@@ -1,70 +0,0 @@
module.exports = {
extends: ['@invoke-ai/eslint-config-react'],
plugins: ['path', 'i18next'],
rules: {
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
'react-refresh/only-export-components': 'off',
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
'@typescript-eslint/consistent-type-assertions': 'off',
// https://github.com/qdanik/eslint-plugin-path
'path/no-relative-imports': ['error', { maxDepth: 0 }],
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
// TODO: ENABLE THIS RULE BEFORE v6.0.0
// 'i18next/no-literal-string': 'error',
// https://eslint.org/docs/latest/rules/no-console
'no-console': 'warn',
// https://eslint.org/docs/latest/rules/no-promise-executor-return
'no-promise-executor-return': 'error',
// https://eslint.org/docs/latest/rules/require-await
'require-await': 'error',
// TODO: ENABLE THIS RULE BEFORE v6.0.0
'react/display-name': 'off',
'no-restricted-properties': [
'error',
{
object: 'crypto',
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
{
object: 'navigator',
property: 'clipboard',
message:
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],
'no-restricted-imports': [
'error',
{
paths: [
{
name: 'lodash-es',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'lodash-es',
message: 'Please use es-toolkit instead.',
},
{
name: 'es-toolkit',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
],
},
],
},
overrides: [
/**
* Overrides for stories
*/
{
files: ['*.stories.tsx'],
rules: {
// We may not have i18n available in stories.
'i18next/no-literal-string': 'off',
},
},
],
};

View File

@@ -44,4 +44,5 @@ yalc.lock
# vitest
tsconfig.vitest-temp.json
coverage/
coverage/
*.tgz

View File

@@ -14,3 +14,4 @@ static/
src/theme/css/overlayscrollbars.css
src/theme_/css/overlayscrollbars.css
pnpm-lock.yaml
.claude

View File

@@ -1,11 +0,0 @@
module.exports = {
...require('@invoke-ai/prettier-config-react'),
overrides: [
{
files: ['public/locales/*.json'],
options: {
tabWidth: 4,
},
},
],
};

View File

@@ -0,0 +1,17 @@
{
"$schema": "http://json.schemastore.org/prettierrc",
"trailingComma": "es5",
"printWidth": 120,
"tabWidth": 2,
"semi": true,
"singleQuote": true,
"endOfLine": "auto",
"overrides": [
{
"files": ["public/locales/*.json"],
"options": {
"tabWidth": 4
}
}
]
}

View File

@@ -1,21 +1,23 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
import type { PropsWithChildren } from 'react';
import { memo, useEffect } from 'react';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
/**
* Initializes some state for storybook. Must be in a different component
* so that it is run inside the redux context.
*/
export const ReduxInit = memo((props: PropsWithChildren) => {
export const ReduxInit = memo(({ children }: PropsWithChildren) => {
const dispatch = useAppDispatch();
useGlobalModifiersInit();
useEffect(() => {
dispatch(
modelChanged({ model: { key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' } })
);
}, []);
}, [dispatch]);
return props.children;
return children;
});
ReduxInit.displayName = 'ReduxInit';

View File

@@ -2,19 +2,13 @@ import type { StorybookConfig } from '@storybook/react-vite';
const config: StorybookConfig = {
stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
addons: [
'@storybook/addon-links',
'@storybook/addon-essentials',
'@storybook/addon-interactions',
'@storybook/addon-storysource',
],
addons: ['@storybook/addon-links', '@storybook/addon-docs'],
framework: {
name: '@storybook/react-vite',
options: {},
},
docs: {
autodocs: 'tag',
},
core: {
disableTelemetry: true,
},

View File

@@ -1,5 +1,5 @@
import { addons } from '@storybook/manager-api';
import { themes } from '@storybook/theming';
import { addons } from 'storybook/manager-api';
import { themes } from 'storybook/theming';
addons.setConfig({
theme: themes.dark,

View File

@@ -1,17 +1,18 @@
import { Preview } from '@storybook/react';
import { themes } from '@storybook/theming';
import type { Preview } from '@storybook/react-vite';
import { themes } from 'storybook/theming';
import { $store } from 'app/store/nanostores/store';
import i18n from 'i18next';
import { initReactI18next } from 'react-i18next';
import { Provider } from 'react-redux';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
import { createStore } from '../src/app/store/store';
// TODO: Disabled for IDE performance issues with our translation JSON
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
import translationEN from '../public/locales/en.json';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
import { createStore } from '../src/app/store/store';
import { ReduxInit } from './ReduxInit';
import { $store } from 'app/store/nanostores/store';
i18n.use(initReactI18next).init({
lng: 'en',
@@ -25,7 +26,7 @@ i18n.use(initReactI18next).init({
returnNull: false,
});
const store = createStore(undefined, false);
const store = createStore();
$store.set(store);
$baseUrl.set('http://localhost:9090');
@@ -46,6 +47,7 @@ const preview: Preview = {
parameters: {
docs: {
theme: themes.dark,
codePanel: true,
},
},
};

View File

@@ -0,0 +1,246 @@
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import typescriptParser from '@typescript-eslint/parser';
import pluginI18Next from 'eslint-plugin-i18next';
import pluginImport from 'eslint-plugin-import';
import pluginPath from 'eslint-plugin-path';
import pluginReact from 'eslint-plugin-react';
import pluginReactHooks from 'eslint-plugin-react-hooks';
import pluginReactRefresh from 'eslint-plugin-react-refresh';
import pluginSimpleImportSort from 'eslint-plugin-simple-import-sort';
import pluginStorybook from 'eslint-plugin-storybook';
import pluginUnusedImports from 'eslint-plugin-unused-imports';
import globals from 'globals';
export default [
js.configs.recommended,
{
languageOptions: {
parser: typescriptParser,
parserOptions: {
ecmaFeatures: {
jsx: true,
},
},
globals: {
...globals.browser,
...globals.node,
GlobalCompositeOperation: 'readonly',
RequestInit: 'readonly',
},
},
files: ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx'],
plugins: {
react: pluginReact,
'@typescript-eslint': typescriptEslint,
'react-hooks': pluginReactHooks,
import: pluginImport,
'unused-imports': pluginUnusedImports,
'simple-import-sort': pluginSimpleImportSort,
'react-refresh': pluginReactRefresh.configs.vite,
path: pluginPath,
i18next: pluginI18Next,
storybook: pluginStorybook,
},
rules: {
...typescriptEslint.configs.recommended.rules,
...pluginReact.configs.recommended.rules,
...pluginReact.configs['jsx-runtime'].rules,
...pluginReactHooks.configs.recommended.rules,
...pluginStorybook.configs.recommended.rules,
'react/jsx-no-bind': [
'error',
{
allowBind: true,
},
],
'react/jsx-curly-brace-presence': [
'error',
{
props: 'never',
children: 'never',
},
],
'react-hooks/exhaustive-deps': 'error',
curly: 'error',
'no-var': 'error',
'brace-style': 'error',
'prefer-template': 'error',
radix: 'error',
'space-before-blocks': 'error',
eqeqeq: 'error',
'one-var': ['error', 'never'],
'no-eval': 'error',
'no-extend-native': 'error',
'no-implied-eval': 'error',
'no-label-var': 'error',
'no-return-assign': 'error',
'no-sequences': 'error',
'no-template-curly-in-string': 'error',
'no-throw-literal': 'error',
'no-unmodified-loop-condition': 'error',
'import/no-duplicates': 'error',
'import/prefer-default-export': 'off',
'unused-imports/no-unused-imports': 'error',
'unused-imports/no-unused-vars': [
'error',
{
vars: 'all',
varsIgnorePattern: '^_',
args: 'after-used',
argsIgnorePattern: '^_',
},
],
'simple-import-sort/imports': 'error',
'simple-import-sort/exports': 'error',
'@typescript-eslint/no-unused-vars': 'off',
'@typescript-eslint/ban-ts-comment': [
'error',
{
'ts-expect-error': 'allow-with-description',
'ts-ignore': true,
'ts-nocheck': true,
'ts-check': false,
minimumDescriptionLength: 10,
},
],
'@typescript-eslint/no-empty-interface': [
'error',
{
allowSingleExtends: true,
},
],
'@typescript-eslint/consistent-type-imports': [
'error',
{
prefer: 'type-imports',
fixStyle: 'separate-type-imports',
disallowTypeAnnotations: true,
},
],
'@typescript-eslint/no-import-type-side-effects': 'error',
'@typescript-eslint/consistent-type-assertions': [
'error',
{
assertionStyle: 'as',
},
],
'path/no-relative-imports': [
'error',
{
maxDepth: 0,
},
],
'no-console': 'warn',
'no-promise-executor-return': 'error',
'require-await': 'error',
'no-restricted-syntax': [
'error',
{
selector: 'CallExpression[callee.name="setActiveTab"]',
message:
'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
},
],
'no-restricted-properties': [
'error',
{
object: 'crypto',
property: 'randomUUID',
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
},
{
object: 'navigator',
property: 'clipboard',
message:
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
},
],
// Typescript handles this for us: https://eslint.org/docs/latest/rules/no-redeclare#handled_by_typescript
'no-redeclare': 'off',
'no-restricted-imports': [
'error',
{
paths: [
{
name: 'lodash-es',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'lodash-es',
message: 'Please use es-toolkit instead.',
},
{
name: 'es-toolkit',
importNames: ['isEqual'],
message: 'Please use objectEquals from @observ33r/object-equals instead.',
},
{
name: 'zod/v3',
message: 'Import from zod instead.',
},
],
},
],
},
settings: {
react: {
version: 'detect',
},
},
},
{
files: ['**/use-navigation-api.tsx'],
rules: {
'no-restricted-syntax': 'off',
},
},
{
files: ['**/*.stories.tsx'],
rules: {
'i18next/no-literal-string': 'off',
},
},
{
ignores: [
'**/dist/',
'**/static/',
'**/.husky/',
'**/node_modules/',
'**/patches/',
'**/stats.html',
'**/index.html',
'**/.yarn/',
'**/*.scss',
'src/services/api/schema.ts',
'.prettierrc.js',
'.storybook',
],
},
];

View File

@@ -3,8 +3,6 @@ import type { KnipConfig } from 'knip';
const config: KnipConfig = {
project: ['src/**/*.{ts,tsx}!'],
ignore: [
// TODO(psyche): temporarily ignored all files for test build purposes
'src/**',
// This file is only used during debugging
'src/app/store/middleware/debugLoggerMiddleware.ts',
// Autogenerated types - shouldn't ever touch these
@@ -14,12 +12,12 @@ const config: KnipConfig = {
'src/features/parameters/types/parameterSchemas.ts',
// TODO(psyche): maybe we can clean up these utils after canvas v2 release
'src/features/controlLayers/konva/util.ts',
// TODO(psyche): restore HRF functionality?
'src/features/hrf/**',
// This feature is (temporarily?) disabled
'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
// Will be using this
'src/common/hooks/useAsyncState.ts',
'src/app/store/use-debounced-app-selector.ts',
],
ignoreBinaries: ['only-allow'],
ignoreDependencies: ['magic-string'],
paths: {
'public/*': ['public/*'],
},

View File

@@ -38,19 +38,6 @@
"test:ui": "vitest --coverage --ui",
"test:no-watch": "vitest --no-watch"
},
"madge": {
"excludeRegExp": [
"^index.ts$"
],
"detectiveOptions": {
"ts": {
"skipTypeImports": true
},
"tsx": {
"skipTypeImports": true
}
}
},
"dependencies": {
"@atlaskit/pragmatic-drag-and-drop": "^1.7.4",
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.1",
@@ -60,24 +47,25 @@
"@fontsource-variable/inter": "^5.2.6",
"@invoke-ai/ui-library": "^0.0.46",
"@nanostores/react": "^1.0.0",
"@observ33r/object-equals": "^1.1.4",
"@observ33r/object-equals": "^1.1.5",
"@reduxjs/toolkit": "2.8.2",
"@roarr/browser-log-writer": "^1.3.0",
"@xyflow/react": "^12.7.1",
"@xyflow/react": "^12.8.2",
"ag-psd": "^28.2.2",
"async-mutex": "^0.5.0",
"chakra-react-select": "^4.9.2",
"cmdk": "^1.1.1",
"compare-versions": "^6.1.1",
"dockview": "^4.4.0",
"es-toolkit": "^1.39.5",
"dockview": "^4.4.1",
"es-toolkit": "^1.39.7",
"filesize": "^10.1.6",
"fracturedjsonjs": "^4.1.0",
"framer-motion": "^11.10.0",
"i18next": "^25.2.1",
"i18next": "^25.3.2",
"i18next-http-backend": "^3.0.2",
"idb-keyval": "^6.2.2",
"idb-keyval": "6.2.1",
"jsondiffpatch": "^0.7.3",
"konva": "^9.3.20",
"konva": "^9.3.22",
"linkify-react": "^4.3.1",
"linkifyjs": "^4.3.1",
"lru-cache": "^11.1.0",
@@ -95,7 +83,7 @@
"react-dom": "^18.3.1",
"react-dropzone": "^14.3.8",
"react-error-boundary": "^5.0.0",
"react-hook-form": "^7.58.1",
"react-hook-form": "^7.60.0",
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^15.5.3",
"react-icons": "^5.5.0",
@@ -115,7 +103,7 @@
"use-debounce": "^10.0.5",
"use-device-pixel-ratio": "^1.1.2",
"uuid": "^11.1.0",
"zod": "^3.25.67",
"zod": "^4.0.10",
"zod-validation-error": "^3.5.2"
},
"peerDependencies": {
@@ -123,39 +111,44 @@
"react-dom": "^18.2.0"
},
"devDependencies": {
"@invoke-ai/eslint-config-react": "^0.0.14",
"@invoke-ai/prettier-config-react": "^0.0.7",
"@storybook/addon-essentials": "^8.6.12",
"@storybook/addon-interactions": "^8.6.12",
"@storybook/addon-links": "^8.6.12",
"@storybook/addon-storysource": "^8.6.12",
"@storybook/manager-api": "^8.6.12",
"@storybook/react": "^8.6.12",
"@storybook/react-vite": "^8.6.12",
"@storybook/theming": "^8.6.12",
"@eslint/js": "^9.31.0",
"@storybook/addon-docs": "^9.0.17",
"@storybook/addon-links": "^9.0.17",
"@storybook/react-vite": "^9.0.17",
"@types/node": "^22.15.1",
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",
"@types/uuid": "^10.0.0",
"@typescript-eslint/eslint-plugin": "^8.37.0",
"@typescript-eslint/parser": "^8.37.0",
"@vitejs/plugin-react-swc": "^3.9.0",
"@vitest/coverage-v8": "^3.1.2",
"@vitest/ui": "^3.1.2",
"concurrently": "^9.1.2",
"csstype": "^3.1.3",
"dpdm": "^3.14.0",
"eslint": "^8.57.1",
"eslint-plugin-i18next": "^6.1.1",
"eslint-plugin-path": "^1.3.0",
"knip": "^5.50.5",
"eslint": "^9.31.0",
"eslint-plugin-i18next": "^6.1.2",
"eslint-plugin-import": "^2.29.1",
"eslint-plugin-path": "^2.0.3",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^5.2.0",
"eslint-plugin-react-refresh": "^0.4.5",
"eslint-plugin-simple-import-sort": "^12.0.0",
"eslint-plugin-storybook": "^9.0.17",
"eslint-plugin-unused-imports": "^4.1.4",
"globals": "^16.3.0",
"knip": "^5.61.3",
"magic-string": "^0.30.17",
"openapi-types": "^12.1.3",
"openapi-typescript": "^7.6.1",
"prettier": "^3.5.3",
"rollup-plugin-visualizer": "^5.14.0",
"storybook": "^8.6.12",
"rollup-plugin-visualizer": "^6.0.3",
"storybook": "^9.0.17",
"tsafe": "^1.8.5",
"type-fest": "^4.40.0",
"typescript": "^5.8.3",
"vite": "^6.3.3",
"vite": "^7.0.5",
"vite-plugin-css-injected-by-js": "^3.5.2",
"vite-plugin-dts": "^4.5.3",
"vite-plugin-eslint": "^1.8.1",
@@ -163,7 +156,7 @@
"vitest": "^3.1.2"
},
"engines": {
"pnpm": "8"
"pnpm": "10"
},
"packageManager": "pnpm@8.15.9+sha512.499434c9d8fdd1a2794ebf4552b3b25c0a633abcee5bb15e7b5de90f32f47b513aca98cd5cfd001c31f0db454bc3804edccd578501e4ca293a6816166bbd9f81"
"packageManager": "pnpm@10.12.4"
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,3 @@
onlyBuiltDependencies:
- '@swc/core'
- esbuild

View File

@@ -711,7 +711,8 @@
"gaussianBlur": "Gaußsche Unschärfe",
"sendToUpscale": "An Hochskalieren senden",
"useCpuNoise": "CPU-Rauschen verwenden",
"sendToCanvas": "An Leinwand senden"
"sendToCanvas": "An Leinwand senden",
"disabledNoRasterContent": "Deaktiviert (kein Rasterinhalt)"
},
"settings": {
"displayInProgress": "Zwischenbilder anzeigen",
@@ -789,7 +790,10 @@
"pasteSuccess": "Eingefügt in {{destination}}",
"pasteFailed": "Einfügen fehlgeschlagen",
"unableToCopy": "Kopieren nicht möglich",
"unableToCopyDesc_theseSteps": "diese Schritte"
"unableToCopyDesc_theseSteps": "diese Schritte",
"noRasterLayers": "Keine Rasterebenen gefunden",
"noActiveRasterLayers": "Keine aktiven Rasterebenen",
"noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
},
"accessibility": {
"uploadImage": "Bild hochladen",
@@ -847,7 +851,10 @@
"assetsWithCount_one": "{{count}} in der Sammlung",
"assetsWithCount_other": "{{count}} in der Sammlung",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
"updateBoardError": "Fehler beim Aktualisieren des Ordners"
"updateBoardError": "Fehler beim Aktualisieren des Ordners",
"uncategorizedImages": "Nicht kategorisierte Bilder",
"deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
"deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
},
"queue": {
"status": "Status",
@@ -1194,6 +1201,9 @@
"Die Kantengröße des Kohärenzdurchlaufs."
],
"heading": "Kantengröße"
},
"rasterLayer": {
"heading": "Rasterebene"
}
},
"invocationCache": {
@@ -1431,7 +1441,10 @@
"autoLayout": "Auto Layout",
"copyShareLink": "Teilen-Link kopieren",
"download": "Herunterladen",
"convertGraph": "Graph konvertieren"
"convertGraph": "Graph konvertieren",
"filterByTags": "Nach Tags filtern",
"yourWorkflows": "Ihre Arbeitsabläufe",
"recentlyOpened": "Kürzlich geöffnet"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
@@ -1444,12 +1457,19 @@
"prompt": {
"noMatchingTriggers": "Keine passenden Trigger",
"addPromptTrigger": "Prompt-Trigger hinzufügen",
"compatibleEmbeddings": "Kompatible Einbettungen"
"compatibleEmbeddings": "Kompatible Einbettungen",
"replace": "Ersetzen",
"insert": "Einfügen",
"discard": "Verwerfen",
"generateFromImage": "Prompt aus Bild generieren",
"expandCurrentPrompt": "Aktuelle Prompt erweitern",
"uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
"expandingPrompt": "Prompt wird erweitert...",
"resultTitle": "Prompt-Erweiterung abgeschlossen"
},
"ui": {
"tabs": {
"queue": "Warteschlange",
"generation": "Erzeugung",
"gallery": "Galerie",
"models": "Modelle",
"upscaling": "Hochskalierung",
@@ -1573,30 +1593,30 @@
"newGlobalReferenceImage": "Neues globales Referenzbild",
"newRegionalReferenceImage": "Neues regionales Referenzbild",
"newControlLayer": "Neue Kontroll-Ebene",
"newRasterLayer": "Neue Raster-Ebene"
"newRasterLayer": "Neue Rasterebene"
},
"rectangle": "Rechteck",
"saveCanvasToGallery": "Leinwand in Galerie speichern",
"newRasterLayerError": "Problem beim Erstellen einer Raster-Ebene",
"newRasterLayerError": "Problem beim Erstellen einer Rasterebene",
"saveLayerToAssets": "Ebene in Galerie speichern",
"deleteReferenceImage": "Referenzbild löschen",
"referenceImage": "Referenzbild",
"opacity": "Opazität",
"removeBookmark": "Lesezeichen entfernen",
"rasterLayer": "Raster-Ebene",
"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
"rasterLayer": "Rasterebene",
"rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
"controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
"deleteSelected": "Ausgewählte löschen",
"newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
"newControlLayerOk": "Kontroll-Ebene erstellt",
"newControlLayerError": "Problem beim Erstellen einer Kontroll-Ebene",
"newRasterLayerOk": "Raster-Layer erstellt",
"newRasterLayerOk": "Rasterebene erstellt",
"moveToFront": "Nach vorne bringen",
"copyToClipboard": "In die Zwischenablage kopieren",
"controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
"clearCaches": "Cache leeren",
"controlLayer": "Kontroll-Ebene",
"rasterLayers_withCount_hidden": "Raster-Ebenen ({{count}} ausgeblendet)",
"rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
"transparency": "Transparenz",
"canvas": "Leinwand",
"global": "Global",
@@ -1682,7 +1702,14 @@
"filterType": "Filtertyp",
"filter": "Filter"
},
"bookmark": "Lesezeichen für Schnell-Umschalten"
"bookmark": "Lesezeichen für Schnell-Umschalten",
"asRasterLayer": "Als $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Als $t(controlLayers.rasterLayer) (Größe anpassen)",
"rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
"rasterLayer_withCount_other": "Rasterebenen",
"newRasterLayer": "Neue $t(controlLayers.rasterLayer)",
"showNonRasterLayers": "Nicht-Rasterebenen anzeigen (Umschalt+H)",
"hideNonRasterLayers": "Nicht-Rasterebenen ausblenden (Umschalt+H)"
},
"upsell": {
"shareAccess": "Zugang teilen",

View File

@@ -225,7 +225,16 @@
"prompt": {
"addPromptTrigger": "Add Prompt Trigger",
"compatibleEmbeddings": "Compatible Embeddings",
"noMatchingTriggers": "No matching triggers"
"noMatchingTriggers": "No matching triggers",
"generateFromImage": "Generate prompt from image",
"expandCurrentPrompt": "Expand Current Prompt",
"uploadImageForPromptGeneration": "Upload Image for Prompt Generation",
"expandingPrompt": "Expanding prompt...",
"resultTitle": "Prompt Expansion Complete",
"resultSubtitle": "Choose how to handle the expanded prompt:",
"replace": "Replace",
"insert": "Insert",
"discard": "Discard"
},
"queue": {
"queue": "Queue",
@@ -244,6 +253,7 @@
"cancel": "Cancel",
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
"cancelAllExceptCurrent": "Cancel All Except Current",
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
"cancelTooltip": "Cancel Current Item",
"cancelSucceeded": "Item Canceled",
@@ -264,7 +274,7 @@
"retryItem": "Retry Item",
"cancelBatchSucceeded": "Batch Canceled",
"cancelBatchFailed": "Problem Canceling Batch",
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled and the Canvas Staging Area will be reset.",
"clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
"current": "Current",
"next": "Next",
@@ -335,14 +345,14 @@
"images": "Images",
"assets": "Assets",
"alwaysShowImageSizeBadge": "Always Show Image Size Badge",
"assetsTab": "Files youve uploaded for use in your projects.",
"assetsTab": "Files you've uploaded for use in your projects.",
"autoAssignBoardOnClick": "Auto-Assign Board on Click",
"autoSwitchNewImages": "Auto-Switch to New Images",
"boardsSettings": "Boards Settings",
"copy": "Copy",
"currentlyInUse": "This image is currently in use in the following features:",
"drop": "Drop",
"dropOrUpload": "$t(gallery.drop) or Upload",
"dropOrUpload": "Drop or Upload",
"dropToUpload": "$t(gallery.drop) to Upload",
"deleteImage_one": "Delete Image",
"deleteImage_other": "Delete {{count}} Images",
@@ -357,7 +367,7 @@
"gallerySettings": "Gallery Settings",
"go": "Go",
"image": "image",
"imagesTab": "Images youve created and saved within Invoke.",
"imagesTab": "Images you've created and saved within Invoke.",
"imagesSettings": "Gallery Images Settings",
"jump": "Jump",
"loading": "Loading",
@@ -396,7 +406,8 @@
"compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit.",
"openViewer": "Open Viewer",
"closeViewer": "Close Viewer",
"move": "Move"
"move": "Move",
"useForPromptGeneration": "Use for Prompt Generation"
},
"hotkeys": {
"hotkeys": "Hotkeys",
@@ -460,6 +471,11 @@
"togglePanels": {
"title": "Toggle Panels",
"desc": "Show or hide both left and right panels at once."
},
"selectGenerateTab": {
"title": "Select the Generate Tab",
"desc": "Selects the Generate tab.",
"key": "1"
}
},
"canvas": {
@@ -564,6 +580,10 @@
"title": "Transform",
"desc": "Transform the selected layer."
},
"invertMask": {
"title": "Invert Mask",
"desc": "Invert the selected inpaint mask, creating a new mask with opposite transparency."
},
"applyFilter": {
"title": "Apply Filter",
"desc": "Apply the pending filter to the selected layer."
@@ -579,6 +599,34 @@
"cancelTransform": {
"title": "Cancel Transform",
"desc": "Cancel the pending transform."
},
"settings": {
"behavior": "Behavior",
"display": "Display",
"grid": "Grid",
"debug": "Debug"
},
"toggleNonRasterLayers": {
"title": "Toggle Non-Raster Layers",
"desc": "Show or hide all non-raster layer categories (Control Layers, Inpaint Masks, Regional Guidance)."
},
"fitBboxToLayers": {
"title": "Fit Bbox To Layers",
"desc": "Automatically adjust the generation bounding box to fit visible layers"
},
"fitBboxToMasks": {
"title": "Fit Bbox To Masks",
"desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
},
"applySegmentAnything": {
"title": "Apply Segment Anything",
"desc": "Apply the current Segment Anything mask.",
"key": "enter"
},
"cancelSegmentAnything": {
"title": "Cancel Segment Anything",
"desc": "Cancel the current Segment Anything operation.",
"key": "esc"
}
},
"workflows": {
@@ -708,6 +756,10 @@
"deleteSelection": {
"title": "Delete",
"desc": "Delete all selected images. By default, you will be prompted to confirm deletion. If the images are currently in use in the app, you will be warned."
},
"starImage": {
"title": "Star/Unstar Image",
"desc": "Star or unstar the selected image."
}
}
},
@@ -742,7 +794,7 @@
"vae": "VAE",
"width": "Width",
"workflow": "Workflow",
"canvasV2Metadata": "Canvas"
"canvasV2Metadata": "Canvas Layers"
},
"modelManager": {
"active": "active",
@@ -763,7 +815,7 @@
"convertToDiffusers": "Convert To Diffusers",
"convertToDiffusersHelpText1": "This model will be converted to the 🧨 Diffusers format.",
"convertToDiffusersHelpText2": "This process will replace your Model Manager entry with the Diffusers version of the same model.",
"convertToDiffusersHelpText3": "Your checkpoint file on disk WILL be deleted if it is in InvokeAI root folder. If it is in a custom location, then it WILL NOT be deleted.",
"convertToDiffusersHelpText3": "Your checkpoint file on disk WILL be deleted if it is in the InvokeAI root folder. If it is in a custom location, then it WILL NOT be deleted.",
"convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
@@ -806,7 +858,11 @@
"urlUnauthorizedErrorMessage": "You may need to configure an API token to access this model.",
"urlUnauthorizedErrorMessage2": "Learn how here.",
"imageEncoderModelId": "Image Encoder Model ID",
"includesNModels": "Includes {{n}} models and their dependencies",
"installedModelsCount": "{{installed}} of {{total}} models installed.",
"includesNModels": "Includes {{n}} models and their dependencies.",
"allNModelsInstalled": "All {{count}} models installed",
"nToInstall": "{{count}} to install",
"nAlreadyInstalled": "{{count}} already installed",
"installQueue": "Install Queue",
"inplaceInstall": "In-place install",
"inplaceInstallDesc": "Install models without copying the files. When using the model, it will be loaded from its this location. If disabled, the model file(s) will be copied into the Invoke-managed models directory during installation.",
@@ -869,6 +925,25 @@
"starterBundleHelpText": "Easily install all models needed to get started with a base model, including a main model, controlnets, IP adapters, and more. Selecting a bundle will skip any models that you already have installed.",
"starterModels": "Starter Models",
"starterModelsInModelManager": "Starter Models can be found in Model Manager",
"bundleAlreadyInstalled": "Bundle already installed",
"bundleAlreadyInstalledDesc": "All models in the {{bundleName}} bundle are already installed.",
"launchpadTab": "Launchpad",
"launchpad": {
"welcome": "Welcome to Model Management",
"description": "Invoke requires models to be installed to utilize most features of the platform. Choose from manual installation options or explore curated starter models.",
"manualInstall": "Manual Installation",
"urlDescription": "Install models from a URL or local file path. Perfect for specific models you want to add.",
"huggingFaceDescription": "Browse and install models directly from HuggingFace repositories.",
"scanFolderDescription": "Scan a local folder to automatically detect and install models.",
"recommendedModels": "Recommended Models",
"exploreStarter": "Or browse all available starter models",
"quickStart": "Quick Start Bundles",
"bundleDescription": "Each bundle includes essential models for each model family and curated base models to get started.",
"browseAll": "Or browse all available models:",
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
},
"controlLora": "Control LoRA",
"llavaOnevision": "LLaVA OneVision",
"syncModels": "Sync Models",
@@ -905,7 +980,8 @@
"selectModel": "Select a Model",
"noLoRAsInstalled": "No LoRAs installed",
"noRefinerModelsInstalled": "No SDXL Refiner models installed",
"defaultVAE": "Default VAE"
"defaultVAE": "Default VAE",
"noCompatibleLoRAs": "No Compatible LoRAs"
},
"nodes": {
"arithmeticSequence": "Arithmetic Sequence",
@@ -1081,7 +1157,23 @@
"addItem": "Add Item",
"generateValues": "Generate Values",
"floatRangeGenerator": "Float Range Generator",
"integerRangeGenerator": "Integer Range Generator"
"integerRangeGenerator": "Integer Range Generator",
"layout": {
"autoLayout": "Auto Layout",
"layeringStrategy": "Layering Strategy",
"networkSimplex": "Network Simplex",
"longestPath": "Longest Path",
"nodeSpacing": "Node Spacing",
"layerSpacing": "Layer Spacing",
"layoutDirection": "Layout Direction",
"layoutDirectionRight": "Right",
"layoutDirectionDown": "Down",
"alignment": "Node Alignment",
"alignmentUL": "Top Left",
"alignmentDL": "Bottom Left",
"alignmentUR": "Top Right",
"alignmentDR": "Bottom Right"
}
},
"parameters": {
"aspect": "Aspect",
@@ -1147,7 +1239,7 @@
"modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
"fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with FLUX Kontext via BFL API",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1155,7 +1247,9 @@
"canvasIsSelectingObject": "Canvas is busy (selecting object)",
"noPrompts": "No prompts generated",
"noNodesInGraph": "No nodes in graph",
"systemDisconnected": "System disconnected"
"systemDisconnected": "System disconnected",
"promptExpansionPending": "Prompt expansion in progress",
"promptExpansionResultPending": "Please accept or discard your prompt expansion result"
},
"maskBlur": "Mask Blur",
"negativePromptPlaceholder": "Negative Prompt",
@@ -1313,6 +1407,21 @@
"problemCopyingLayer": "Unable to Copy Layer",
"problemSavingLayer": "Unable to Save Layer",
"problemDownloadingImage": "Unable to Download Image",
"noRasterLayers": "No Raster Layers Found",
"noRasterLayersDesc": "Create at least one raster layer to export to PSD",
"noActiveRasterLayers": "No Active Raster Layers",
"noActiveRasterLayersDesc": "Enable at least one raster layer to export to PSD",
"noVisibleRasterLayers": "No Visible Raster Layers",
"noVisibleRasterLayersDesc": "Enable at least one raster layer to export to PSD",
"invalidCanvasDimensions": "Invalid Canvas Dimensions",
"canvasTooLarge": "Canvas Too Large",
"canvasTooLargeDesc": "Canvas dimensions exceed the maximum allowed size for PSD export. Reduce the total width and height of the canvas of the canvas and try again.",
"failedToProcessLayers": "Failed to Process Layers",
"psdExportSuccess": "PSD Export Complete",
"psdExportSuccessDesc": "Successfully exported {{count}} layers to PSD file",
"problemExportingPSD": "Problem Exporting PSD",
"canvasManagerNotAvailable": "Canvas Manager Not Available",
"noValidLayerAdapters": "No Valid Layer Adapters Found",
"pasteSuccess": "Pasted to {{destination}}",
"pasteFailed": "Paste Failed",
"prunedQueue": "Pruned Queue",
@@ -1338,10 +1447,23 @@
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
"imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models Inpainting and Outpainting tasks.",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext does not support generation from images placed on the canvas. Re-try using the Reference Image section and disable any Raster Layers.",
"problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
"problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
"workflowUnpublished": "Workflow Unpublished"
"workflowUnpublished": "Workflow Unpublished",
"sentToCanvas": "Sent to Canvas",
"sentToUpscale": "Sent to Upscale",
"promptGenerationStarted": "Prompt generation started",
"uploadAndPromptGenerationFailed": "Failed to upload image and generate prompt",
"promptExpansionFailed": "We ran into an issue. Please try prompt expansion again.",
"maskInverted": "Mask Inverted",
"maskInvertFailed": "Failed to Invert Mask",
"noVisibleMasks": "No Visible Masks",
"noVisibleMasksDesc": "Create or enable at least one inpaint mask to invert",
"noInpaintMaskSelected": "No Inpaint Mask Selected",
"noInpaintMaskSelectedDesc": "Select an inpaint mask to invert",
"invalidBbox": "Invalid Bounding Box",
"invalidBboxDesc": "The bounding box has no valid dimensions"
},
"popovers": {
"clipSkip": {
@@ -1709,6 +1831,20 @@
"Structure controls how closely the output image will keep to the layout of the original. Low structure allows major changes, while high structure strictly maintains the original composition and layout."
]
},
"tileSize": {
"heading": "Tile Size",
"paragraphs": [
"Controls the size of tiles used during the upscaling process. Larger tiles use more memory but may produce better results.",
"SD1.5 models default to 768, while SDXL models default to 1024. Reduce tile size if you encounter memory issues."
]
},
"tileOverlap": {
"heading": "Tile Overlap",
"paragraphs": [
"Controls the overlap between adjacent tiles during upscaling. Higher overlap values help reduce visible seams between tiles but use more memory.",
"The default value of 128 works well for most cases, but you can adjust based on your specific needs and memory constraints."
]
},
"fluxDevLicense": {
"heading": "Non-Commercial License",
"paragraphs": [
@@ -1860,10 +1996,12 @@
"canvas": "Canvas",
"bookmark": "Bookmark for Quick Switch",
"fitBboxToLayers": "Fit Bbox To Layers",
"fitBboxToMasks": "Fit Bbox To Masks",
"removeBookmark": "Remove Bookmark",
"saveCanvasToGallery": "Save Canvas to Gallery",
"saveBboxToGallery": "Save Bbox to Gallery",
"saveLayerToAssets": "Save Layer to Assets",
"exportCanvasToPSD": "Export Canvas to PSD",
"cropLayerToBbox": "Crop Layer to Bbox",
"savedToGalleryOk": "Saved to Gallery",
"savedToGalleryError": "Error saving to gallery",
@@ -1889,6 +2027,7 @@
"mergingLayers": "Merging layers",
"clearHistory": "Clear History",
"bboxOverlay": "Show Bbox Overlay",
"ruleOfThirds": "Show Rule of Thirds",
"newSession": "New Session",
"clearCaches": "Clear Caches",
"recalculateRects": "Recalculate Rects",
@@ -1922,6 +2061,7 @@
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
"invertMask": "Invert Mask",
"regionalGuidance": "Regional Guidance",
"referenceImageRegional": "Reference Image (Regional)",
"referenceImageGlobal": "Reference Image (Global)",
@@ -1930,6 +2070,8 @@
"asControlLayer": "As $t(controlLayers.controlLayer)",
"asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
"referenceImage": "Reference Image",
"maxRefImages": "Max Ref Images",
"useAsReferenceImage": "Use as Reference Image",
"regionalReferenceImage": "Regional Reference Image",
"globalReferenceImage": "Global Reference Image",
"sendingToCanvas": "Staging Generations on Canvas",
@@ -1994,6 +2136,8 @@
"disableTransparencyEffect": "Disable Transparency Effect",
"hidingType": "Hiding {{type}}",
"showingType": "Showing {{type}}",
"showNonRasterLayers": "Show Non-Raster Layers (Shift+H)",
"hideNonRasterLayers": "Hide Non-Raster Layers (Shift+H)",
"dynamicGrid": "Dynamic Grid",
"logDebugInfo": "Log Debug Info",
"locked": "Locked",
@@ -2016,9 +2160,9 @@
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this Reference Image to get started.",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the gallery onto this Reference Image to get started.",
"uploadOrDragAnImage": "Drag an image from the gallery or <UploadButton>upload an image</UploadButton>.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",
@@ -2260,6 +2404,10 @@
"label": "Preserve Masked Region",
"alert": "Preserving Masked Region"
},
"saveAllImagesToGallery": {
"label": "Send New Generations to Gallery",
"alert": "Sending new generations to Gallery, bypassing Canvas"
},
"isolatedStagingPreview": "Isolated Staging Preview",
"isolatedPreview": "Isolated Preview",
"isolatedLayerPreview": "Isolated Layer Preview",
@@ -2288,6 +2436,7 @@
"newGlobalReferenceImage": "New Global Reference Image",
"newRegionalReferenceImage": "New Regional Reference Image",
"newControlLayer": "New Control Layer",
"newResizedControlLayer": "New Resized Control Layer",
"newRasterLayer": "New Raster Layer",
"newInpaintMask": "New Inpaint Mask",
"newRegionalGuidance": "New Regional Guidance",
@@ -2305,6 +2454,11 @@
"saveToGallery": "Save To Gallery",
"showResultsOn": "Showing Results",
"showResultsOff": "Hiding Results"
},
"autoSwitch": {
"off": "Off",
"switchOnStart": "On Start",
"switchOnFinish": "On Finish"
}
},
"upscaling": {
@@ -2316,6 +2470,9 @@
"upscaleModel": "Upscale Model",
"postProcessingModel": "Post-Processing Model",
"scale": "Scale",
"tileControl": "Tile Control",
"tileSize": "Tile Size",
"tileOverlap": "Tile Overlap",
"postProcessingMissingModelWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install a post-processing (image to image) model.",
"missingModelsWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install the required models:",
"mainModelDesc": "Main model (SD1.5 or SDXL architecture)",
@@ -2371,7 +2528,8 @@
"uploadImage": "Upload Image",
"useForTemplate": "Use For Prompt Template",
"viewList": "View Template List",
"viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box."
"viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box.",
"togglePromptPreviews": "Toggle Prompt Previews"
},
"upsell": {
"inviteTeammates": "Invite Teammates",
@@ -2381,7 +2539,7 @@
},
"ui": {
"tabs": {
"generation": "Generation",
"generate": "Generate",
"canvas": "Canvas",
"workflows": "Workflows",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
@@ -2391,6 +2549,90 @@
"upscaling": "Upscaling",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"gallery": "Gallery"
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Workflow Editor",
"imageViewer": "Image Viewer",
"canvas": "Canvas"
},
"launchpad": {
"workflowsTitle": "Go deep with Workflows.",
"upscalingTitle": "Upscale and add detail.",
"canvasTitle": "Edit and refine on Canvas.",
"generateTitle": "Generate images from text prompts.",
"modelGuideText": "Want to learn what prompts work best for each model?",
"modelGuideLink": "Check out our Model Guide.",
"createNewWorkflowFromScratch": "Create a new Workflow from scratch",
"browseAndLoadWorkflows": "Browse and load existing workflows",
"addStyleRef": {
"title": "Add a Style Reference",
"description": "Add an image to transfer its look."
},
"editImage": {
"title": "Edit Image",
"description": "Add an image to refine."
},
"generateFromText": {
"title": "Generate from Text",
"description": "Enter a prompt and Invoke."
},
"useALayoutImage": {
"title": "Use a Layout Image",
"description": "Add an image to control composition."
},
"generate": {
"canvasCalloutTitle": "Looking to get more control, edit, and iterate on your images?",
"canvasCalloutLink": "Navigate to Canvas for more capabilities."
},
"workflows": {
"description": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results.",
"learnMoreLink": "Learn more about creating workflows",
"browseTemplates": {
"title": "Browse Workflow Templates",
"description": "Choose from pre-built workflows for common tasks"
},
"createNew": {
"title": "Create a new Workflow",
"description": "Start a new workflow from scratch"
},
"loadFromFile": {
"title": "Load workflow from file",
"description": "Upload a workflow to start with an existing setup"
}
},
"upscaling": {
"uploadImage": {
"title": "Upload Image to Upscale",
"description": "Click or drag an image to upscale (JPG, PNG, WebP up to 100MB)"
},
"replaceImage": {
"title": "Replace Current Image",
"description": "Click or drag a new image to replace the current one"
},
"imageReady": {
"title": "Image Ready",
"description": "Press Invoke to begin upscaling"
},
"readyToUpscale": {
"title": "Ready to upscale!",
"description": "Configure your settings below, then click the Invoke button to begin upscaling your image."
},
"upscaleModel": "Upscale Model",
"model": "Model",
"scale": "Scale",
"creativityAndStructure": {
"title": "Creativity & Structure Defaults",
"conservative": "Conservative",
"balanced": "Balanced",
"creative": "Creative",
"artistic": "Artistic"
},
"helpText": {
"promptAdvice": "When upscaling, use a prompt that describes the medium and style. Avoid describing specific content details in the image.",
"styleAdvice": "Upscaling works best with the general style of your image."
}
}
}
},
"system": {
@@ -2430,8 +2672,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"Inpainting: Per-mask noise levels and denoise limits.",
"Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
"Studio state is saved to the server, allowing you to continue your work on any device.",
"Support for multiple reference images for FLUX Kontext (local model only)."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -2440,62 +2682,16 @@
"supportVideos": {
"supportVideos": "Support Videos",
"gettingStarted": "Getting Started",
"controlCanvas": "Control Canvas",
"watch": "Watch",
"studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
"studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
"studioSessionsDesc": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
"videos": {
"creatingYourFirstImage": {
"title": "Creating Your First Image",
"description": "Introduction to creating an image from scratch using Invoke's tools."
"gettingStarted": {
"title": "Getting Started with Invoke",
"description": "Complete video series covering everything you need to know to get started with Invoke, from creating your first image to advanced techniques."
},
"usingControlLayersAndReferenceGuides": {
"title": "Using Control Layers and Reference Guides",
"description": "Learn how to guide your image creation with control layers and reference images."
},
"understandingImageToImageAndDenoising": {
"title": "Understanding Image-to-Image and Denoising",
"description": "Overview of image-to-image transformations and denoising in Invoke."
},
"exploringAIModelsAndConceptAdapters": {
"title": "Exploring AI Models and Concept Adapters",
"description": "Dive into AI models and how to use concept adapters for creative control."
},
"creatingAndComposingOnInvokesControlCanvas": {
"title": "Creating and Composing on Invoke's Control Canvas",
"description": "Learn to compose images using Invoke's control canvas."
},
"upscaling": {
"title": "Upscaling",
"description": "How to upscale images with Invoke's tools to enhance resolution."
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "How Do I Generate and Save to the Gallery?",
"description": "Steps to generate and save images to the gallery."
},
"howDoIEditOnTheCanvas": {
"title": "How Do I Edit on the Canvas?",
"description": "Guide to editing images directly on the canvas."
},
"howDoIDoImageToImageTransformation": {
"title": "How Do I Do Image-to-Image Transformation?",
"description": "Tutorial on performing image-to-image transformations in Invoke."
},
"howDoIUseControlNetsAndControlLayers": {
"title": "How Do I Use Control Nets and Control Layers?",
"description": "Learn to apply control layers and controlnets to your images."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "How Do I Use Global IP Adapters and Reference Images?",
"description": "Introduction to adding reference images and global IP adapters."
},
"howDoIUseInpaintMasks": {
"title": "How Do I Use Inpaint Masks?",
"description": "How to apply inpaint masks for image correction and variation."
},
"howDoIOutpaint": {
"title": "How Do I Outpaint?",
"description": "Guide to outpainting beyond the original image borders."
"studioSessions": {
"title": "Studio Sessions",
"description": "Deep dive sessions exploring advanced Invoke features, creative workflows, and community discussions."
}
}
}

View File

@@ -399,7 +399,6 @@
"ui": {
"tabs": {
"canvas": "Lienzo",
"generation": "Generación",
"queue": "Cola",
"workflows": "Flujos de trabajo",
"models": "Modelos",

View File

@@ -1820,7 +1820,6 @@
"upscaling": "Agrandissement",
"gallery": "Galerie",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"generation": "Génération",
"workflows": "Workflows",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"models": "Modèles",
@@ -2375,65 +2374,8 @@
},
"supportVideos": {
"watch": "Regarder",
"videos": {
"upscaling": {
"description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
"title": "Upscaling"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Étapes pour générer et enregistrer des images dans la galerie.",
"title": "Comment générer et enregistrer dans la galerie?"
},
"usingControlLayersAndReferenceGuides": {
"title": "Utilisation des couche de contrôle et des guides de référence",
"description": "Apprenez à guider la création de vos images avec des couche de contrôle et des images de référence."
},
"exploringAIModelsAndConceptAdapters": {
"description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
"title": "Exploration des modèles d'IA et des adaptateurs de concepts"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Comment utiliser les réseaux de contrôle et les couches de contrôle?",
"description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
"title": "Créer et composer sur le canvas de contrôle d'Invoke"
},
"howDoIEditOnTheCanvas": {
"title": "Comment puis-je modifier sur la toile?",
"description": "Guide pour éditer des images directement sur la toile."
},
"howDoIDoImageToImageTransformation": {
"title": "Comment effectuer une transformation d'image à image?",
"description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Comment utiliser les IP Adapters globaux et les images de référence?",
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
},
"howDoIUseInpaintMasks": {
"title": "Comment utiliser les masques d'inpainting?",
"description": "Comment appliquer des masques de retourche pour la correction et la variation d'image."
},
"creatingYourFirstImage": {
"title": "Créer votre première image",
"description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
},
"understandingImageToImageAndDenoising": {
"title": "Comprendre l'Image-à-Image et le Débruitage",
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
},
"howDoIOutpaint": {
"title": "Comment effectuer un outpainting?",
"description": "Guide pour l'extension au-delà des bordures de l'image originale."
}
},
"gettingStarted": "Commencer",
"studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
"supportVideos": "Vidéos d'assistance",
"controlCanvas": "Contrôler la toile"
"supportVideos": "Vidéos d'assistance"
},
"modelCache": {
"clear": "Effacer le cache du modèle",

View File

@@ -152,7 +152,7 @@
"image": "immagine",
"drop": "Rilascia",
"unstarImage": "Rimuovi contrassegno immagine",
"dropOrUpload": "$t(gallery.drop) o carica",
"dropOrUpload": "Rilascia o carica",
"starImage": "Contrassegna l'immagine",
"dropToUpload": "$t(gallery.drop) per aggiornare",
"bulkDownloadRequested": "Preparazione del download",
@@ -197,7 +197,8 @@
"boardsSettings": "Impostazioni Bacheche",
"imagesSettings": "Impostazioni Immagini Galleria",
"assets": "Risorse",
"images": "Immagini"
"images": "Immagini",
"useForPromptGeneration": "Usa per generare il prompt"
},
"hotkeys": {
"searchHotkeys": "Cerca tasti di scelta rapida",
@@ -253,12 +254,16 @@
"desc": "Attiva/disattiva il pannello destro."
},
"resetPanelLayout": {
"title": "Ripristina il layout del pannello",
"desc": "Ripristina le dimensioni e il layout predefiniti dei pannelli sinistro e destro."
"title": "Ripristina lo schema del pannello",
"desc": "Ripristina le dimensioni e lo schema predefiniti dei pannelli sinistro e destro."
},
"togglePanels": {
"title": "Attiva/disattiva i pannelli",
"desc": "Mostra o nascondi contemporaneamente i pannelli sinistro e destro."
},
"selectGenerateTab": {
"title": "Seleziona la scheda Genera",
"desc": "Seleziona la scheda Genera."
}
},
"hotkeys": "Tasti di scelta rapida",
@@ -379,6 +384,32 @@
"applyTransform": {
"title": "Applica trasformazione",
"desc": "Applica la trasformazione in sospeso al livello selezionato."
},
"toggleNonRasterLayers": {
"desc": "Mostra o nascondi tutte le categorie di livelli non raster (Livelli di controllo, Maschere di Inpaint, Guida regionale).",
"title": "Attiva/disattiva livelli non raster"
},
"settings": {
"behavior": "Comportamento",
"display": "Mostra",
"grid": "Griglia"
},
"invertMask": {
"title": "Inverti maschera",
"desc": "Inverte la maschera di inpaint selezionata, creando una nuova maschera con trasparenza opposta."
},
"fitBboxToMasks": {
"title": "Adatta il riquadro di delimitazione alle maschere",
"desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo alle maschere di inpaint visibili"
},
"applySegmentAnything": {
"title": "Applica Segment Anything",
"desc": "Applica la maschera Segment Anything corrente.",
"key": "invio"
},
"cancelSegmentAnything": {
"title": "Annulla Segment Anything",
"desc": "Annulla l'operazione Segment Anything corrente."
}
},
"workflows": {
@@ -508,6 +539,10 @@
"galleryNavUpAlt": {
"desc": "Uguale a Naviga verso l'alto, ma seleziona l'immagine da confrontare, aprendo la modalità di confronto se non è già aperta.",
"title": "Naviga verso l'alto (Confronta immagine)"
},
"starImage": {
"desc": "Aggiungi/Rimuovi contrassegno all'immagine selezionata.",
"title": "Aggiungi / Rimuovi contrassegno immagine"
}
}
},
@@ -623,7 +658,7 @@
"installingXModels_one": "Installazione di {{count}} modello",
"installingXModels_many": "Installazione di {{count}} modelli",
"installingXModels_other": "Installazione di {{count}} modelli",
"includesNModels": "Include {{n}} modelli e le loro dipendenze",
"includesNModels": "Include {{n}} modelli e le loro dipendenze.",
"starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
"noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
"defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",
@@ -656,7 +691,27 @@
"manageModels": "Gestione modelli",
"hfTokenReset": "Ripristino del gettone HF",
"relatedModels": "Modelli correlati",
"showOnlyRelatedModels": "Correlati"
"showOnlyRelatedModels": "Correlati",
"installedModelsCount": "{{installed}} di {{total}} modelli installati.",
"allNModelsInstalled": "Tutti i {{count}} modelli installati",
"nToInstall": "{{count}} da installare",
"nAlreadyInstalled": "{{count}} già installati",
"bundleAlreadyInstalled": "Pacchetto già installato",
"bundleAlreadyInstalledDesc": "Tutti i modelli nel pacchetto {{bundleName}} sono già installati.",
"launchpad": {
"description": "Per utilizzare la maggior parte delle funzionalità della piattaforma, Invoke richiede l'installazione di modelli. Scegli tra le opzioni di installazione manuale o esplora i modelli di avvio selezionati.",
"manualInstall": "Installazione manuale",
"urlDescription": "Installa i modelli da un URL o da un percorso file locale. Perfetto per modelli specifici che desideri aggiungere.",
"huggingFaceDescription": "Esplora e installa i modelli direttamente dai repository di HuggingFace.",
"scanFolderDescription": "Esegui la scansione di una cartella locale per rilevare e installare automaticamente i modelli.",
"recommendedModels": "Modelli consigliati",
"exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
"welcome": "Benvenuti in Gestione Modelli",
"quickStart": "Pacchetti di avvio rapido",
"bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
"browseAll": "Oppure scopri tutti i modelli disponibili:"
},
"launchpadTab": "Rampa di lancio"
},
"parameters": {
"images": "Immagini",
@@ -742,7 +797,10 @@
"modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
"fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con FLUX Kontext tramite BFL API",
"promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
"promptExpansionPending": "Espansione del prompt in corso"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",
@@ -884,7 +942,34 @@
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
"workflowUnpublished": "Flusso di lavoro non pubblicato",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
"noRasterLayers": "Nessun livello raster trovato",
"noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
"noActiveRasterLayers": "Nessun livello raster attivo",
"noActiveRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
"noVisibleRasterLayers": "Nessun livello raster visibile",
"noVisibleRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
"invalidCanvasDimensions": "Dimensioni della tela non valide",
"canvasTooLarge": "Tela troppo grande",
"canvasTooLargeDesc": "Le dimensioni della tela superano le dimensioni massime consentite per l'esportazione in formato PSD. Riduci la larghezza e l'altezza totali della tela e riprova.",
"failedToProcessLayers": "Impossibile elaborare i livelli",
"psdExportSuccess": "Esportazione PSD completata",
"psdExportSuccessDesc": "Esportazione riuscita di {{count}} livelli nel file PSD",
"problemExportingPSD": "Problema durante l'esportazione PSD",
"noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext non supporta la generazione di immagini posizionate sulla tela. Riprova utilizzando la sezione Immagine di riferimento e disattiva tutti i livelli raster.",
"canvasManagerNotAvailable": "Gestione tela non disponibile",
"promptExpansionFailed": "Abbiamo riscontrato un problema. Riprova a eseguire l'espansione del prompt.",
"uploadAndPromptGenerationFailed": "Impossibile caricare l'immagine e generare il prompt",
"promptGenerationStarted": "Generazione del prompt avviata",
"invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide",
"invalidBbox": "Riquadro di delimitazione non valido",
"noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
"noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
"noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
"noVisibleMasks": "Nessuna maschera visibile",
"maskInvertFailed": "Impossibile invertire la maschera",
"maskInverted": "Maschera invertita"
},
"accessibility": {
"invokeProgressBar": "Barra di avanzamento generazione",
@@ -1079,7 +1164,22 @@
"missingField_withName": "Campo \"{{name}}\" mancante",
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante",
"layout": {
"alignmentDR": "In basso a destra",
"autoLayout": "Schema automatico",
"nodeSpacing": "Spaziatura nodi",
"layerSpacing": "Spaziatura livelli",
"layeringStrategy": "Strategia livelli",
"longestPath": "Percorso più lungo",
"layoutDirection": "Direzione schema",
"layoutDirectionRight": "A destra",
"layoutDirectionDown": "In basso",
"alignment": "Allineamento nodi",
"alignmentUL": "In alto a sinistra",
"alignmentDL": "In basso a sinistra",
"alignmentUR": "In alto a destra"
}
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1156,7 +1256,7 @@
"batchQueuedDesc_other": "Aggiunte {{count}} sessioni a {{direction}} della coda",
"graphQueued": "Grafico in coda",
"batch": "Lotto",
"clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati.",
"clearQueueAlertDialog": "La cancellazione della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati e l'area di lavoro della Tela verrà reimpostata.",
"pending": "In attesa",
"completedIn": "Completato in",
"resumeFailed": "Problema nel riavvio dell'elaborazione",
@@ -1212,7 +1312,8 @@
"retrySucceeded": "Elemento rieseguito",
"retryItem": "Riesegui elemento",
"retryFailed": "Problema riesecuzione elemento",
"credits": "Crediti"
"credits": "Crediti",
"cancelAllExceptCurrent": "Annulla tutto tranne quello corrente"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1225,7 +1326,8 @@
"addLora": "Aggiungi LoRA",
"defaultVAE": "VAE predefinito",
"concepts": "Concetti",
"lora": "LoRA"
"lora": "LoRA",
"noCompatibleLoRAs": "Nessun LoRA compatibile"
},
"invocationCache": {
"disable": "Disabilita",
@@ -1626,7 +1728,7 @@
"structure": {
"heading": "Struttura",
"paragraphs": [
"La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Una struttura bassa permette cambiamenti significativi, mentre una struttura alta conserva la composizione e il layout originali."
"La struttura determina quanto l'immagine finale rispecchierà lo schema dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
]
},
"fluxDevLicense": {
@@ -1683,6 +1785,20 @@
"paragraphs": [
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
]
},
"tileSize": {
"heading": "Dimensione riquadro",
"paragraphs": [
"Controlla la dimensione dei riquadri utilizzati durante il processo di ampliamento. Riquadri più grandi consumano più memoria, ma possono produrre risultati migliori.",
"I modelli SD1.5 hanno un valore predefinito di 768, mentre i modelli SDXL hanno un valore predefinito di 1024. Ridurre le dimensioni dei riquadri in caso di problemi di memoria."
]
},
"tileOverlap": {
"heading": "Sovrapposizione riquadri",
"paragraphs": [
"Controlla la sovrapposizione tra riquadri adiacenti durante l'ampliamento. Valori di sovrapposizione più elevati aiutano a ridurre le giunzioni visibili tra i riquadri, ma consuma più memoria.",
"Il valore predefinito di 128 è adatto alla maggior parte dei casi, ma è possibile modificarlo in base alle proprie esigenze specifiche e ai limiti di memoria."
]
}
},
"sdxl": {
@@ -1730,7 +1846,7 @@
"parameterSet": "Parametro {{parameter}} impostato",
"parsingFailed": "Analisi non riuscita",
"recallParameter": "Richiama {{label}}",
"canvasV2Metadata": "Tela",
"canvasV2Metadata": "Livelli Tela",
"guidance": "Guida",
"seamlessXAxis": "Asse X senza giunte",
"seamlessYAxis": "Asse Y senza giunte",
@@ -1778,7 +1894,7 @@
"opened": "Aperto",
"convertGraph": "Converti grafico",
"loadWorkflow": "$t(common.load) Flusso di lavoro",
"autoLayout": "Disposizione automatica",
"autoLayout": "Schema automatico",
"loadFromGraph": "Carica il flusso di lavoro dal grafico",
"userWorkflows": "Flussi di lavoro utente",
"projectWorkflows": "Flussi di lavoro del progetto",
@@ -1901,7 +2017,16 @@
"prompt": {
"compatibleEmbeddings": "Incorporamenti compatibili",
"addPromptTrigger": "Aggiungi Trigger nel prompt",
"noMatchingTriggers": "Nessun Trigger corrispondente"
"noMatchingTriggers": "Nessun Trigger corrispondente",
"discard": "Scarta",
"insert": "Inserisci",
"replace": "Sostituisci",
"resultSubtitle": "Scegli come gestire il prompt espanso:",
"resultTitle": "Espansione del prompt completata",
"expandingPrompt": "Espansione del prompt...",
"uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
"expandCurrentPrompt": "Espandi il prompt corrente",
"generateFromImage": "Genera prompt dall'immagine"
},
"controlLayers": {
"addLayer": "Aggiungi Livello",
@@ -2212,7 +2337,11 @@
"label": "Preserva la regione mascherata"
},
"isolatedLayerPreview": "Anteprima livello isolato",
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione.",
"saveAllImagesToGallery": {
"alert": "Invia le nuove generazioni alla Galleria, bypassando la Tela",
"label": "Invia le nuove generazioni alla Galleria"
}
},
"transform": {
"reset": "Reimposta",
@@ -2262,7 +2391,8 @@
"newRegionalGuidance": "Nuova Guida Regionale",
"copyToClipboard": "Copia negli appunti",
"copyCanvasToClipboard": "Copia la tela negli appunti",
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti",
"newResizedControlLayer": "Nuovo livello di controllo ridimensionato"
},
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
@@ -2299,10 +2429,10 @@
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla galleria su questa Immagine di riferimento.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
@@ -2352,11 +2482,25 @@
"denoiseLimit": "Limite di riduzione del rumore",
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
"imageNoise": "Rumore dell'immagine"
"imageNoise": "Rumore dell'immagine",
"exportCanvasToPSD": "Esporta la tela in PSD",
"ruleOfThirds": "Mostra la regola dei terzi",
"showNonRasterLayers": "Mostra livelli non raster (Shift+H)",
"hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
"uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
"autoSwitch": {
"switchOnStart": "All'inizio",
"switchOnFinish": "Alla fine",
"off": "Spento"
},
"invertMask": "Inverti maschera",
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere",
"maxRefImages": "Max Immagini di rif.to",
"useAsReferenceImage": "Usa come immagine di riferimento"
},
"ui": {
"tabs": {
"generation": "Generazione",
"canvas": "Tela",
"workflows": "Flussi di lavoro",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
@@ -2365,7 +2509,92 @@
"queue": "Coda",
"upscaling": "Amplia",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
"gallery": "Galleria"
"gallery": "Galleria",
"generate": "Genera"
},
"launchpad": {
"workflowsTitle": "Approfondisci i flussi di lavoro.",
"upscalingTitle": "Amplia e aggiungi dettagli.",
"canvasTitle": "Modifica e perfeziona sulla tela.",
"generateTitle": "Genera immagini da prompt testuali.",
"modelGuideText": "Vuoi scoprire quali prompt funzionano meglio per ciascun modello?",
"modelGuideLink": "Consulta la nostra guida ai modelli.",
"workflows": {
"description": "I flussi di lavoro sono modelli riutilizzabili che automatizzano le attività di generazione delle immagini, consentendo di eseguire rapidamente operazioni complesse e di ottenere risultati coerenti.",
"learnMoreLink": "Scopri di più sulla creazione di flussi di lavoro",
"browseTemplates": {
"title": "Sfoglia i modelli di flusso di lavoro",
"description": "Scegli tra flussi di lavoro predefiniti per le attività comuni"
},
"createNew": {
"title": "Crea un nuovo flusso di lavoro",
"description": "Avvia un nuovo flusso di lavoro da zero"
},
"loadFromFile": {
"title": "Carica flusso di lavoro da file",
"description": "Carica un flusso di lavoro per iniziare con una configurazione esistente"
}
},
"upscaling": {
"uploadImage": {
"title": "Carica l'immagine da ampliare",
"description": "Fai clic o trascina un'immagine per ingrandirla (JPG, PNG, WebP fino a 100 MB)"
},
"replaceImage": {
"title": "Sostituisci l'immagine corrente",
"description": "Fai clic o trascina una nuova immagine per sostituire quella corrente"
},
"imageReady": {
"title": "Immagine pronta",
"description": "Premere Invoke per iniziare l'ampliamento"
},
"readyToUpscale": {
"title": "Pronto per ampliare!",
"description": "Configura le impostazioni qui sotto, quindi fai clic sul pulsante Invoke per iniziare ad ampliare l'immagine."
},
"upscaleModel": "Modello per l'ampliamento",
"model": "Modello",
"scale": "Scala",
"helpText": {
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
},
"creativityAndStructure": {
"title": "Creatività e struttura predefinite",
"conservative": "Conservativo",
"balanced": "Bilanciato",
"creative": "Creativo",
"artistic": "Artistico"
}
},
"createNewWorkflowFromScratch": "Crea un nuovo flusso di lavoro da zero",
"browseAndLoadWorkflows": "Sfoglia e carica i flussi di lavoro esistenti",
"addStyleRef": {
"title": "Aggiungi un riferimento di stile",
"description": "Aggiungi un'immagine per trasferirne l'aspetto."
},
"editImage": {
"title": "Modifica immagine",
"description": "Aggiungi un'immagine da perfezionare."
},
"generateFromText": {
"title": "Genera da testo",
"description": "Inserisci un prompt e genera."
},
"useALayoutImage": {
"description": "Aggiungi un'immagine per controllare la composizione.",
"title": "Usa una immagine guida"
},
"generate": {
"canvasCalloutTitle": "Vuoi avere più controllo, modificare e affinare le tue immagini?",
"canvasCalloutLink": "Per ulteriori funzionalità, vai su Tela."
}
},
"panels": {
"launchpad": "Rampa di lancio",
"workflowEditor": "Editor del flusso di lavoro",
"imageViewer": "Visualizzatore immagini",
"canvas": "Tela"
}
},
"upscaling": {
@@ -2386,7 +2615,10 @@
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
"upscale": "Amplia",
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento.",
"tileControl": "Controllo del riquadro",
"tileSize": "Dimensione del riquadro",
"tileOverlap": "Sovrapposizione riquadro"
},
"upsell": {
"inviteTeammates": "Invita collaboratori",
@@ -2436,7 +2668,8 @@
"positivePromptColumn": "'prompt' o 'positive_prompt'",
"noTemplates": "Nessun modello",
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
"promptTemplateCleared": "Modello di prompt cancellato"
"promptTemplateCleared": "Modello di prompt cancellato",
"togglePromptPreviews": "Attiva/disattiva le anteprime dei prompt"
},
"newUserExperience": {
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
@@ -2452,8 +2685,8 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
"Lo stato dello studio viene salvato sul server, consentendoti di continuare a lavorare su qualsiasi dispositivo.",
"Supporto per più immagini di riferimento per FLUX Kontext (solo modello locale)."
]
},
"system": {
@@ -2485,64 +2718,18 @@
"supportVideos": {
"gettingStarted": "Iniziare",
"supportVideos": "Video di supporto",
"videos": {
"usingControlLayersAndReferenceGuides": {
"title": "Utilizzo di livelli di controllo e guide di riferimento",
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
},
"creatingYourFirstImage": {
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
"title": "Creazione della tua prima immagine"
},
"understandingImageToImageAndDenoising": {
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
},
"howDoIDoImageToImageTransformation": {
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
},
"howDoIUseInpaintMasks": {
"title": "Come si usano le maschere Inpaint?",
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
},
"howDoIOutpaint": {
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
"title": "Come posso eseguire l'outpainting?"
},
"exploringAIModelsAndConceptAdapters": {
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
},
"upscaling": {
"title": "Ampliamento",
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
"title": "Creare e comporre sulla tela di controllo di Invoke"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Passaggi per generare e salvare le immagini nella galleria.",
"title": "Come posso generare e salvare nella Galleria?"
},
"howDoIEditOnTheCanvas": {
"title": "Come posso apportare modifiche sulla tela?",
"description": "Guida alla modifica delle immagini direttamente sulla tela."
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
}
},
"controlCanvas": "Tela di Controllo",
"watch": "Guarda",
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
"studioSessionsDesc": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e porre domande. Le sessioni vengono caricate nella playlist la settimana successiva.",
"videos": {
"gettingStarted": {
"title": "Introduzione a Invoke",
"description": "Serie video completa che copre tutto ciò che devi sapere per iniziare a usare Invoke, dalla creazione della tua prima immagine alle tecniche avanzate."
},
"studioSessions": {
"title": "Sessioni in studio",
"description": "Sessioni approfondite che esplorano le funzionalità avanzate di Invoke, i flussi di lavoro creativi e le discussioni della community."
}
}
},
"modelCache": {
"clear": "Cancella la cache del modello",

View File

@@ -141,7 +141,7 @@
"loading": "ロード中",
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
"drop": "ドロップ",
"dropOrUpload": "$t(gallery.drop) またはアップロード",
"dropOrUpload": "ドロップまたはアップロード",
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
@@ -193,7 +193,8 @@
"images": "画像",
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
"imagesTab": "Invoke内で作成および保存された画像。",
"assets": "アセット"
"assets": "アセット",
"useForPromptGeneration": "プロンプト生成に使用する"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
@@ -363,6 +364,16 @@
"selectRectTool": {
"title": "矩形ツール",
"desc": "矩形ツールを選択します。"
},
"settings": {
"behavior": "行動",
"display": "ディスプレイ",
"grid": "グリッド",
"debug": "デバッグ"
},
"toggleNonRasterLayers": {
"title": "非ラスターレイヤーの切り替え",
"desc": "ラスター以外のレイヤー カテゴリ (コントロール レイヤー、インペイント マスク、地域ガイダンス) を表示または非表示にします。"
}
},
"workflows": {
@@ -630,7 +641,7 @@
"restoreDefaultSettings": "クリックするとモデルのデフォルト設定が使用されます.",
"hfTokenSaved": "ハギングフェイストークンを保存しました",
"imageEncoderModelId": "画像エンコーダーモデルID",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
"learnMoreAboutSupportedModels": "私たちのサポートしているモデルについて更に学ぶ",
"modelImageUpdateFailed": "モデル画像アップデート失敗",
"scanFolder": "スキャンフォルダ",
@@ -654,7 +665,30 @@
"manageModels": "モデル管理",
"hfTokenReset": "ハギングフェイストークンリセット",
"relatedModels": "関連のあるモデル",
"showOnlyRelatedModels": "関連している"
"showOnlyRelatedModels": "関連している",
"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
"nToInstall": "{{count}}個をインストールする",
"nAlreadyInstalled": "{{count}} 個すでにインストールされています",
"bundleAlreadyInstalled": "バンドルがすでにインストールされています",
"bundleAlreadyInstalledDesc": "{{bundleName}} バンドル内のすべてのモデルはすでにインストールされています。",
"launchpadTab": "ランチパッド",
"launchpad": {
"welcome": "モデルマネジメントへようこそ",
"description": "Invoke プラットフォームのほとんどの機能を利用するには、モデルのインストールが必要です。手動インストールオプションから選択するか、厳選されたスターターモデルをご覧ください。",
"manualInstall": "マニュアルインストール",
"urlDescription": "URLまたはローカルファイルパスからモデルをインストールします。特定のモデルを追加したい場合に最適です。",
"huggingFaceDescription": "HuggingFace リポジトリからモデルを直接参照してインストールします。",
"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
"recommendedModels": "推奨モデル",
"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
"quickStart": "クイックスタートバンドル",
"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
"browseAll": "または、利用可能なすべてのモデルを参照してください。",
"stableDiffusion15": "Stable Diffusion1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"parameters": {
"images": "画像",
@@ -720,7 +754,10 @@
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
"promptExpansionPending": "プロンプト拡張が進行中",
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
},
"aspect": "縦横比",
"lockAspectRatio": "縦横比を固定",
@@ -875,7 +912,26 @@
"imageNotLoadedDesc": "画像を見つけられません",
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
"noRasterLayers": "ラスターレイヤーが見つかりません",
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
"canvasTooLarge": "キャンバスが大きすぎます",
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
"failedToProcessLayers": "レイヤーの処理に失敗しました",
"psdExportSuccess": "PSDエクスポート完了",
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
"promptGenerationStarted": "プロンプト生成が開始されました",
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
"promptExpansionFailed": "プロンプト拡張に失敗しました"
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -1014,7 +1070,8 @@
"lora": "LoRA",
"defaultVAE": "デフォルトVAE",
"noLoRAsInstalled": "インストールされているLoRAはありません",
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません"
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
"noCompatibleLoRAs": "互換性のあるLoRAはありません"
},
"nodes": {
"addNode": "ノードを追加",
@@ -1708,7 +1765,16 @@
"prompt": {
"addPromptTrigger": "プロンプトトリガーを追加",
"compatibleEmbeddings": "互換性のある埋め込み",
"noMatchingTriggers": "一致するトリガーがありません"
"noMatchingTriggers": "一致するトリガーがありません",
"generateFromImage": "画像からプロンプトを生成する",
"expandCurrentPrompt": "現在のプロンプトを展開",
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
"expandingPrompt": "プロンプトを展開しています...",
"resultTitle": "プロンプト拡張完了",
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
"replace": "交換する",
"insert": "挿入する",
"discard": "破棄する"
},
"ui": {
"tabs": {
@@ -1716,7 +1782,60 @@
"canvas": "キャンバス",
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー"
"gallery": "ギャラリー",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"upscaling": "アップスケーリング",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
},
"launchpad": {
"upscaling": {
"model": "モデル",
"scale": "スケール",
"helpText": {
"promptAdvice": "アップスケールする際は、媒体とスタイルを説明するプロンプトを使用してください。画像内の具体的なコンテンツの詳細を説明することは避けてください。",
"styleAdvice": "アップスケーリングは、画像の全体的なスタイルに最適です。"
},
"uploadImage": {
"title": "アップスケール用の画像をアップロードする",
"description": "アップスケールするには、画像をクリックまたはドラッグしますJPG、PNG、WebP、最大100MB"
},
"replaceImage": {
"title": "現在の画像を置き換える",
"description": "新しい画像をクリックまたはドラッグして、現在の画像を置き換えます"
},
"imageReady": {
"title": "画像準備完了",
"description": "アップスケールを開始するにはInvokeを押してください"
},
"readyToUpscale": {
"title": "アップスケールの準備ができました!",
"description": "以下の設定を構成し、「Invoke」ボタンをクリックして画像のアップスケールを開始します。"
},
"upscaleModel": "アップスケールモデル"
},
"workflowsTitle": "ワークフローを詳しく見てみましょう。",
"upscalingTitle": "アップスケールして詳細を追加します。",
"canvasTitle": "キャンバス上で編集および調整します。",
"generateTitle": "テキストプロンプトから画像を生成します。",
"modelGuideText": "各モデルに最適なプロンプトを知りたいですか?",
"modelGuideLink": "モデルガイドをご覧ください。",
"workflows": {
"description": "ワークフローは、画像生成タスクを自動化する再利用可能なテンプレートであり、複雑な操作を迅速に実行して一貫した結果を得ることができます。",
"learnMoreLink": "ワークフローの作成について詳しく見る",
"browseTemplates": {
"title": "ワークフローテンプレートを参照する",
"description": "一般的なタスク用にあらかじめ構築されたワークフローから選択する"
},
"createNew": {
"title": "新規ワークフローを作成する",
"description": "新しいワークフローをゼロから始める"
},
"loadFromFile": {
"title": "ファイルからワークフローを読み込む",
"description": "既存の設定から開始するためのワークフローをアップロードする"
}
}
}
},
"controlLayers": {
@@ -1732,7 +1851,16 @@
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像",
"canvasGroup": "キャンバス"
"canvasGroup": "キャンバス",
"saveToGalleryGroup": "ギャラリーに保存",
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "Bボックスをギャラリーに保存",
"newControlLayer": "新規コントロールレイヤー",
"newRasterLayer": "新規ラスターレイヤー",
"newInpaintMask": "新規インペイントマスク",
"copyToClipboard": "クリップボードにコピー",
"copyCanvasToClipboard": "キャンバスをクリップボードにコピー",
"copyBboxToClipboard": "Bボックスをクリップボードにコピー"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
@@ -1743,7 +1871,11 @@
"transform": "変形",
"apply": "適用",
"cancel": "キャンセル",
"reset": "リセット"
"reset": "リセット",
"fitMode": "フィットモード",
"fitModeContain": "含む",
"fitModeCover": "カバー",
"fitModeFill": "満たす"
},
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
@@ -1754,7 +1886,8 @@
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム",
"bbox": "Bbox"
"bbox": "Bbox",
"view": "ビュー"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -1774,25 +1907,386 @@
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました",
"controlMode": {
"prompt": "プロンプト"
"prompt": "プロンプト",
"controlMode": "コントロールモード",
"balanced": "バランス(推奨)",
"control": "コントロール",
"megaControl": "メガコントロール"
},
"prompt": "プロンプト",
"settings": {
"snapToGrid": {
"off": "オフ",
"on": "オン"
}
"on": "オン",
"label": "グリッドにスナップ"
},
"preserveMask": {
"label": "マスクされた領域を保持",
"alert": "マスクされた領域の保存"
},
"isolatedStagingPreview": "分離されたステージングプレビュー",
"isolatedPreview": "分離されたプレビュー",
"isolatedLayerPreview": "分離されたレイヤーのプレビュー",
"isolatedLayerPreviewDesc": "フィルタリングや変換などの操作を実行するときに、このレイヤーのみを表示するかどうか。",
"invertBrushSizeScrollDirection": "ブラシサイズのスクロール反転",
"pressureSensitivity": "圧力感度"
},
"filter": {
"filter": "フィルター",
"spandrel_filter": {
"model": "モデル"
"model": "モデル",
"label": "img2imgモデル",
"description": "選択したレイヤーでimg2imgモデルを実行します。",
"autoScale": "オートスケール",
"autoScaleDesc": "選択したモデルは、目標スケールに達するまで実行されます。",
"scale": "ターゲットスケール"
},
"apply": "適用",
"reset": "リセット",
"cancel": "キャンセル"
"cancel": "キャンセル",
"filters": "フィルター",
"filterType": "フィルタータイプ",
"autoProcess": "オートプロセス",
"process": "プロセス",
"advanced": "アドバンスド",
"processingLayerWith": "{{type}} フィルターを使用した処理レイヤー。",
"forMoreControl": "さらに細かく制御するには、以下の「詳細設定」をクリックしてください。",
"canny_edge_detection": {
"label": "キャニーエッジ検出",
"description": "Canny エッジ検出アルゴリズムを使用して、選択したレイヤーからエッジ マップを生成します。",
"low_threshold": "低閾値",
"high_threshold": "高閾値"
},
"color_map": {
"label": "カラーマップ",
"description": "選択したレイヤーからカラーマップを作成します。",
"tile_size": "タイルサイズ"
},
"content_shuffle": {
"label": "コンテンツシャッフル",
"description": "選択したレイヤーのコンテンツを、「液化」効果と同様にシャッフルします。",
"scale_factor": "スケール係数"
},
"depth_anything_depth_estimation": {
"label": "デプスエニシング",
"description": "デプスエニシングモデルを使用して、選択したレイヤーから深度マップを生成します。",
"model_size": "モデルサイズ",
"model_size_small": "スモール",
"model_size_small_v2": "スモールv2",
"model_size_base": "ベース",
"model_size_large": "ラージ"
},
"dw_openpose_detection": {
"label": "DW オープンポーズ検出",
"description": "DW Openpose モデルを使用して、選択したレイヤー内の人間のポーズを検出します。",
"draw_hands": "手を描く",
"draw_face": "顔を描く",
"draw_body": "体を描く"
},
"hed_edge_detection": {
"label": "HEDエッジ検出",
"description": "HED エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"scribble": "落書き"
},
"lineart_anime_edge_detection": {
"label": "線画アニメのエッジ検出",
"description": "線画アニメエッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。"
},
"lineart_edge_detection": {
"label": "線画エッジ検出",
"description": "線画エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"coarse": "粗い"
},
"mediapipe_face_detection": {
"label": "メディアパイプ顔検出",
"description": "メディアパイプ顔検出モデルを使用して、選択したレイヤー内の顔を検出します。",
"max_faces": "マックスフェイス",
"min_confidence": "最小信頼度"
},
"mlsd_detection": {
"label": "線分検出",
"description": "MLSD 線分検出モデルを使用して、選択したレイヤーから線分マップを生成します。",
"score_threshold": "スコア閾値",
"distance_threshold": "距離閾値"
},
"normal_map": {
"label": "ノーマルマップ",
"description": "選択したレイヤーからノーマルマップを生成します。"
},
"pidi_edge_detection": {
"label": "PiDiNetエッジ検出",
"description": "PiDiNet エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"scribble": "落書き",
"quantize_edges": "エッジを量子化する"
},
"img_blur": {
"label": "画像をぼかす",
"description": "選択したレイヤーをぼかします。",
"blur_type": "ぼかしの種類",
"blur_radius": "半径",
"gaussian_type": "ガウス分布",
"box_type": "ボックス"
},
"img_noise": {
"label": "ノイズ画像",
"description": "選択したレイヤーにノイズを追加します。",
"noise_type": "ノイズの種類",
"noise_amount": "総計",
"gaussian_type": "ガウス分布",
"salt_and_pepper_type": "塩コショウ",
"noise_color": "カラーノイズ",
"size": "ノイズサイズ"
},
"adjust_image": {
"label": "画像を調整する",
"description": "画像の選択したチャンネルを調整します。",
"channel": "チャンネル",
"value_setting": "バリュー",
"scale_values": "スケールバリュー",
"red": "赤RGBA",
"green": "緑RGBA",
"blue": "青RGBA",
"alpha": "アルファRGBA",
"cyan": "シアンCMYK",
"magenta": "マゼンタCMYK",
"yellow": "黄色CMYK",
"black": "黒CMYK",
"hue": "色相HSV",
"saturation": "彩度HSV",
"value": "値HSV",
"luminosity": "明度LAB",
"a": "Aラボ",
"b": "Bラボ",
"y": "YYCbCr",
"cb": "CbYCbCr",
"cr": "CrYCbCr"
}
},
"weight": "重み"
"weight": "重み",
"bookmark": "クイックスイッチのブックマーク",
"exportCanvasToPSD": "キャンバスをPSDにエクスポート",
"savedToGalleryError": "ギャラリーへの保存中にエラーが発生しました",
"regionCopiedToClipboard": "{{region}} をクリップボードにコピーしました",
"copyRegionError": "{{region}} のコピー中にエラーが発生しました",
"newGlobalReferenceImageOk": "作成されたグローバル参照画像",
"newGlobalReferenceImageError": "グローバル参照イメージの作成中に問題が発生しました",
"newRegionalReferenceImageOk": "地域参照画像の作成",
"newRegionalReferenceImageError": "地域参照画像の作成中に問題が発生しました",
"newControlLayerOk": "制御レイヤーの作成",
"newControlLayerError": "制御層の作成中に問題が発生しました",
"newRasterLayerOk": "ラスターレイヤーを作成しました",
"newRasterLayerError": "ラスターレイヤーの作成中に問題が発生しました",
"pullBboxIntoLayerOk": "Bbox をレイヤーにプル",
"pullBboxIntoLayerError": "BBox をレイヤーにプルする際に問題が発生しました",
"pullBboxIntoReferenceImageOk": "Bbox が ReferenceImage にプルされました",
"pullBboxIntoReferenceImageError": "BBox を ReferenceImage にプルする際に問題が発生しました",
"regionIsEmpty": "選択した領域は空です",
"mergeVisible": "マージを可視化",
"mergeVisibleOk": "マージされたレイヤー",
"mergeVisibleError": "レイヤーの結合エラー",
"mergingLayers": "レイヤーのマージ",
"clearHistory": "履歴をクリア",
"bboxOverlay": "Bboxオーバーレイを表示",
"ruleOfThirds": "三分割法を表示",
"newSession": "新しいセッション",
"clearCaches": "キャッシュをクリア",
"recalculateRects": "長方形を再計算する",
"clipToBbox": "ストロークをBboxにクリップ",
"outputOnlyMaskedRegions": "生成された領域のみを出力する",
"width": "幅",
"autoNegative": "オートネガティブ",
"enableAutoNegative": "オートネガティブを有効にする",
"disableAutoNegative": "オートネガティブを無効にする",
"deletePrompt": "プロンプトを削除",
"deleteReferenceImage": "参照画像を削除",
"showHUD": "HUDを表示",
"maskFill": "マスク塗りつぶし",
"addPositivePrompt": "$t(controlLayers.prompt) を追加します",
"addNegativePrompt": "$t(controlLayers.negativePrompt)を追加します",
"addReferenceImage": "$t(controlLayers.referenceImage)を追加します",
"addImageNoise": "$t(controlLayers.imageNoise)を追加します",
"addRasterLayer": "$t(controlLayers.rasterLayer)を追加します",
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
"controlLayer": "コントロールレイヤー",
"inpaintMask": "インペイントマスク",
"referenceImageRegional": "参考画像(地域別)",
"referenceImageGlobal": "参考画像(グローバル)",
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
"asControlLayer": "$t(controlLayers.controlLayer) として",
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
"referenceImage": "参照画像",
"sendingToCanvas": "キャンバスに生成をのせる",
"sendingToGallery": "生成をギャラリーに送る",
"sendToGallery": "ギャラリーに送る",
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
"sendToCanvas": "キャンバスに送る",
"newLayerFromImage": "画像から新規レイヤー",
"newCanvasFromImage": "画像から新規キャンバス",
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
"copyToClipboard": "クリップボードにコピー",
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
"rasterLayer_withCount_other": "ラスターレイヤー",
"controlLayer_withCount_other": "コントロールレイヤー",
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}}",
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
"layer_other": "レイヤー",
"layer_withCount_other": "レイヤー ({{count}})",
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
"copyRasterLayerTo": "$t(controlLayers.rasterLayer)をコピーする",
"copyControlLayerTo": "$t(controlLayers.controlLayer) をコピーする",
"copyRegionalGuidanceTo": "$t(controlLayers.regionalGuidance)をコピーする",
"newRasterLayer": "新しい $t(controlLayers.rasterLayer)",
"newControlLayer": "新しい $t(controlLayers.controlLayer)",
"newInpaintMask": "新しい $t(controlLayers.inpaintMask)",
"newRegionalGuidance": "新しい $t(controlLayers.regionalGuidance)",
"pasteTo": "貼り付け先",
"pasteToAssets": "アセット",
"pasteToAssetsDesc": "アセットに貼り付け",
"pasteToBbox": "Bボックス",
"pasteToBboxDesc": "新しいレイヤーBbox内",
"pasteToCanvas": "キャンバス",
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
"pastedTo": "{{destination}} に貼り付けました",
"transparency": "透明性",
"enableTransparencyEffect": "透明効果を有効にする",
"disableTransparencyEffect": "透明効果を無効にする",
"hidingType": "{{type}} を非表示",
"showingType": "{{type}}を表示",
"showNonRasterLayers": "非ラスターレイヤーを表示 (Shift+H)",
"hideNonRasterLayers": "非ラスターレイヤーを非表示にする (Shift+H)",
"dynamicGrid": "ダイナミックグリッド",
"logDebugInfo": "デバッグ情報をログに記録する",
"locked": "ロックされています",
"unlocked": "ロック解除",
"deleteSelected": "選択項目を削除",
"stagingOnCanvas": "ステージング画像",
"replaceLayer": "レイヤーの置き換え",
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
"showProgressOnCanvas": "キャンバスに進捗状況を表示",
"useImage": "画像を使う",
"negativePrompt": "ネガティブプロンプト",
"beginEndStepPercentShort": "開始/終了 %",
"newGallerySession": "新しいギャラリーセッション",
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
"newCanvasSession": "新規キャンバスセッション",
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
"resetCanvasLayers": "キャンバスレイヤーをリセット",
"resetGenerationSettings": "生成設定をリセット",
"replaceCurrent": "現在のものを置き換える",
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
"imageNoise": "画像ノイズ",
"denoiseLimit": "ノイズ除去制限",
"warnings": {
"problemsFound": "問題が見つかりました",
"unsupportedModel": "選択したベースモデルではレイヤーがサポートされていません",
"controlAdapterNoModelSelected": "制御レイヤーモデルが選択されていません",
"controlAdapterIncompatibleBaseModel": "互換性のない制御レイヤーベースモデル",
"controlAdapterNoControl": "コントロールが選択/描画されていません",
"ipAdapterNoModelSelected": "参照画像モデルが選択されていません",
"ipAdapterIncompatibleBaseModel": "互換性のない参照画像ベースモデル",
"ipAdapterNoImageSelected": "参照画像が選択されていません",
"rgNoPromptsOrIPAdapters": "テキストプロンプトや参照画像はありません",
"rgNegativePromptNotSupported": "選択されたベースモデルでは否定プロンプトはサポートされていません",
"rgReferenceImagesNotSupported": "選択されたベースモデルでは地域の参照画像はサポートされていません",
"rgAutoNegativeNotSupported": "選択したベースモデルでは自動否定はサポートされていません",
"rgNoRegion": "領域が描画されていません",
"fluxFillIncompatibleWithControlLoRA": "コントロールLoRAはFLUX Fillと互換性がありません"
},
"errors": {
"unableToFindImage": "画像が見つかりません",
"unableToLoadImage": "画像を読み込めません"
},
"ipAdapterMethod": {
"ipAdapterMethod": "モード",
"full": "スタイルと構成",
"fullDesc": "視覚スタイル (色、テクスチャ) と構成 (レイアウト、構造) を適用します。",
"style": "スタイル(シンプル)",
"styleDesc": "レイアウトを考慮せずに視覚スタイル(色、テクスチャ)を適用します。以前は「スタイルのみ」と呼ばれていました。",
"composition": "構成のみ",
"compositionDesc": "参照スタイルを無視してレイアウトと構造を複製します。",
"styleStrong": "スタイル(ストロング)",
"styleStrongDesc": "構成への影響をわずかに抑えて、強力なビジュアル スタイルを適用します。",
"stylePrecise": "スタイル(正確)",
"stylePreciseDesc": "被写体の影響を排除し、正確な視覚スタイルを適用します。"
},
"fluxReduxImageInfluence": {
"imageInfluence": "イメージの影響力",
"lowest": "最低",
"low": "低",
"medium": "中",
"high": "高",
"highest": "最高"
},
"fill": {
"fillColor": "塗りつぶし色",
"fillStyle": "塗りつぶしスタイル",
"solid": "固体",
"grid": "グリッド",
"crosshatch": "クロスハッチ",
"vertical": "垂直",
"horizontal": "水平",
"diagonal": "対角線"
},
"selectObject": {
"selectObject": "オブジェクトを選択",
"pointType": "ポイントタイプ",
"invertSelection": "選択範囲を反転",
"include": "含む",
"exclude": "除外",
"neutral": "ニュートラル",
"apply": "適用",
"reset": "リセット",
"saveAs": "名前を付けて保存",
"cancel": "キャンセル",
"process": "プロセス",
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
"dragToMove": "ポイントをドラッグして移動します",
"clickToRemove": "ポイントをクリックして削除します"
},
"HUD": {
"bbox": "Bボックス",
"scaledBbox": "スケールされたBボックス",
"entityStatus": {
"isFiltering": "{{title}} はフィルタリング中です",
"isTransforming": "{{title}}は変化しています",
"isLocked": "{{title}}はロックされています",
"isHidden": "{{title}}は非表示になっています",
"isDisabled": "{{title}}は無効です",
"isEmpty": "{{title}} は空です"
}
},
"stagingArea": {
"accept": "受け入れる",
"discardAll": "すべて破棄",
"discard": "破棄する",
"previous": "前へ",
"next": "次へ",
"saveToGallery": "ギャラリーに保存",
"showResultsOn": "結果を表示",
"showResultsOff": "結果を隠す"
}
},
"stylePresets": {
"clearTemplateSelection": "選択したテンプレートをクリア",
@@ -1810,13 +2304,56 @@
"nameColumn": "'name'",
"type": "タイプ",
"private": "プライベート",
"name": "名称"
"name": "名称",
"active": "アクティブ",
"copyTemplate": "テンプレートをコピー",
"deleteImage": "画像を削除",
"deleteTemplate": "テンプレートを削除",
"deleteTemplate2": "このテンプレートを削除してもよろしいですか? 元に戻すことはできません。",
"exportPromptTemplates": "プロンプトテンプレートをエクスポートするCSV",
"editTemplate": "テンプレートを編集",
"exportDownloaded": "エクスポートをダウンロードしました",
"exportFailed": "生成とCSVのダウンロードができません",
"importTemplates": "プロンプトテンプレートのインポートCSV/JSON",
"acceptedColumnsKeys": "受け入れられる列/キー:",
"positivePromptColumn": "'プロンプト'または'ポジティブプロンプト'",
"insertPlaceholder": "プレースホルダーを挿入",
"negativePrompt": "ネガティブプロンプト",
"noTemplates": "テンプレートがありません",
"noMatchingTemplates": "マッチするテンプレートがありません",
"promptTemplatesDesc1": "プロンプトテンプレートは、プロンプトボックスに書き込むプロンプトにテキストを追加します。",
"promptTemplatesDesc2": "テンプレート内でプロンプトを含める場所を指定するには <Pre>{{placeholder}}</Pre> のプレースホルダーの文字列を使用します。",
"promptTemplatesDesc3": "プレースホルダーを省略すると、テンプレートはプロンプトの末尾に追加されます。",
"positivePrompt": "ポジティブプロンプト",
"shared": "共有",
"sharedTemplates": "テンプレートを共有",
"templateDeleted": "プロンプトテンプレートを削除しました",
"unableToDeleteTemplate": "プロンプトテンプレートを削除できません",
"updatePromptTemplate": "プロンプトテンプレートをアップデート",
"useForTemplate": "プロンプトテンプレートに使用する",
"viewList": "テンプレートリストを表示",
"viewModeTooltip": "現在選択されているテンプレートでは、プロンプトはこのようになります。プロンプトを編集するには、テキストボックス内の任意の場所をクリックしてください。",
"togglePromptPreviews": "プロンプトプレビューを切り替える"
},
"upscaling": {
"upscaleModel": "アップスケールモデル",
"postProcessingModel": "ポストプロセスモデル",
"upscale": "アップスケール",
"scale": "スケール"
"scale": "スケール",
"creativity": "創造性",
"exceedsMaxSize": "アップスケール設定が最大サイズ制限を超えています",
"exceedsMaxSizeDetails": "アップスケールの上限は{{max Upscale Dimension}} x {{max Upscale Dimension}}ピクセルです。画像を小さくするか、スケールの選択範囲を小さくしてください。",
"structure": "構造",
"postProcessingMissingModelWarning": "後処理 (img2img) モデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
"missingModelsWarning": "必要なモデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
"mainModelDesc": "メインモデルSD1.5またはSDXLアーキテクチャ",
"tileControlNetModelDesc": "選択したメインモデルアーキテクチャのタイルコントロールネットモデル",
"upscaleModelDesc": "アップスケールimg2imgモデル",
"missingUpscaleInitialImage": "アップスケール用の初期画像がありません",
"missingUpscaleModel": "アップスケールモデルがありません",
"missingTileControlNetModel": "有効なタイル コントロールネットモデルがインストールされていません",
"incompatibleBaseModel": "アップスケーリングにサポートされていないメインモデルアーキテクチャです",
"incompatibleBaseModelDesc": "アップスケーリングはSD1.5およびSDXLアーキテクチャモデルでのみサポートされています。アップスケーリングを有効にするには、メインモデルを変更してください。"
},
"sdxl": {
"denoisingStrength": "ノイズ除去強度",
@@ -1891,7 +2428,34 @@
"minimum": "最小",
"publish": "公開",
"unpublish": "非公開",
"publishedWorkflowInputs": "インプット"
"publishedWorkflowInputs": "インプット",
"workflowLocked": "ワークフローがロックされました",
"workflowLockedPublished": "公開済みのワークフローは編集用にロックされています。\nワークフローを非公開にして編集したり、コピーを作成したりできます。",
"workflowLockedDuringPublishing": "公開の構成中にワークフローがロックされます。",
"selectOutputNode": "出力ノードを選択",
"changeOutputNode": "出力ノードの変更",
"unpublishableInputs": "これらの公開できない入力は省略されます",
"noPublishableInputs": "公開可能な入力はありません",
"noOutputNodeSelected": "出力ノードが選択されていません",
"cannotPublish": "ワークフローを公開できません",
"publishWarnings": "警告",
"errorWorkflowHasUnsavedChanges": "ワークフローに保存されていない変更があります",
"errorWorkflowHasUnpublishableNodes": "ワークフローにはバッチ、ジェネレータ、またはメタデータ抽出ノードがあります",
"errorWorkflowHasInvalidGraph": "ワークフロー グラフが無効です (詳細については [呼び出し] ボタンにマウスを移動してください)",
"errorWorkflowHasNoOutputNode": "出力ノードが選択されていません",
"warningWorkflowHasNoPublishableInputFields": "公開可能な入力フィールドが選択されていません - 公開されたワークフローはデフォルト値のみで実行されます",
"warningWorkflowHasUnpublishableInputFields": "ワークフローには公開できない入力がいくつかあります。これらは公開されたワークフローから省略されます",
"publishFailed": "公開失敗",
"publishFailedDesc": "ワークフローの公開中に問題が発生しました。もう一度お試しください。",
"publishSuccess": "ワークフローを公開しています",
"publishSuccessDesc": "<LinkComponent>プロジェクト ダッシュボード</LinkComponent> をチェックして進捗状況を確認してください。",
"publishInProgress": "公開中",
"publishedWorkflowIsLocked": "公開されたワークフローはロックされています",
"publishingValidationRun": "公開検証実行",
"publishingValidationRunInProgress": "公開検証の実行が進行中です。",
"publishedWorkflowsLocked": "公開済みのワークフローはロックされており、編集または実行できません。このワークフローを編集または実行するには、ワークフローを非公開にするか、コピーを保存してください。",
"selectingOutputNode": "出力ノードの選択",
"selectingOutputNodeDesc": "ノードをクリックして、ワークフローの出力ノードとして選択します。"
},
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
"unnamedWorkflow": "名前のないワークフロー",
@@ -1954,15 +2518,23 @@
"models": "モデル",
"canvas": "キャンバス",
"metadata": "メタデータ",
"queue": "キュー"
"queue": "キュー",
"logNamespaces": "ログのネームスペース",
"dnd": "ドラッグ&ドロップ",
"config": "構成",
"generation": "生成",
"events": "イベント"
},
"logLevel": {
"debug": "Debug",
"info": "Info",
"error": "Error",
"fatal": "Fatal",
"warn": "Warn"
}
"warn": "Warn",
"logLevel": "ログレベル",
"trace": "追跡"
},
"enableLogging": "ログを有効にする"
},
"dynamicPrompts": {
"promptsPreview": "プロンプトプレビュー",
@@ -1978,5 +2550,34 @@
"dynamicPrompts": "ダイナミックプロンプト",
"loading": "ダイナミックプロンプトを生成...",
"maxPrompts": "最大プロンプト"
},
"upsell": {
"inviteTeammates": "チームメートを招待",
"professional": "プロフェッショナル",
"professionalUpsell": "InvokeのProfessional Editionでご利用いただけます。詳細については、こちらをクリックするか、invoke.com/pricingをご覧ください。",
"shareAccess": "共有アクセス"
},
"newUserExperience": {
"toGetStartedLocal": "始めるには、Invoke の実行に必要なモデルをダウンロードまたはインポートしてください。次に、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
"toGetStarted": "開始するには、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
"toGetStartedWorkflow": "開始するには、左側のフィールドに入力し、<StrongComponent>Invoke</StrongComponent> をクリックして画像を生成します。他のワークフローも試してみたい場合は、ワークフロータイトルの横にある<StrongComponent>フォルダアイコン</StrongComponent> をクリックすると、試せる他のテンプレートのリストが表示されます。",
"gettingStartedSeries": "さらに詳しいガイダンスが必要ですか? Invoke Studio の可能性を最大限に引き出すためのヒントについては、<LinkComponent>入門シリーズ</LinkComponent>をご覧ください。",
"lowVRAMMode": "最高のパフォーマンスを得るには、<LinkComponent>低 VRAM ガイド</LinkComponent>に従ってください。",
"noModelsInstalled": "モデルがインストールされていないようです。<DownloadStarterModelsButton>スターターモデルバンドルをダウンロード</DownloadStarterModelsButton>するか、<ImportModelsButton>モデルをインポート</ImportModelsButton>してください。"
},
"whatsNew": {
"whatsNewInInvoke": "Invokeの新機能",
"items": [
"インペインティング: マスクごとのノイズ レベルとノイズ除去の制限。",
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
],
"readReleaseNotes": "リリースノートを読む",
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
"watchUiUpdatesOverview": "Watch UI アップデートの概要"
},
"supportVideos": {
"supportVideos": "サポートビデオ",
"gettingStarted": "はじめる",
"watch": "ウォッチ"
}
}

View File

@@ -1931,7 +1931,6 @@
},
"ui": {
"tabs": {
"generation": "Генерация",
"canvas": "Холст",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"models": "Модели",

View File

@@ -74,7 +74,7 @@
"bulkDownloadFailed": "Tải Xuống Thất Bại",
"bulkDownloadRequestFailed": "Có Vấn Đề Khi Đang Chuẩn Bị Tải Xuống",
"download": "Tải Xuống",
"dropOrUpload": "$t(gallery.drop) Hoặc Tải Lên",
"dropOrUpload": "Kéo Thả Hoặc Tải Lên",
"currentlyInUse": "Hình ảnh này hiện đang sử dụng các tính năng sau:",
"deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
"exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
@@ -111,7 +111,7 @@
"noImageSelected": "Không Có Ảnh Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
"imagesTab": "nh bạn vừa được tạo và lưu trong Invoke.",
"imagesTab": "nh bạn vừa được tạo và lưu trong Invoke.",
"loading": "Đang Tải",
"oldestFirst": "Cũ Nhất Trước",
"exitCompare": "Ngừng So Sánh",
@@ -122,7 +122,8 @@
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh"
"images": "Hình Ảnh",
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
},
"common": {
"ipAdapter": "IP Adapter",
@@ -254,9 +255,18 @@
"options_withCount_other": "{{count}} thiết lập"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
"compatibleEmbeddings": "Embedding Tương Thích",
"noMatchingTriggers": "Không có trigger phù hợp"
"noMatchingTriggers": "Không có trigger phù hợp",
"generateFromImage": "Tạo sinh lệnh từ ảnh",
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
"expandingPrompt": "Đang mở rộng lệnh...",
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
"replace": "Thay Thế",
"insert": "Chèn",
"discard": "Huỷ Bỏ"
},
"queue": {
"resume": "Tiếp Tục",
@@ -289,7 +299,7 @@
"pruneTooltip": "Cắt bớt {{item_count}} mục đã hoàn tất",
"pruneSucceeded": "Đã cắt bớt {{item_count}} mục đã hoàn tất khỏi hàng",
"clearTooltip": "Huỷ Và Dọn Dẹp Tất Cả Mục",
"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ.",
"clearQueueAlertDialog": "Dọn dẹp hàng đợi sẽ ngay lập tức huỷ tất cả mục đang xử lý và làm sạch hàng hoàn toàn. Bộ lọc đang chờ xử lý sẽ bị huỷ bỏ và Vùng Dựng Canva sẽ được khởi động lại.",
"session": "Phiên",
"item": "Mục",
"resumeFailed": "Có Vấn Đề Khi Tiếp Tục Bộ Xử Lý",
@@ -333,13 +343,14 @@
"retrySucceeded": "Mục Đã Thử Lại",
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
"retryItem": "Thử Lại Mục",
"credits": "Nguồn"
"credits": "Nguồn",
"cancelAllExceptCurrent": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại"
},
"hotkeys": {
"canvas": {
"fitLayersToCanvas": {
"title": "Xếp Vừa Layers Vào Canvas",
"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer."
"desc": "Căn chỉnh để góc nhìn vừa vặn với tất cả layer nhìn thấy dược."
},
"setZoomTo800Percent": {
"desc": "Phóng to canvas lên 800%.",
@@ -453,6 +464,34 @@
"applyFilter": {
"title": "Áp Dụng Bộ Lộc",
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
},
"settings": {
"behavior": "Hành Vi",
"display": "Hiển Thị",
"grid": "Lưới",
"debug": "Gỡ Lỗi"
},
"toggleNonRasterLayers": {
"title": "Bật/Tắt Layer Không Thuộc Dạng Raster",
"desc": "Hiện hoặc ẩn tất cả layer không thuộc dạng raster (Layer Điều Khiển Được, Lớp Phủ Inpaint, Chỉ Dẫn Khu Vực)."
},
"invertMask": {
"title": "Đảo Ngược Lớp Phủ",
"desc": "Đảo ngược lớp phủ inpaint được chọn, tạo một lớp phủ mới với độ trong suốt đối nghịch."
},
"fitBboxToMasks": {
"title": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
"desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào lớp phủ inpaint nhìn thấy được"
},
"applySegmentAnything": {
"title": "Áp Dụng Segment Anything",
"desc": "Áp dụng lớp phủ Segment Anything hiện tại.",
"key": "enter"
},
"cancelSegmentAnything": {
"title": "Huỷ Segment Anything",
"desc": "Huỷ hoạt động Segment Anything hiện tại.",
"key": "esc"
}
},
"workflows": {
@@ -582,6 +621,10 @@
"clearSelection": {
"desc": "Xoá phần lựa chọn hiện tại nếu có.",
"title": "Xoá Phần Lựa Chọn"
},
"starImage": {
"title": "Dấu/Huỷ Sao Hình Ảnh",
"desc": "Đánh dấu sao hoặc huỷ đánh dấu sao ảnh được chọn."
}
},
"app": {
@@ -641,6 +684,11 @@
"selectModelsTab": {
"desc": "Chọn tab Model (Mô Hình).",
"title": "Chọn Tab Model"
},
"selectGenerateTab": {
"title": "Chọn Tab Tạo Sinh",
"desc": "Chọn tab Tạo Sinh.",
"key": "1"
}
},
"searchHotkeys": "Tìm Phím tắt",
@@ -695,7 +743,7 @@
"cancel": "Huỷ",
"huggingFace": "HuggingFace (HF)",
"huggingFacePlaceholder": "chủ-sỡ-hữu/tên-model",
"includesNModels": "Thêm vào {{n}} model và dependency của nó",
"includesNModels": "Thêm vào {{n}} model và dependency của nó.",
"localOnly": "chỉ ở trên máy chủ",
"manual": "Thủ Công",
"convertToDiffusersHelpText4": "Đây là quá trình diễn ra chỉ một lần. Nó có thể tốn tầm 30-60 giây tuỳ theo thông số kỹ thuật của máy tính.",
@@ -742,7 +790,7 @@
"simpleModelPlaceholder": "Url hoặc đường đẫn đến tệp hoặc thư mục chứa diffusers trong máy chủ",
"selectModel": "Chọn Model",
"spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
"starterBundles": "Quà Tân Thủ",
"starterBundles": "Gói Khởi Đầu",
"vae": "VAE",
"urlOrLocalPath": "URL / Đường Dẫn",
"triggerPhrases": "Từ Ngữ Kích Hoạt",
@@ -794,7 +842,30 @@
"manageModels": "Quản Lý Model",
"hfTokenReset": "Làm Mới HF Token",
"relatedModels": "Model Liên Quan",
"showOnlyRelatedModels": "Liên Quan"
"showOnlyRelatedModels": "Liên Quan",
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
"nToInstall": "Còn {{count}} để tải",
"nAlreadyInstalled": "Có {{count}} đã tải",
"bundleAlreadyInstalled": "Gói đã được cài sẵn",
"bundleAlreadyInstalledDesc": "Tất cả model trong gói {{bundleName}} đã được cài sẵn.",
"launchpadTab": "Launchpad",
"launchpad": {
"welcome": "Chào mừng đến Trình Quản Lý Model",
"description": "Invoke yêu cầu tải model nhằm tối ưu hoá các tính năng trên nền tảng. Chọn tải các phương án thủ công hoặc khám phá các model khởi đầu thích hợp.",
"manualInstall": "Tải Thủ Công",
"urlDescription": "Tải model bằng URL hoặc đường dẫn trên máy. Phù hợp để cụ thể model muốn thêm vào.",
"huggingFaceDescription": "Duyệt và cài đặt model từ các repository trên HuggingFace.",
"scanFolderDescription": "Quét một thư mục trên máy để tự động tra và tải model.",
"recommendedModels": "Model Khuyến Nghị",
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
"quickStart": "Gói Khởi Đầu Nhanh",
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -802,7 +873,7 @@
"imageDetails": "Chi Tiết Ảnh",
"createdBy": "Được Tạo Bởi",
"parsingFailed": "Lỗi Cú Pháp",
"canvasV2Metadata": "Canvas",
"canvasV2Metadata": "Layer Canvas",
"parameterSet": "Dữ liệu tham số {{parameter}}",
"positivePrompt": "Lệnh Tích Cực",
"recallParameter": "Gợi Nhớ {{label}}",
@@ -1047,7 +1118,23 @@
"unknownField_withName": "Vùng Dữ Liệu Không Rõ \"{{name}}\"",
"unexpectedField_withName": "Sai Vùng Dữ Liệu \"{{name}}\"",
"unknownFieldEditWorkflowToFix_withName": "Workflow chứa vùng dữ liệu không rõ \"{{name}}\".\nHãy biên tập workflow để sửa lỗi.",
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\""
"missingField_withName": "Thiếu Vùng Dữ Liệu \"{{name}}\"",
"layout": {
"autoLayout": "Bố Cục Tự Động",
"layeringStrategy": "Chiến Lược Phân Layer",
"networkSimplex": "Network Simplex",
"longestPath": "Đường Đi Dài Nhất",
"nodeSpacing": "Khoảng Cách Node",
"layerSpacing": "Khoảng Cách Layer",
"layoutDirection": "Hướng Bố Cục",
"layoutDirectionRight": "Phải",
"layoutDirectionDown": "Xuống",
"alignment": "Căn Chỉnh Node",
"alignmentUL": "Trên Cùng Bên Trái",
"alignmentDL": "Dưới Cùng Bên Trái",
"alignmentUR": "Trên Cùng Bên Phải",
"alignmentDR": "Dưới Cùng Bên Phải"
}
},
"popovers": {
"paramCFGRescaleMultiplier": {
@@ -1474,6 +1561,20 @@
"Lát khối liền mạch bức ảnh theo trục ngang."
],
"heading": "Lát Khối Liền Mạch Trục X"
},
"tileSize": {
"heading": "Kích Thước Khối",
"paragraphs": [
"Điều chỉnh kích thước của khối trong quá trình upscale. Khối càng lớn, bộ nhớ được sử dụng càng nhiều, nhưng có thể tạo sinh ảnh tốt hơn.",
"Model SD1.5 mặt định là 768, trong khi SDXL mặc định là 1024. Giảm kích thước khối nếu các gặp vấn đề bộ nhớ."
]
},
"tileOverlap": {
"heading": "Chồng Chéo Khối",
"paragraphs": [
"Điều chỉnh sự chồng chéo giữa các khối liền kề trong quá trình upscale. Giá trị chồng chép lớn giúp giảm sự rõ nét của các chỗ nối nhau, nhưng ngốn nhiều bộ nhớ hơn.",
"Giá trị mặc định (128) hoạt động tốt với đa số trường hợp, nhưng bạn có thể điều chỉnh cho phù hợp với nhu cầu cụ thể và hạn chế về bộ nhớ."
]
}
},
"models": {
@@ -1487,7 +1588,8 @@
"defaultVAE": "VAE Mặc Định",
"noMatchingModels": "Không có Model phù hợp",
"noModelsAvailable": "Không có model",
"selectModel": "Chọn Model"
"selectModel": "Chọn Model",
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
},
"parameters": {
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
@@ -1538,7 +1640,10 @@
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với LUX Kontext thông qua BFL API",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1869,7 +1974,8 @@
"canvasGroup": "Canvas",
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
"copyToClipboard": "Sao Chép Vào Clipboard",
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard",
"newResizedControlLayer": "Layer Điều Khiển Được Đã Chỉnh Kích Thước Mới"
},
"stagingArea": {
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
@@ -2050,7 +2156,11 @@
},
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
"isolatedPreview": "Xem Trước Phần Cô Lập"
"isolatedPreview": "Xem Trước Phần Cô Lập",
"saveAllImagesToGallery": {
"label": "Chuyển Sản Phẩm Tạo Sinh Mới Vào Thư Viện Ảnh",
"alert": "Đang chuyển sản phẩm tạo sinh mới vào Thư Viện Ảnh, bỏ qua Canvas"
}
},
"tool": {
"eraser": "Tẩy",
@@ -2062,8 +2172,8 @@
"colorPicker": "Chọn Màu"
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ thư viện ảnh vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton> hoặc kéo ảnh từ thư viện ảnh vào Ảnh Mẫu để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
@@ -2115,7 +2225,22 @@
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
"imageNoise": "Độ Nhiễu Hình Ảnh",
"denoiseLimit": "Giới Hạn Khử Nhiễu",
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
"autoSwitch": {
"off": "Tắt",
"switchOnStart": "Khi Bắt Đầu",
"switchOnFinish": "Khi Kết Thúc"
},
"fitBboxToMasks": "Xếp Vừa Hộp Giới Hạn Vào Lớp Phủ",
"invertMask": "Đảo Ngược Lớp Phủ",
"maxRefImages": "Ảnh Mẫu Tối Đa",
"useAsReferenceImage": "Dùng Làm Ảnh Mẫu"
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2161,7 +2286,8 @@
"deleteImage": "Xoá Hình Ảnh",
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh"
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh",
"togglePromptPreviews": "Bật/Tắt Xem Trước Lệnh"
},
"system": {
"enableLogging": "Bật Chế Độ Ghi Log",
@@ -2257,20 +2383,131 @@
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
"canvasTooLarge": "Canvas Quá Lớn",
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng chủa canvas và thử lại.",
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
"noValidLayerAdapters": "Không có Layer Adaper Phù Hợp",
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại.",
"maskInverted": "Đã Đảo Ngược Lớp Phủ",
"maskInvertFailed": "Thất Bại Khi Đảo Ngược Lớp Phủ",
"noVisibleMasks": "Không Có Lớp Phủ Đang Hiển Thị",
"noVisibleMasksDesc": "Tạo hoặc bật ít nhất một lớp phủ inpaint để đảo ngược",
"noInpaintMaskSelected": "Không Có Lớp Phủ Inpant Được Chọn",
"noInpaintMaskSelectedDesc": "Chọn một lớp phủ inpaint để đảo ngược",
"invalidBbox": "Hộp Giới Hạn Không Hợp Lệ",
"invalidBboxDesc": "Hợp giới hạn có kích thước không hợp lệ"
},
"ui": {
"tabs": {
"gallery": "Thư Viện Ảnh",
"models": "Models",
"generation": "Generation (Máy Tạo Sinh)",
"upscaling": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)",
"canvas": "Canvas (Vùng Ảnh)",
"upscalingTab": "$t(common.tab) $t(ui.tabs.upscaling)",
"modelsTab": "$t(common.tab) $t(ui.tabs.models)",
"queue": "Queue (Hàng Đợi)",
"workflows": "Workflow (Luồng Làm Việc)",
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)",
"generate": "Tạo Sinh"
},
"launchpad": {
"workflowsTitle": "Đi sâu hơn với Workflow.",
"upscalingTitle": "Upscale và thêm chi tiết.",
"canvasTitle": "Biên tập và làm đẹp trên Canvas.",
"generateTitle": "Tạo sinh ảnh từ lệnh chữ.",
"modelGuideText": "Muốn biết lệnh nào tốt nhất cho từng model chứ?",
"modelGuideLink": "Xem thêm Hướng Dẫn Model.",
"workflows": {
"description": "Workflow là các template tái sử dụng được sẽ tự động hoá các tác vụ tạo sinh ảnh, cho phép bạn nhanh chóng thực hiện cách thao tác phức tạp và nhận được kết quả nhất quán.",
"learnMoreLink": "Học thêm cách tạo ra workflow",
"browseTemplates": {
"title": "Duyệt Template Workflow",
"description": "Chọn từ các workflow có sẵn cho những tác vụ cơ bản"
},
"createNew": {
"title": "Tạo workflow mới",
"description": "Tạo workflow mới từ ban đầu"
},
"loadFromFile": {
"title": "Tải workflow từ tệp",
"description": "Tải lên workflow để bắt đầu với những thiết lập sẵn có"
}
},
"upscaling": {
"uploadImage": {
"title": "Tải Ảnh Để Upscale",
"description": "Nhấp hoặc kéo ảnh để upscale (JPG, PNG, WebP lên đến 100MB)"
},
"replaceImage": {
"title": "Thay Thế Ảnh Hiện Tại",
"description": "Nhấp hoặc kéo ảnh mới để thay thế cái hiện tại"
},
"imageReady": {
"title": "Ảnh Đã Sẵn Sàng",
"description": "Bấm 'Kích Hoạt' để chuẩn bị upscale"
},
"readyToUpscale": {
"title": "Chuẩn bị upscale!",
"description": "Điều chỉnh thiết lập bên dưới, sau đó bấm vào nút 'Khởi Động' để chuẩn bị upscale ảnh."
},
"upscaleModel": "Model Upscale",
"model": "Model",
"helpText": {
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
},
"scale": "Kích Thước",
"creativityAndStructure": {
"title": "Độ Sáng Tạo & Cấu Trúc Mặc Định",
"conservative": "Bảo toàn",
"balanced": "Cân bằng",
"creative": "Sáng tạo",
"artistic": "Thẩm mỹ"
}
},
"createNewWorkflowFromScratch": "Tạo workflow mới từ đầu",
"browseAndLoadWorkflows": "Duyệt và tải workflow có sẵn",
"addStyleRef": {
"title": "Thêm Phong Cách Mẫu",
"description": "Thêm ảnh để chuyển đổi diện mạo của nó."
},
"editImage": {
"title": "Biên Tập Ảnh",
"description": "Thêm ảnh để chỉnh sửa."
},
"generateFromText": {
"title": "Tạo Sinh Từ Chữ",
"description": "Nhập lệnh vào và Kích Hoạt."
},
"useALayoutImage": {
"title": "Dùng Bố Cục Ảnh",
"description": "Thêm ảnh để điều khiển bố cục."
},
"generate": {
"canvasCalloutTitle": "Đang tìm cách để điều khiển, chỉnh sửa, và làm lại ảnh?",
"canvasCalloutLink": "Vào Canvas cho nhiều tính năng hơn."
}
},
"panels": {
"launchpad": "Launchpad",
"workflowEditor": "Trình Biên Tập Workflow",
"imageViewer": "Trình Xem Ảnh",
"canvas": "Canvas"
}
},
"workflows": {
@@ -2423,7 +2660,10 @@
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale.",
"tileControl": "Điều Chỉnh Khối",
"tileSize": "Kích Thước Khối",
"tileOverlap": "Chồng Chéo Khối"
},
"newUserExperience": {
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
@@ -2439,8 +2679,8 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
"Trạng thái Studio được lưu vào server, giúp bạn tiếp tục công việc ở mọi thiết bị.",
"Hỗ trợ nhiều ảnh mẫu cho FLUX KONTEXT (chỉ cho model trên máy)."
]
},
"upsell": {
@@ -2452,64 +2692,18 @@
"supportVideos": {
"supportVideos": "Video Hỗ Trợ",
"gettingStarted": "Bắt Đầu Làm Quen",
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
"watch": "Xem",
"studioSessionsDesc": "Tham gia <DiscordLink /> để xem các buổi phát trực tiếp và đặt câu hỏi. Các phiên được đăng lên trên playlist các tuần tiếp theo.",
"videos": {
"howDoIDoImageToImageTransformation": {
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
"gettingStarted": {
"title": "Bắt Đầu Với Invoke",
"description": "Hoàn thành các video bao hàm mọi thứ bạn cần biết để bắt đầu với Invoke, từ tạo bức ảnh đầu tiên đến các kỹ thuật phức tạp khác."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
},
"upscaling": {
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
},
"howDoIEditOnTheCanvas": {
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
},
"howDoIUseInpaintMasks": {
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
},
"howDoIOutpaint": {
"title": "Làm Sao Để Tôi Outpaint?",
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
},
"creatingYourFirstImage": {
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
},
"usingControlLayersAndReferenceGuides": {
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
},
"understandingImageToImageAndDenoising": {
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
},
"exploringAIModelsAndConceptAdapters": {
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
"studioSessions": {
"title": "Phiên Studio",
"description": "Đào sâu vào các phiên họp để khám phá những tính năng nâng cao của Invoke, sáng tạo workflow, và thảo luận cộng đồng."
}
},
"controlCanvas": "Điều Khiển Canvas",
"watch": "Xem"
}
},
"modelCache": {
"clearSucceeded": "Cache Model Đã Được Dọn",

View File

@@ -1772,7 +1772,6 @@
},
"ui": {
"tabs": {
"generation": "生成",
"queue": "队列",
"canvas": "画布",
"upscaling": "放大中",

View File

@@ -2,16 +2,16 @@ import { Box } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $globalIsLoading } from 'app/store/nanostores/globalIsLoading';
import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction';
import { clearStorage } from 'app/store/enhancers/reduxRemember/driver';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import { useClearStorage } from 'common/hooks/useClearStorage';
import { AppContent } from 'features/ui/components/AppContent';
import { memo, useCallback } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import ThemeLocaleProvider from './ThemeLocaleProvider';
const DEFAULT_CONFIG = {};
interface Props {
@@ -20,24 +20,25 @@ interface Props {
}
const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
const globalIsLoading = useStore($globalIsLoading);
const clearStorage = useClearStorage();
const didStudioInit = useStore($didStudioInit);
const handleReset = useCallback(() => {
clearStorage();
location.reload();
return false;
}, [clearStorage]);
}, []);
return (
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
<AppContent />
{globalIsLoading && <Loading />}
</Box>
<GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
<GlobalModalIsolator />
</ErrorBoundary>
<ThemeLocaleProvider>
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
<AppContent />
{!didStudioInit && <Loading />}
</Box>
<GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
<GlobalModalIsolator />
</ErrorBoundary>
</ThemeLocaleProvider>
);
};

View File

@@ -1,6 +1,8 @@
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { setupListeners } from '@reduxjs/toolkit/query';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection';
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
import { useLogger } from 'app/logging/useLogger';
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
@@ -10,13 +12,16 @@ import type { PartialAppConfig } from 'app/types/invokeai';
import { useFocusRegionWatcher } from 'common/hooks/focus';
import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
import { size } from 'es-toolkit/compat';
import { useDndMonitor } from 'features/dnd/useDndMonitor';
import { useDynamicPromptsWatcher } from 'features/dynamicPrompts/hooks/useDynamicPromptsWatcher';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators';
import { useReadinessWatcher } from 'features/queue/store/readiness';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { useNavigationApi } from 'features/ui/layouts/use-navigation-api';
import i18n from 'i18n';
import { memo, useEffect } from 'react';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
@@ -43,26 +48,33 @@ export const GlobalHookIsolator = memo(
useGetOpenAPISchemaQuery();
useSyncLoggingConfig();
useCloseChakraTooltipsOnDragFix();
useNavigationApi();
useDndMonitor();
useSyncNodeErrors();
useSyncLangDirection();
// Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
// and/or in progress canvas sessions.
useGetQueueCountsByDestinationQuery(queueCountArg);
useSyncExecutionState();
useEffect(() => {
i18n.changeLanguage(language);
}, [language]);
useEffect(() => {
if (size(config)) {
logger.info({ config }, 'Received config');
dispatch(configChanged(config));
}
logger.info({ config }, 'Received config');
dispatch(configChanged(config));
}, [dispatch, config, logger]);
useEffect(() => {
dispatch(appStarted());
}, [dispatch]);
useEffect(() => {
return setupListeners(dispatch);
}, [dispatch]);
useStudioInitAction(studioInitAction);
useStarterModelsToast();
useSyncQueueStatus();

View File

@@ -1,11 +1,14 @@
import { useAppSelector } from 'app/store/storeHooks';
import { useIsRegionFocused } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { useImageActions } from 'features/gallery/hooks/useImageActions';
import { useLoadWorkflow } from 'features/gallery/hooks/useLoadWorkflow';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo } from 'react';
import { useImageDTO } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
@@ -27,59 +30,64 @@ GlobalImageHotkeys.displayName = 'GlobalImageHotkeys';
const GlobalImageHotkeysInternal = memo(({ imageDTO }: { imageDTO: ImageDTO }) => {
const isGalleryFocused = useIsRegionFocused('gallery');
const isViewerFocused = useIsRegionFocused('viewer');
const imageActions = useImageActions(imageDTO);
const isStaging = useAppSelector(selectIsStaging);
const isUpscalingEnabled = useFeatureStatus('upscaling');
const isFocusOK = isGalleryFocused || isViewerFocused;
const recallAll = useRecallAll(imageDTO);
const recallRemix = useRecallRemix(imageDTO);
const recallPrompts = useRecallPrompts(imageDTO);
const recallSeed = useRecallSeed(imageDTO);
const recallDimensions = useRecallDimensions(imageDTO);
const loadWorkflow = useLoadWorkflow(imageDTO);
useRegisteredHotkeys({
id: 'loadWorkflow',
category: 'viewer',
callback: imageActions.loadWorkflow,
options: { enabled: isGalleryFocused || isViewerFocused },
dependencies: [imageActions.loadWorkflow, isGalleryFocused, isViewerFocused],
callback: loadWorkflow.load,
options: { enabled: loadWorkflow.isEnabled && isFocusOK },
dependencies: [loadWorkflow, isFocusOK],
});
useRegisteredHotkeys({
id: 'recallAll',
category: 'viewer',
callback: imageActions.recallAll,
options: { enabled: !isStaging && (isGalleryFocused || isViewerFocused) },
dependencies: [imageActions.recallAll, isStaging, isGalleryFocused, isViewerFocused],
callback: recallAll.recall,
options: { enabled: recallAll.isEnabled && isFocusOK },
dependencies: [recallAll, isFocusOK],
});
useRegisteredHotkeys({
id: 'recallSeed',
category: 'viewer',
callback: imageActions.recallSeed,
options: { enabled: isGalleryFocused || isViewerFocused },
dependencies: [imageActions.recallSeed, isGalleryFocused, isViewerFocused],
callback: recallSeed.recall,
options: { enabled: recallSeed.isEnabled && isFocusOK },
dependencies: [recallSeed, isFocusOK],
});
useRegisteredHotkeys({
id: 'recallPrompts',
category: 'viewer',
callback: imageActions.recallPrompts,
options: { enabled: isGalleryFocused || isViewerFocused },
dependencies: [imageActions.recallPrompts, isGalleryFocused, isViewerFocused],
callback: recallPrompts.recall,
options: { enabled: recallPrompts.isEnabled && isFocusOK },
dependencies: [recallPrompts, isFocusOK],
});
useRegisteredHotkeys({
id: 'remix',
category: 'viewer',
callback: imageActions.remix,
options: { enabled: isGalleryFocused || isViewerFocused },
dependencies: [imageActions.remix, isGalleryFocused, isViewerFocused],
callback: recallRemix.recall,
options: { enabled: recallRemix.isEnabled && isFocusOK },
dependencies: [recallRemix, isFocusOK],
});
useRegisteredHotkeys({
id: 'useSize',
category: 'viewer',
callback: imageActions.recallSize,
options: { enabled: !isStaging && (isGalleryFocused || isViewerFocused) },
dependencies: [imageActions.recallSize, isStaging, isGalleryFocused, isViewerFocused],
});
useRegisteredHotkeys({
id: 'runPostprocessing',
category: 'viewer',
callback: imageActions.upscale,
options: { enabled: isUpscalingEnabled && isViewerFocused },
dependencies: [isUpscalingEnabled, imageDTO, isViewerFocused],
callback: recallDimensions.recall,
options: { enabled: recallDimensions.isEnabled && isFocusOK },
dependencies: [recallDimensions, isFocusOK],
});
return null;
});

View File

@@ -1,10 +1,6 @@
import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import {
NewCanvasSessionDialog,
NewGallerySessionDialog,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
@@ -50,8 +46,6 @@ export const GlobalModalIsolator = memo(() => {
<RefreshAfterResetModal />
<DeleteBoardModal />
<GlobalImageHotkeys />
<NewGallerySessionDialog />
<NewCanvasSessionDialog />
<ImageContextMenu />
<FullscreenDropzone />
<VideosModal />

View File

@@ -5,6 +5,7 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
import { addStorageListeners } from 'app/store/enhancers/reduxRemember/driver';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
@@ -35,14 +36,13 @@ import {
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useState } from 'react';
import { Provider } from 'react-redux';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
import { $socketOptions } from 'services/events/stores';
import type { ManagerOptions, SocketOptions } from 'socket.io-client';
const App = lazy(() => import('./App'));
const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
interface Props extends PropsWithChildren {
apiUrl?: string;
@@ -71,6 +71,7 @@ interface Props extends PropsWithChildren {
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
storagePersistThrottle?: number;
}
const InvokeAIUI = ({
@@ -97,7 +98,11 @@ const InvokeAIUI = ({
loggingOverrides,
onClickGoToModelManager,
whatsNew,
storagePersistThrottle = 2000,
}: Props) => {
const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
const [didRehydrate, setDidRehydrate] = useState(false);
useLayoutEffect(() => {
/*
* We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
@@ -309,30 +314,36 @@ const InvokeAIUI = ({
};
}, [isDebugging]);
const store = useMemo(() => {
return createStore(projectId);
}, [projectId]);
useEffect(() => {
const onRehydrated = () => {
setDidRehydrate(true);
};
const store = createStore({ persist: true, persistThrottle: storagePersistThrottle, onRehydrated });
setStore(store);
$store.set(store);
if (import.meta.env.MODE === 'development') {
window.$store = $store;
}
() => {
const removeStorageListeners = addStorageListeners();
return () => {
removeStorageListeners();
setStore(undefined);
$store.set(undefined);
if (import.meta.env.MODE === 'development') {
window.$store = undefined;
}
};
}, [store]);
}, [storagePersistThrottle]);
if (!store || !didRehydrate) {
return <Loading />;
}
return (
<React.StrictMode>
<Provider store={store}>
<React.Suspense fallback={<Loading />}>
<ThemeLocaleProvider>
<App config={config} studioInitAction={studioInitAction} />
</ThemeLocaleProvider>
<App config={config} studioInitAction={studioInitAction} />
</React.Suspense>
</Provider>
</React.StrictMode>

View File

@@ -3,43 +3,39 @@ import 'overlayscrollbars/overlayscrollbars.css';
import '@xyflow/react/dist/base.css';
import 'common/components/OverlayScrollbars/overlayscrollbars.css';
import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { ChakraProvider, DarkMode, extendTheme, theme as baseTheme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $direction } from 'app/hooks/useSyncLangDirection';
import type { ReactNode } from 'react';
import { memo, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { memo, useMemo } from 'react';
type ThemeLocaleProviderProps = {
children: ReactNode;
};
const buildTheme = (direction: 'ltr' | 'rtl') => {
return extendTheme({
...baseTheme,
direction,
shadows: {
...baseTheme.shadows,
selected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverSelected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverUnselected:
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
selectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
hoverSelectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
},
});
};
function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
const { i18n } = useTranslation();
const direction = i18n.dir();
const theme = useMemo(() => {
return extendTheme({
..._theme,
direction,
shadows: {
..._theme.shadows,
selected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverSelected:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
hoverUnselected:
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
selectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
hoverSelectedForCompare:
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
},
});
}, [direction]);
useEffect(() => {
document.body.dir = direction;
}, [direction]);
const direction = useStore($direction);
const theme = useMemo(() => buildTheme(direction), [direction]);
return (
<ChakraProvider theme={theme} toastOptions={TOAST_OPTIONS}>

View File

@@ -8,7 +8,7 @@ import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { MetadataUtils } from 'features/metadata/parsing';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibraryModal';
import {
@@ -19,7 +19,8 @@ import {
} from 'features/nodes/store/workflowLibrarySlice';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { atom } from 'nanostores';
import { useCallback, useEffect } from 'react';
@@ -90,6 +91,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const overrides: Partial<CanvasRasterLayerState> = {
objects: [imageObject],
};
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
store.dispatch(canvasReset());
store.dispatch(rasterLayerAdded({ overrides, isSelected: true }));
store.dispatch(sentImageToCanvas());
@@ -116,23 +118,23 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const metadata = getImageMetadataResult.value;
store.dispatch(canvasReset());
// This shows a toast
await parseAndRecallAllMetadata(metadata, true);
await MetadataUtils.recallAll(metadata, store);
},
[store, t]
);
const handleLoadWorkflow = useCallback(
async (workflowId: string) => {
(workflowId: string) => {
// This shows a toast
await loadWorkflowWithDialog({
loadWorkflowWithDialog({
type: 'library',
data: workflowId,
onSuccess: () => {
store.dispatch(setActiveTab('workflows'));
navigationApi.switchToTab('workflows');
},
});
},
[loadWorkflowWithDialog, store]
[loadWorkflowWithDialog]
);
const handleSelectStylePreset = useCallback(
@@ -146,7 +148,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
return;
}
store.dispatch(activeStylePresetIdChanged(stylePresetId));
store.dispatch(setActiveTab('canvas'));
navigationApi.switchToTab('canvas');
toast({
title: t('toast.stylePresetLoaded'),
status: 'info',
@@ -156,33 +158,33 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
);
const handleGoToDestination = useCallback(
(destination: StudioDestinationAction['data']['destination']) => {
async (destination: StudioDestinationAction['data']['destination']) => {
switch (destination) {
case 'generation':
// Go to the canvas tab, open the image viewer, and enable send-to-gallery mode
// Go to the generate tab, open the launchpad
await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
store.dispatch(paramsReset());
store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
break;
case 'canvas':
// Go to the canvas tab, close the image viewer, and disable send-to-gallery mode
store.dispatch(canvasReset());
// Go to the canvas tab, open the launchpad
await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
break;
case 'workflows':
// Go to the workflows tab
store.dispatch(setActiveTab('workflows'));
navigationApi.switchToTab('workflows');
break;
case 'upscaling':
// Go to the upscaling tab
store.dispatch(setActiveTab('upscaling'));
navigationApi.switchToTab('upscaling');
break;
case 'viewAllWorkflows':
// Go to the workflows tab and open the workflow library modal
store.dispatch(setActiveTab('workflows'));
navigationApi.switchToTab('workflows');
$isWorkflowLibraryModalOpen.set(true);
break;
case 'viewAllWorkflowsRecommended':
// Go to the workflows tab and open the workflow library modal with the recommended workflows view
store.dispatch(setActiveTab('workflows'));
navigationApi.switchToTab('workflows');
$isWorkflowLibraryModalOpen.set(true);
store.dispatch(workflowLibraryViewChanged('defaults'));
store.dispatch(workflowLibraryTagsReset());
@@ -194,7 +196,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
break;
case 'viewAllStylePresets':
// Go to the canvas tab and open the style presets menu
store.dispatch(setActiveTab('canvas'));
navigationApi.switchToTab('canvas');
$isStylePresetsMenuOpen.set(true);
break;
}

View File

@@ -0,0 +1,36 @@
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { atom } from 'nanostores';
import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';
/**
* Global atom storing the language direction, to be consumed by the Chakra theme.
*
* Why do we need this? We have a kind of catch-22:
* - The Chakra theme needs to know the language direction to apply the correct styles.
* - The language direction is determined by i18n and the language selection.
* - We want our error boundary to be themed.
* - It's possible that i18n can throw if the language selection is invalid or not supported.
*
* Previously, we had the logic in this file in the theme provider, which wrapped the error boundary. The error
* was properly themed. But then, if i18n threw in the theme provider, the error boundary did not catch the
* error. The app would crash to a white screen.
*
* We tried swapping the component hierarchy so that the error boundary wraps the theme provider, but then the
* error boundary isn't themed!
*
* The solution is to move this i18n direction logic out of the theme provider and into a hook that we can use
* within the error boundary. The error boundary will be themed, _and_ catch any i18n errors.
*/
export const $direction = atom<'ltr' | 'rtl'>('ltr');
export const useSyncLangDirection = () => {
useAssertSingleton('useSyncLangDirection');
const { i18n, t } = useTranslation();
useEffect(() => {
const direction = i18n.dir();
$direction.set(direction);
document.body.dir = direction;
}, [i18n, t]);
};

View File

@@ -2,7 +2,7 @@ import { createLogWriter } from '@roarr/browser-log-writer';
import { atom } from 'nanostores';
import type { Logger, MessageSerializer } from 'roarr';
import { ROARR, Roarr } from 'roarr';
import { z } from 'zod/v4';
import { z } from 'zod';
const serializeMessage: MessageSerializer = (message) => {
return JSON.stringify(message);
@@ -93,5 +93,7 @@ export const configureLogging = (
localStorage.setItem('ROARR_FILTER', filter);
}
ROARR.write = createLogWriter();
const styleOutput = localStorage.getItem('ROARR_STYLE_OUTPUT') === 'false' ? false : true;
ROARR.write = createLogWriter({ styleOutput });
};
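For reference, here is a minimal usage sketch of the new styled-output switch. The ROARR_STYLE_OUTPUT key and its 'false' check come straight from the change above; running it from the devtools console and reloading is an assumption about typical usage.

// Run in the browser devtools console, then reload so configureLogging re-reads the flag.
// Any value other than the exact string 'false' leaves styled log output enabled.
localStorage.setItem('ROARR_STYLE_OUTPUT', 'false');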

View File

@@ -1,3 +1,2 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
export const EMPTY_OBJECT = {};

View File

@@ -1,40 +1,209 @@
import { logger } from 'app/logging/logger';
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
import { $authToken } from 'app/store/nanostores/authToken';
import { $projectId } from 'app/store/nanostores/projectId';
import { $queueId } from 'app/store/nanostores/queueId';
import type { UseStore } from 'idb-keyval';
import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
import { atom } from 'nanostores';
import { createStore as idbCreateStore, del as idbDel, get as idbGet } from 'idb-keyval';
import type { Driver } from 'redux-remember';
import { serializeError } from 'serialize-error';
import { buildV1Url, getBaseUrl } from 'services/api';
import type { JsonObject } from 'type-fest';
// Create a custom idb-keyval store (just needed to customize the name)
const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
const log = logger('system');
export const clearIdbKeyValStore = () => {
clear($idbKeyValStore.get());
const getUrl = (endpoint: 'get_by_key' | 'set_by_key' | 'delete', key?: string) => {
const baseUrl = getBaseUrl();
const query: Record<string, string> = {};
if (key) {
query['key'] = key;
}
const path = buildV1Url(`client_state/${$queueId.get()}/${endpoint}`, query);
const url = `${baseUrl}/${path}`;
return url;
};
// Create redux-remember driver, wrapping idb-keyval
export const idbKeyValDriver: Driver = {
getItem: (key) => {
try {
return get(key, $idbKeyValStore.get());
} catch (originalError) {
throw new StorageError({
key,
projectId: $projectId.get(),
originalError,
});
}
},
setItem: (key, value) => {
try {
return set(key, value, $idbKeyValStore.get());
} catch (originalError) {
throw new StorageError({
key,
value,
projectId: $projectId.get(),
originalError,
});
}
},
const getHeaders = () => {
const headers = new Headers();
const authToken = $authToken.get();
const projectId = $projectId.get();
if (authToken) {
headers.set('Authorization', `Bearer ${authToken}`);
}
if (projectId) {
headers.set('project-id', projectId);
}
return headers;
};
// Persistence happens per slice. To track when persistence is in progress, maintain a ref count, incrementing
// it when a slice is being persisted and decrementing it when the persistence is done.
let persistRefCount = 0;
// Keep track of the last persisted state for each key to avoid unnecessary network requests.
//
// `redux-remember` persists individual slices of state, so we can implicitly denylist a slice by not giving it a
// persist config.
//
// However, we may need to avoid persisting individual _fields_ of a slice. `redux-remember` does not provide a
// way to do this directly.
//
// To accomplish this, we add a layer of logic on top of the `redux-remember`. In the state serializer function
// provided to `redux-remember`, we can omit certain fields from the state that we do not want to persist. See
// the implementation in `store.ts` for this logic.
//
// This logic is unknown to `redux-remember`. When an omitted field changes, it will still attempt to persist the
// whole slice, even if the final, _serialized_ slice value is unchanged.
//
// To avoid unnecessary network requests, we keep track of the last persisted state for each key in this map.
// If the value to be persisted is the same as the last persisted value, we will skip the network request.
const lastPersistedState = new Map<string, string | undefined>();
// As of v6.3.0, we use server-backed storage for client state. This replaces the previous IndexedDB-based storage,
// which was implemented using `idb-keyval`.
//
// To facilitate a smooth transition, we implement a migration strategy that attempts to retrieve values from IndexedDB
// and persist them to the new server-backed storage. This is done on a best-effort basis.
// These constants were used in the previous IndexedDB-based storage implementation.
const IDB_DB_NAME = 'invoke';
const IDB_STORE_NAME = 'invoke-store';
const IDB_STORAGE_PREFIX = '@@invokeai-';
// Lazy store creation
let _idbKeyValStore: UseStore | null = null;
const getIdbKeyValStore = () => {
if (_idbKeyValStore === null) {
_idbKeyValStore = idbCreateStore(IDB_DB_NAME, IDB_STORE_NAME);
}
return _idbKeyValStore;
};
const getIdbKey = (key: string) => {
return `${IDB_STORAGE_PREFIX}${key}`;
};
const getItem = async (key: string) => {
try {
const url = getUrl('get_by_key', key);
const headers = getHeaders();
const res = await fetch(url, { method: 'GET', headers });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
const value = await res.json();
// Best-effort migration from IndexedDB to the new storage system
log.trace({ key, value }, 'Server-backed storage value retrieved');
if (!value) {
const idbKey = getIdbKey(key);
try {
// It's a bit tricky to query IndexedDB directly to check if value exists, so we use `idb-keyval` to do it.
// Thing is, `idb-keyval` requires you to create a store to query it. End result - we are creating a store
// even if we don't use it for anything besides checking if the key is present.
const idbKeyValStore = getIdbKeyValStore();
const idbValue = await idbGet(idbKey, idbKeyValStore);
if (idbValue) {
log.debug(
{ key, idbKey, idbValue },
'No value in server-backed storage, but found value in IndexedDB - attempting migration'
);
await idbDel(idbKey, idbKeyValStore);
await setItem(key, idbValue);
log.debug({ key, idbKey, idbValue }, 'Migration successful');
return idbValue;
}
} catch (error) {
// Just log if IndexedDB retrieval fails - this is a best-effort migration.
log.debug(
{ key, idbKey, error: serializeError(error) } as JsonObject,
'Error checking for or migrating from IndexedDB'
);
}
}
lastPersistedState.set(key, value);
log.trace({ key, last: lastPersistedState.get(key), next: value }, `Getting state for ${key}`);
return value;
} catch (originalError) {
throw new StorageError({
key,
projectId: $projectId.get(),
originalError,
});
}
};
const setItem = async (key: string, value: string) => {
try {
persistRefCount++;
if (lastPersistedState.get(key) === value) {
log.trace(
{ key, last: lastPersistedState.get(key), next: value },
`Skipping persist for ${key} as value is unchanged`
);
return value;
}
log.trace({ key, last: lastPersistedState.get(key), next: value }, `Persisting state for ${key}`);
const url = getUrl('set_by_key', key);
const headers = getHeaders();
const res = await fetch(url, { method: 'POST', headers, body: value });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
const resultValue = await res.json();
lastPersistedState.set(key, resultValue);
return resultValue;
} catch (originalError) {
throw new StorageError({
key,
value,
projectId: $projectId.get(),
originalError,
});
} finally {
persistRefCount--;
if (persistRefCount < 0) {
log.trace('Persist ref count is negative, resetting to 0');
persistRefCount = 0;
}
}
};
export const reduxRememberDriver: Driver = { getItem, setItem };
export const clearStorage = async () => {
try {
persistRefCount++;
const url = getUrl('delete');
const headers = getHeaders();
const res = await fetch(url, { method: 'POST', headers });
if (!res.ok) {
throw new Error(`Response status: ${res.status}`);
}
} catch {
log.error('Failed to reset client state');
} finally {
persistRefCount--;
lastPersistedState.clear();
if (persistRefCount < 0) {
log.trace('Persist ref count is negative, resetting to 0');
persistRefCount = 0;
}
}
};
export const addStorageListeners = () => {
const onBeforeUnload = (e: BeforeUnloadEvent) => {
if (persistRefCount > 0) {
e.preventDefault();
}
};
window.addEventListener('beforeunload', onBeforeUnload);
return () => {
window.removeEventListener('beforeunload', onBeforeUnload);
};
};
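To make the driver's role concrete, below is a minimal sketch of how a redux-remember Driver such as reduxRememberDriver is typically wired into a Redux Toolkit store. This is not the project's actual store.ts: the slice, the remembered keys, and the option values are illustrative assumptions; only the driver itself comes from the file above.

import { combineReducers, configureStore, createSlice } from '@reduxjs/toolkit';
import { rememberEnhancer, rememberReducer } from 'redux-remember';
// The server-backed driver defined above.
import { reduxRememberDriver } from 'app/store/enhancers/reduxRemember/driver';

// A hypothetical slice, included only so the sketch is self-contained.
const paramsSlice = createSlice({
  name: 'params',
  initialState: { cfgScale: 7 },
  reducers: {},
});

const rootReducer = combineReducers({ params: paramsSlice.reducer });

// Only slices listed here are persisted; leaving a slice out implicitly denylists it.
const rememberedKeys = ['params'];

export const createAppStore = () =>
  configureStore({
    reducer: rememberReducer(rootReducer),
    enhancers: (getDefaultEnhancers) =>
      getDefaultEnhancers().concat(
        // persistThrottle here mirrors the 2000ms storagePersistThrottle default used in InvokeAIUI above.
        rememberEnhancer(reduxRememberDriver, rememberedKeys, { persistThrottle: 2000 })
      ),
  });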

View File

@@ -33,8 +33,9 @@ export class StorageError extends Error {
}
}
const log = logger('system');
export const errorHandler = (err: PersistError | RehydrateError) => {
const log = logger('system');
if (err instanceof PersistError) {
log.error({ error: serializeError(err) }, 'Problem persisting state');
} else if (err instanceof RehydrateError) {

View File

@@ -1,77 +0,0 @@
import type { TypedStartListening } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
import { addSetDefaultSettingsListener } from 'app/store/middleware/listenerMiddleware/listeners/setDefaultSettings';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketConnected';
import type { AppDispatch, RootState } from 'app/store/store';
import { addArchivedOrDeletedBoardListener } from './listeners/addArchivedOrDeletedBoardListener';
import { addEnqueueRequestedUpscale } from './listeners/enqueueRequestedUpscale';
export const listenerMiddleware = createListenerMiddleware();
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
const startAppListening = listenerMiddleware.startListening as AppStartListening;
export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
/**
* The RTK listener middleware is a lightweight alternative sagas/observables.
*
* Most side effect logic should live in a listener.
*/
// Image uploaded
addImageUploadedFulfilledListener(startAppListening);
// Image deleted
addDeleteBoardAndImagesFulfilledListener(startAppListening);
// User Invoked
addEnqueueRequestedLinear(startAppListening);
addEnqueueRequestedUpscale(startAppListening);
addAnyEnqueuedListener(startAppListening);
addBatchEnqueuedListener(startAppListening);
// Socket.IO
addSocketConnectedEventListener(startAppListening);
// Gallery bulk download
addBulkDownloadListeners(startAppListening);
// Boards
addImageAddedToBoardFulfilledListener(startAppListening);
addImageRemovedFromBoardFulfilledListener(startAppListening);
addBoardIdSelectedListener(startAppListening);
addArchivedOrDeletedBoardListener(startAppListening);
// Node schemas
addGetOpenAPISchemaListener(startAppListening);
// Models
addModelSelectedListener(startAppListening);
// app startup
addAppStartedListener(startAppListening);
addModelsLoadedListener(startAppListening);
addAppConfigReceivedListener(startAppListening);
// Ad-hoc upscale workflwo
addAdHocPostProcessingRequestedListener(startAppListening);
addSetDefaultSettingsListener(startAppListening);

View File

@@ -1,6 +1,6 @@
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppStartListening } from 'app/store/store';
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';

View File

@@ -1,5 +1,5 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppStartListening } from 'app/store/store';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import {
autoAddBoardIdChanged,

Some files were not shown because too many files have changed in this diff.