Compare commits

354 Commits

Author SHA1 Message Date
Brandon Rising
99146be670 Next: Remove deprecated app.on_event usage in api runner 2024-02-29 13:29:01 -05:00
psychedelicious
4426c1de5a chore: ruff 2024-02-29 13:29:01 -05:00
blessedcoolant
2a579ba953 fix: Assertion issue with SDXL Compel 2024-02-29 13:29:01 -05:00
Brandon Rising
95875c6827 Fix merge with next 2024-02-29 13:29:01 -05:00
Brandon Rising
2df7db7e49 Switch absolute path to as_posix in _walk_directory 2024-02-29 13:29:01 -05:00
Brandon Rising
1b839cd26d Ruff checks 2024-02-29 13:29:01 -05:00
Brandon Rising
d370e1fd6f Fix directory called on _walk_directory 2024-02-29 13:29:01 -05:00
Brandon Rising
b13d11eaac Switch ModelSearch from os.walk to os.scandir 2024-02-29 13:29:01 -05:00
Brandon Rising
35dfad7318 Ruff format 2024-02-29 13:29:01 -05:00
Brandon Rising
92a7c1eec5 Ruff check 2024-02-29 13:29:01 -05:00
Brandon Rising
2a6722bb6c Extract TI loading logic into util, disallow it from ever failing a generation 2024-02-29 13:29:01 -05:00
Brandon Rising
9a1e55a305 Fix one last reference to the uncasted model 2024-02-29 13:29:01 -05:00
Brandon Rising
4c9dc7f845 Allow TIs to be either a key or a name in the prompt during our transition to using keys 2024-02-29 13:29:01 -05:00
Lincoln Stein
2da03bebaa handle change to Civitai metadata schema for commercial usage 2024-02-29 13:29:01 -05:00
Jennifer Player
1d5f7acf96 updated to use new import model mutation 2024-02-29 13:29:01 -05:00
psychedelicious
4c787f879d fix(ui): merge conflict 2024-02-29 13:29:01 -05:00
psychedelicious
f400867b9e chore(ui): bump deps
The only major version is `query-string`. The breaking change for it is dropping support for old versions of node. Not a problem for us.
2024-02-29 13:29:01 -05:00
psychedelicious
44b285637a ci: change frontend check to dpdm 2024-02-29 13:29:01 -05:00
psychedelicious
9c589b4449 feat(ui): move from madge to dpdm for circular dependencies 2024-02-29 13:29:01 -05:00
psychedelicious
f8375a9f72 tidy(ui): fix circular dependencies in listeners 2024-02-29 13:29:01 -05:00
psychedelicious
e4be78c256 tidy: remove some traces of ONNX 2024-02-29 13:29:01 -05:00
psychedelicious
3b725126fc chore(ui): typegen, update knip config
Knip should never touch the autogenerated types
2024-02-29 13:29:01 -05:00
psychedelicious
ac012a07a2 chore(ui): update pnpm-lock.yaml
Forgot to run `pnpm i` earlier after removing packages.
2024-02-29 13:29:01 -05:00
psychedelicious
2699755c6e ci: add knip to ui check workflow 2024-02-29 13:29:01 -05:00
psychedelicious
0c2b7b0e0f feat(ui): configure knip 2024-02-29 13:29:01 -05:00
psychedelicious
4a506eb0a7 tidy(ui): clean up unused code 6
unused files
2024-02-29 13:29:01 -05:00
psychedelicious
d435a479f1 tidy(ui): clean up unused code 5
variables, types and schemas
2024-02-29 13:29:01 -05:00
psychedelicious
e38427d5a1 tidy(ui): clean up unused code 4
variables, types and schemas
2024-02-29 13:29:01 -05:00
psychedelicious
a106c1126c tidy(ui): clean up unused code 3
variables, types and schemas
2024-02-29 13:29:01 -05:00
psychedelicious
bca1737b9c tidy(ui): clean up unused code 2
types and schemas
2024-02-29 13:29:00 -05:00
psychedelicious
9db4d9b0c0 feat(mm): add log stmt for download complete event 2024-02-29 13:29:00 -05:00
psychedelicious
929b353305 fix(ui): model install progress sets total bytes correctly 2024-02-29 13:29:00 -05:00
psychedelicious
67f40a3b26 chore(ui): lint 2024-02-29 13:29:00 -05:00
psychedelicious
732a1f135d fix(ui): fix remaining TS issues 2024-02-29 13:29:00 -05:00
psychedelicious
3001b146b1 fix(ui): fix up MM queries & types (wip) 2024-02-29 13:29:00 -05:00
psychedelicious
4a048dd0d4 tidy(api): remove non-heuristic install route 2024-02-29 13:29:00 -05:00
psychedelicious
b38080336d tidy(mm): remove ONNX from AnyModelConfig 2024-02-29 13:29:00 -05:00
psychedelicious
45546fdcea tidy(ui): clean up unused code 1
- Only export when necessary
- Remove totally unused functions, variables, state, etc
- Remove unused packages
2024-02-29 13:29:00 -05:00
psychedelicious
5e83fc8ac7 feat(ui): add knip + minimal config
https://knip.dev/

Replaces `unimported`
2024-02-29 13:29:00 -05:00
psychedelicious
04024fe479 fix(ui): fix missing component import 2024-02-29 13:29:00 -05:00
blessedcoolant
0722eb132d ui: split the canvas mask blur and edge size setting 2024-02-29 13:29:00 -05:00
Jennifer Player
120988b5f3 added add all button to scan models 2024-02-29 13:29:00 -05:00
psychedelicious
76e21fe607 feat(ui): create metadata types for control adapters
These are the same as the existing control adapter types, but the model field is non-nullable, simplifying handling of these objects.
2024-02-29 13:29:00 -05:00
psychedelicious
769ddc0024 fix(ui): model metadata handlers use model identifiers, not configs
Model metadata includes the main model, VAE and refiner model.

These used full model configs, as returned by the server, as their metadata type.

LoRA and control adapter metadata only use the metadata identifier.

This created a difference in handling. After parsing a model/vae/refiner, we have its name and can display it. But for LoRAs and control adapters, we only have the model key and must query for the full model config to get the name.

This change makes main model/vae/refiner metadata only have the model key, like LoRAs and control adapters.

The render function is now async so fetching can occur within it. All metadata fields with models now only contain the identifier, and fetch the model name to render their values.
2024-02-29 13:29:00 -05:00
psychedelicious
4833f9c736 fix(ui): CanvasPasteBack types 2024-02-29 13:29:00 -05:00
psychedelicious
68f9ce5e2d tidy(ui): remove unused metadata schemas 2024-02-29 13:29:00 -05:00
psychedelicious
ac200ab79d feat(nodes): update LoRAMetadataItem model
LoRA model is now under `model`, not `lora`.
2024-02-29 13:29:00 -05:00
psychedelicious
38cf809cb0 tidy(ui): tidy model identifier logic
- Move some files around
- Use util to extract key and base from model config
2024-02-29 13:29:00 -05:00
psychedelicious
d069f21d97 feat(ui): optimize model query caching
When we retrieve a list of models, upsert that data into the `getModelConfig` and `getModelConfigByAttrs` query caches.

With this change, calls to those two queries are almost always going to be free, because their caches will already have all models in them. The exception is queries for models that no longer exist.
2024-02-29 13:29:00 -05:00
psychedelicious
7d23120c2e fix(ui): fix lora metadata item type 2024-02-29 13:29:00 -05:00
psychedelicious
3b9b11e94b fix(ui): fix node type 2024-02-29 13:29:00 -05:00
psychedelicious
cd0699ada4 feat(ui): add transformation to width/height parameter schemas to round to multiple of 8
This allows image dimensions that are not multiples of 8 to still be recalled with best effort.
2024-02-29 13:29:00 -05:00
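For illustration, the rounding rule described above as a minimal Python sketch (the actual transform lives in the UI's zod parameter schemas; the constant name is an assumption):

```python
LATENT_SCALE_FACTOR = 8  # assumed constant: image dimensions must be multiples of this

def round_to_multiple(value: int, multiple: int = LATENT_SCALE_FACTOR) -> int:
    """Round a dimension to the nearest multiple, e.g. 517 -> 520."""
    return round(value / multiple) * multiple
```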
psychedelicious
8938765fb6 fix(ui): fix lora metadata rendering 2024-02-29 13:29:00 -05:00
psychedelicious
5b1fe4511e fix(ui): fix type issues related to change in LoRA type 2024-02-29 13:29:00 -05:00
psychedelicious
6a2ae5caa4 feat(ui): migrate all metadata recall logic to new system 2024-02-29 13:29:00 -05:00
psychedelicious
0cb2579109 fix(ui): use id for component key in control adapter components 2024-02-29 13:29:00 -05:00
psychedelicious
c6a95eb9ac feat(ui): no JSX in metadata handlers 2024-02-29 13:29:00 -05:00
psychedelicious
caa05864f8 feat(ui): refactor metadata handling (again)
Add concepts for metadata handlers. Handlers include parsers, recallers and validators for different metadata types:
- Parsers parse a raw metadata object of any shape to a structured object.
- Recallers load the parsed metadata into state. Recallers are optional, as some metadata types don't need to be loaded into state.
- Validators provide an additional layer of validation before recalling the metadata. This is needed because a metadata object may be valid, but not able to be recalled due to some other requirement, like base model compatibility. Validators are optional.

Sometimes metadata is not a single object but a list of items - like LoRAs. Metadata handlers may implement an optional set of "item" handlers which operate on individual items in the list.

Parsers and validators are async to allow fetching additional data, like a model config. Recallers are synchronous.

These handlers are composed into a public API, exported as a `handlers` object. Besides the handler functions, a metadata handler set includes:
- A function to get the label of the metadata type.
- An optional function to render the value of the metadata type.
- An optional function to render the _item_ value of the metadata type.
2024-02-29 13:29:00 -05:00
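A rough sketch of the handler shape described above (the real handlers are TypeScript in the UI; the Python names here are purely illustrative):

```python
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Optional

@dataclass
class MetadataHandler:
    get_label: Callable[[], str]                    # label for the metadata type
    parse: Callable[[Any], Awaitable[Any]]          # async: raw metadata -> structured object
    recall: Optional[Callable[[Any], None]] = None  # sync: load parsed value into state
    validate: Optional[Callable[[Any], Awaitable[Any]]] = None  # async: e.g. base model compatibility
    render_value: Optional[Callable[[Any], str]] = None         # optional value renderer
    render_item_value: Optional[Callable[[Any], str]] = None    # optional per-item renderer (e.g. LoRAs)
```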
psychedelicious
f24b3c64d3 build(ui): do not fail build on eslint error in dev mode 2024-02-29 13:29:00 -05:00
psychedelicious
70d7b1c756 chore(ui): typegen 2024-02-29 13:29:00 -05:00
psychedelicious
92abcaedcf feat(api): add MM get_by_attrs route
Gets the first model that matches the given name, base and type. Raises 404 if there isn't one.

This will be used for backwards compatibility with old metadata.
2024-02-29 13:29:00 -05:00
Mary Hipp
2eb9d66ef0 undo 2024-02-29 13:29:00 -05:00
Mary Hipp
d893cb954a fix literal strings in MM UI 2024-02-29 13:29:00 -05:00
Mary Hipp
9a6c2089bf fix TI appearing as key in prompt 2024-02-29 13:29:00 -05:00
Mary Hipp
52d0f9c52f fix base model grouping in combobox 2024-02-29 13:29:00 -05:00
psychedelicious
638322e89f fix(mm): fix ModelCacheBase method name 2024-02-29 13:29:00 -05:00
psychedelicious
bc7572797c chore: ruff 2024-02-29 13:29:00 -05:00
Lincoln Stein
2463710497 recover gracefully from GPU out of memory errors (next version) 2024-02-29 13:28:20 -05:00
Lincoln Stein
3be3bba007 clear out VRAM when an OOM occurs 2024-02-29 13:29:00 -05:00
psychedelicious
8dc33e60a7 feat(ui): bulk download click to download 2024-02-29 13:29:00 -05:00
psychedelicious
bde752bc02 fix(ui): fix node types for canvas graphs 2024-02-29 13:29:00 -05:00
psychedelicious
8f7b1f5068 chore(ui): typegen 2024-02-29 13:29:00 -05:00
psychedelicious
38afb918ad tidy(nodes): rename canvas paste back 2024-02-29 13:29:00 -05:00
blessedcoolant
5ca01bfad4 fix: outpaint result not getting pasted back correctly 2024-02-29 13:29:00 -05:00
blessedcoolant
c9f73c3340 fix: lint errors 2024-02-29 13:29:00 -05:00
blessedcoolant
6df606ac2a canvas: improve paste back (or try to) 2024-02-29 13:29:00 -05:00
blessedcoolant
ba3cf1d873 wip(ui): Replace 2 Layer Coherence pass with Gradient Mask 2024-02-29 13:28:58 -05:00
psychedelicious
1e52a1507b chore: ruff 2024-02-29 13:28:22 -05:00
psychedelicious
d52c956441 fix(ui): fix merge issue 2024-02-29 13:28:22 -05:00
psychedelicious
3130b3db64 fix(ui): use new scan_folder response instead of hook to determine if models are installed already 2024-02-29 13:28:22 -05:00
psychedelicious
8848443eff chore(ui): typegen 2024-02-29 13:28:22 -05:00
psychedelicious
acedc6d127 feat(mm): add logic to scan_folder route to check if a model is already installed
This was done in the frontend before but it's something the backend should handle.

The logic compares the found model paths to the path and source of all installed models. It excludes core models.
2024-02-29 13:28:22 -05:00
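A minimal sketch of that comparison, assuming installed model configs expose `path` and `source` fields:

```python
from pathlib import Path

def is_installed(found_path: Path, installed_configs: list[dict]) -> bool:
    """True if a scanned model path matches the path or source of an installed model."""
    found = found_path.resolve().as_posix()
    return any(
        found in (config.get("path"), config.get("source"))
        for config in installed_configs
        # core models are assumed to be filtered out before this check
    )
```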
psychedelicious
6d78276049 chore(ui): lint 2024-02-29 13:28:22 -05:00
psychedelicious
858300030c build(ui): restore i18n eslint rule 2024-02-29 13:28:22 -05:00
psychedelicious
bcc02c5902 chore: ruff 2024-02-29 13:28:22 -05:00
psychedelicious
92f42dfb2a fix(ui): fix metadata route 2024-02-29 13:28:22 -05:00
psychedelicious
8b18c0d273 chore(ui): typegen 2024-02-29 13:28:22 -05:00
psychedelicious
35c9a29722 feat(api): mm metadata route "meta" -> "metadata" 2024-02-29 13:28:22 -05:00
Mary Hipp
8deca6c9e6 lint fix 2024-02-29 13:28:22 -05:00
Mary Hipp
5a8550d2dd updated translations 2024-02-29 13:28:22 -05:00
maryhipp
866ba7974b fix convert endpoint logic 2024-02-29 13:28:22 -05:00
Mary Hipp
7119a5b88f clean up old model manager components and endpoints 2024-02-29 13:28:22 -05:00
Mary Hipp
af9a62d224 add model convert to checkpoint main models 2024-02-29 13:28:22 -05:00
Mary Hipp
dd9b139e38 fix logic to see if scanned models are already installed, style tweaks 2024-02-29 13:28:22 -05:00
maryhipp
0c0da5480b add error_reason to ModelInstallJob 2024-02-29 13:28:22 -05:00
Mary Hipp
d802900abe add error_reason to UI if import fails 2024-02-29 13:28:22 -05:00
Mary Hipp
34b9ab3674 fix types for ImportQueue, add QuickAdd for scan models 2024-02-29 13:28:22 -05:00
Jennifer Player
7f56e84a8d refactored and fixed issues with advanced import form 2024-02-29 13:28:22 -05:00
psychedelicious
170d9bca98 fix(ui): misc MM cleanup 2024-02-29 13:28:22 -05:00
psychedelicious
84ddad4ea2 chore(ui): temp disable eslint i18 rule 2024-02-29 13:28:22 -05:00
psychedelicious
4854aa8dc2 fix(ui): fix ImportMainModelResponse type 2024-02-29 13:28:22 -05:00
psychedelicious
4afb6b8b0a fix(ui): simplify model install event listeners 2024-02-29 13:28:22 -05:00
psychedelicious
5baa79c903 fix(ui): fix model install event types 2024-02-29 13:28:22 -05:00
Jennifer Player
2db252af31 added advanced import forms, not fully working yet 2024-02-29 13:28:22 -05:00
Mary Hipp
ccdb89534a get positioning/scrolling working for scan results list 2024-02-29 13:28:22 -05:00
Mary Hipp
c23dcc3e77 basic scan working and renders results 2024-02-29 13:28:22 -05:00
Mary Hipp
a9b1f4b8c6 add scan model endpoint, break add model into tabs 2024-02-29 13:28:22 -05:00
Mary Hipp
0030606d99 update metadata endpoint 2024-02-29 13:28:22 -05:00
maryhipp
58ce98101b allow metadata-less models to be used for GET metadata endpoint 2024-02-29 13:28:22 -05:00
Jennifer Player
66df96e425 added status to import queue model 2024-02-29 13:28:22 -05:00
Jennifer Player
eec5f6fb44 delete model imports and prune all finished, update state with socket messages 2024-02-29 13:28:22 -05:00
Mary Hipp
913cfc9583 fix sync model endpoint 2024-02-29 13:28:22 -05:00
Mary Hipp
8185225937 form error handling 2024-02-29 13:28:22 -05:00
Mary Hipp
78365e2c61 finish model update 2024-02-29 13:28:22 -05:00
Jennifer Player
8aafa873ef added socket listeners, added more info to ui 2024-02-29 13:28:22 -05:00
Mary Hipp
52bbd3b5ec edit view for model, depending on type and valid values 2024-02-29 13:28:21 -05:00
Mary Hipp
da6c4bf493 hook up Add Model button 2024-02-29 13:28:21 -05:00
Mary Hipp
f1e21bc9a9 single model view 2024-02-29 13:28:21 -05:00
Jennifer Player
c99eaee6f3 added import model form and importqueue 2024-02-29 13:28:21 -05:00
Mary Hipp
c2fca07c8e model list, filtering, searching 2024-02-29 13:28:21 -05:00
Mary Hipp
49c68a16c2 workspace for mary and jenn 2024-02-29 13:28:21 -05:00
Mary Hipp
3d977def53 get old UI working somewhat with new endpoints 2024-02-29 13:28:21 -05:00
Brandon Rising
70570b7213 Allow passing in key on register 2024-02-29 13:28:21 -05:00
Brandon Rising
957ff8a17e Remove passing keys in on register 2024-02-29 13:28:21 -05:00
Brandon Rising
c32eb17d73 Run ruff 2024-02-29 13:28:21 -05:00
Brandon Rising
57b288e57b Allow users to run model manager without cuda 2024-02-29 13:28:21 -05:00
psychedelicious
4c06132fc6 fix(ui): roll back utility-types
Its `Required` util does not distribute over unions as expected. Also, we already have `ts-toolbelt` for some utils.
2024-02-29 13:28:21 -05:00
psychedelicious
9b23820717 feat(ui): refactor metadata handling
Refactor of metadata recall handling. This is in preparation for a backwards compatibility layer for models.

- Create helpers to fetch a model outside react (e.g. not in a hook)
- Created helpers to parse model metadata
- Renamed a lot of types that were confusing and/or had naming collisions
2024-02-29 13:28:21 -05:00
psychedelicious
c323179854 chore(ui): typegen 2024-02-29 13:28:21 -05:00
psychedelicious
a0a143f495 fix(nodes): make fields on ModelConfigBase required
The setup of `ModelConfigBase` means autogenerated types have critical fields flagged as nullable (like `key` and `base`). Need to manually flag them as required.
2024-02-29 13:28:21 -05:00
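One way to do this in pydantic v2, sketched under the assumption that `key` and `base` have defaults but should still appear in the schema's `required` list:

```python
from pydantic import BaseModel, ConfigDict

def _mark_required(schema: dict) -> None:
    # Force critical fields into "required" even though they have defaults
    required = set(schema.get("required", []))
    required.update({"key", "base"})
    schema["required"] = sorted(required)

class ModelConfigBase(BaseModel):
    model_config = ConfigDict(json_schema_extra=_mark_required)

    key: str = "<NOKEY>"  # a default means pydantic would otherwise mark it optional
    base: str = "sd-1"

assert set(ModelConfigBase.model_json_schema()["required"]) == {"key", "base"}
```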
psychedelicious
ae43e91caa feat(ui): replace type-fest with utility-types
- The new package has more useful types
- Only used `JsonObject` from `type-fest`; added an implementation of that type
2024-02-29 13:28:21 -05:00
Lincoln Stein
adab454c51 several small model install enhancements
- Support extended HF repoid syntax in TUI. This allows
  installation of subfolders and safetensors files, as in
  `XpucT/Deliberate::Deliberate_v5.safetensors`

- Add `error` and `error_traceback` properties to the install
  job objects.

- Rename the `heuristic_import` route to `heuristic_install`.

- Fix the example `config` input in the `heuristic_install` route.
2024-02-29 13:28:21 -05:00
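The extended syntax splits on `::`; a minimal parsing sketch (the function name is hypothetical):

```python
def parse_extended_repo_id(source: str) -> tuple[str, str | None]:
    """'XpucT/Deliberate::Deliberate_v5.safetensors' -> ('XpucT/Deliberate', 'Deliberate_v5.safetensors')"""
    repo_id, _, subpath = source.partition("::")
    return repo_id, subpath or None
```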
Lincoln Stein
6f27a1ba8c use official Deliberate download repo 2024-02-29 13:28:21 -05:00
Lincoln Stein
dd7fc680bf fix repo-id for the Deliberate v5 model
prevent lora and embedding file suffixes from being stripped during installation

apply psychedelicious patch to get compel to load proper TI embedding
2024-02-29 13:28:21 -05:00
Lincoln Stein
3312dcc58f remove startup dependency on legacy models.yaml file 2024-02-29 13:28:21 -05:00
dunkeroni
c60cfdb285 chore: typing 2024-02-29 13:28:21 -05:00
dunkeroni
83984b92db chore: typing fix 2024-02-29 13:28:21 -05:00
dunkeroni
dcca220885 feat(nodes): added gradient mask node 2024-02-29 13:28:21 -05:00
Brandon Rising
550f6cb437 Run ruff 2024-02-29 13:28:21 -05:00
Mary Hipp
e18763438b rename endpoint for scanning 2024-02-29 13:28:21 -05:00
Brandon Rising
e731645f0e Create /search endpoint, update model object structure in scan model page 2024-02-29 13:28:21 -05:00
psychedelicious
15ce8e35c7 chore(ui): bump deps
Notable updates:
- Minor version of RTK includes customizable selectors for RTK Query, so we can remove the patch that was added to ensure only the LRU memoize function was used for perf reasons. Updated to use the LRU memoize function.
- Major version of react-resizable-panels. No breaking changes, works great, and you can now resize all panels when dragging at the intersection point of panels. Cool!
- Minor (?) version of nanostores. `action` API is removed, we were using it in one spot. Fixed.
- @invoke-ai/eslint-config-react has all deps bumped and now has its dependent plugins/configs listed as normal dependencies (as opposed to peer deps). This means we can remove those packages from explicit dev deps.
2024-02-29 13:28:21 -05:00
psychedelicious
8d6d03bd04 tidy(ui): remove debugging stmt 2024-02-29 13:28:21 -05:00
psychedelicious
64c1ce895c fix(ui): handle new model format for metadata 2024-02-29 13:28:21 -05:00
psychedelicious
1ced80d492 fix(ui): use model names in badges 2024-02-29 13:28:21 -05:00
psychedelicious
6577250523 fix(nodes): fix TI loading 2024-02-29 13:28:21 -05:00
psychedelicious
068886a70c fix(ui): fix package build 2024-02-29 13:28:21 -05:00
psychedelicious
3a97f6e38e feat(ui): do not subscribe to bulk download sio room if baseUrl is set 2024-02-29 13:28:21 -05:00
psychedelicious
85dae6ad1e feat(ui): revise bulk download listeners
- Use a single listener for all of them to keep them in one spot
- Use the bulk download item name as a toast id so we can update the existing toasts
- Update handling to work with other environments
- Move all bulk download handling from components to listener
2024-02-29 13:28:21 -05:00
psychedelicious
ffef5c65bb chore(ui): typegen 2024-02-29 13:28:21 -05:00
psychedelicious
f74e352f96 feat(bulk_download): update response model, messages 2024-02-29 13:28:21 -05:00
Stefan Tobler
6a6958f19b implementing download for bulk_download events 2024-02-29 13:28:21 -05:00
Stefan Tobler
6828962c05 setting up event listeners for bulk download socket 2024-02-29 13:28:21 -05:00
psychedelicious
c7ed5606bd test: clean up & fix tests
- Deduplicate the mock invocation services. This is possible now that the import order issue is resolved.
- Merge `DummyEventService` into `TestEventService` and update all tests to use `TestEventService`.
2024-02-29 13:28:21 -05:00
psychedelicious
fff5d12648 tidy(bulk_download): don't store events service separately
Using the invoker object directly leaves no ambiguity as to what `_events_bus` actually is.
2024-02-29 13:28:21 -05:00
psychedelicious
dabb5e2cf4 tidy(bulk_download): do not rely on pagination API to get all images for board
We can get all images for the board as a list of image names, then pass that to `_image_handler` to get the DTOs, decoupling from the pagination API.
2024-02-29 13:28:21 -05:00
psychedelicious
8927620c2d tidy(bulk_download): nit - use or as a coalescing operator
Just a bit cleaner.
2024-02-29 13:28:21 -05:00
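The idiom in question, for reference (names are illustrative):

```python
def resolve_item_name(requested: str | None, board_name: str) -> str:
    # `or` coalesces None (and empty strings) to the fallback in one expression
    return requested or board_name
```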
psychedelicious
8244733145 tidy(bulk_download): use single underscore for private attrs
Double underscores are used in the app but they don't actually do or convey anything that single underscores don't already do. They're considered unpythonic except for actual dunder/magic methods.
2024-02-29 13:28:21 -05:00
psychedelicious
ac25ad0b67 tidy(bulk_download): remove class-level attr annotations
These can be misleading as they shadow actual assigned class attributes. This pattern is in the rest of the app but it shouldn't be.
2024-02-29 13:28:21 -05:00
psychedelicious
8abb57eedb tidy(bulk_download): remove extraneous abstract methods
`start`, `stop` and `__init__` are not required in implementations of an ABC or service.
2024-02-29 13:28:21 -05:00
psychedelicious
b7f152b45a tidy(bulk_download): clean up comments 2024-02-29 13:28:21 -05:00
Stefan Tobler
28ae16001e adding bulk_download_item_name to socket events 2024-02-29 13:28:21 -05:00
Stefan Tobler
6468b044d8 refactoring handlers to do null check 2024-02-29 13:28:21 -05:00
Stefan Tobler
ec129662a6 removing dependency on an output folder, embrace python temp folder for bulk download 2024-02-29 13:28:21 -05:00
Stefan Tobler
024b4580a7 relocating event_service fixture due to import ordering 2024-02-29 13:28:21 -05:00
Stefan Tobler
ba28709f2d moving the responsibility of cleaning up board names to the service not the route 2024-02-29 13:28:21 -05:00
Stefan Tobler
3c881d5b1a updating imports to satisfy ruff 2024-02-29 13:28:21 -05:00
Stefan Tobler
bb40196a17 using temp directory for downloads 2024-02-29 13:28:21 -05:00
Stefan Tobler
b1301e1cbc returning the bulk_download_item_name on response for possible polling 2024-02-29 13:28:21 -05:00
Stefan Tobler
67df224df4 narrowing bulk_download stop service scope 2024-02-29 13:28:21 -05:00
Stefan Tobler
8102decfb9 adding test coverage for new bulk download routes 2024-02-29 13:28:21 -05:00
Stefan Tobler
4dfa1e3d03 cleaning up bulk download zip after the response is complete 2024-02-29 13:28:21 -05:00
Stefan Tobler
ca1c96e8f5 replacing import removed during rebase 2024-02-29 13:28:21 -05:00
Stefan Tobler
45f2370375 97% test coverage on bulk_download 2024-02-29 13:28:21 -05:00
Stefan Tobler
37e80f62b3 refactoring bulk_download to be better managed 2024-02-29 13:28:21 -05:00
Stefan Tobler
72429b1760 refactoring dummy event service, DRY principle; adding bulk_download_event to existing invoker tests 2024-02-29 13:28:21 -05:00
Stefan Tobler
c2b12f8849 refactoring bulkdownload to consider image category 2024-02-29 13:28:21 -05:00
Stefan Tobler
1e00b9760a fixing issue where default board did not return images 2024-02-29 13:28:21 -05:00
Stefan Tobler
c4cdaaf4dd using the board name to download boards 2024-02-29 13:28:21 -05:00
Stefan Tobler
9c61a40659 reworking some of the logic to use a default room, adding endpoint to download file on complete 2024-02-29 13:28:21 -05:00
Stefan Tobler
cacd0b9c4e linted and styling 2024-02-29 13:28:21 -05:00
Stefan Tobler
2dd67c3b71 implementation of bulkdownload background task 2024-02-29 13:28:21 -05:00
Stefan Tobler
cf6eb1394a adding socket events for bulk download 2024-02-29 13:28:21 -05:00
Stefan Tobler
aba9cd3f9a groundwork for the bulk_download_service 2024-02-29 13:28:21 -05:00
psychedelicious
f81388508f fix(ui): get workflow editor model selects working 2024-02-29 13:28:21 -05:00
psychedelicious
abe2055bbb fix(ui): get refiner model select working 2024-02-29 13:28:21 -05:00
psychedelicious
7dab32138c fix(ui): get vae model select working 2024-02-29 13:28:21 -05:00
psychedelicious
d6c9360fa3 fix(ui): get embedding select working 2024-02-29 13:28:20 -05:00
psychedelicious
ff53d828ce fix(ui): get lora select working 2024-02-29 13:28:20 -05:00
psychedelicious
0212d4f4a2 chore(ui): bump @invoke-ai/ui-library 2024-02-29 13:28:20 -05:00
psychedelicious
8db5aa626f fix(ui): fix low-hanging fruit types 2024-02-29 13:28:20 -05:00
Lincoln Stein
6e52d9bbce Add a few convenience targets to Makefile
- "test" to run pytests
- "frontend-install" to reinstall pnpm's node modeuls
2024-02-29 13:28:20 -05:00
psychedelicious
03db2cba6c chore(nodes): update TODO comment 2024-02-29 13:28:20 -05:00
psychedelicious
9ea8c2af54 tidy(nodes): clean up profiler/stats in processor, better comments 2024-02-29 13:28:20 -05:00
psychedelicious
37b8d59347 fix(nodes): fix typing on stats service context manager 2024-02-29 13:28:20 -05:00
psychedelicious
18e1fe83d5 fix(nodes): fix model load events
was accessing incorrect properties in event data
2024-02-29 13:28:20 -05:00
psychedelicious
198ed222c4 feat(nodes): make invocation a class var in processor 2024-02-29 13:28:20 -05:00
psychedelicious
62199b0fb1 feat(nodes): improved error messages in processor 2024-02-29 13:28:20 -05:00
psychedelicious
bdb843a6fb feat(nodes): make processor thread limit and polling interval configurable 2024-02-29 13:28:20 -05:00
psychedelicious
817cc616ce tests(nodes): fix tests following removal of services 2024-02-29 13:28:20 -05:00
psychedelicious
d37840712b chore(nodes): better comments for invocation context 2024-02-29 13:28:20 -05:00
psychedelicious
5c4779907f chore(nodes): "context_data" -> "data"
Changed within InvocationContext, for brevity.
2024-02-29 13:28:20 -05:00
psychedelicious
8870e0f8f2 refactor(nodes): move is_canceled to context.util 2024-02-29 13:28:20 -05:00
psychedelicious
d35f986351 feat(nodes): add whole queue_item to InvocationContextData
No reason to not have the whole thing in there.
2024-02-29 13:28:20 -05:00
psychedelicious
fafaa09f5e tidy(nodes): remove extraneous comments 2024-02-29 13:28:20 -05:00
psychedelicious
03c5de78e1 feat(nodes): better invocation error messages 2024-02-29 13:28:20 -05:00
psychedelicious
e85634742e chore(nodes): add comments for cancel state 2024-02-29 13:28:20 -05:00
psychedelicious
b4a120af42 feat(nodes): promote is_canceled to public node API 2024-02-29 13:28:20 -05:00
psychedelicious
276a95ae8e refactor(nodes): merge processors
Consolidate graph processing logic into session processor.

With graphs as the unit of work, and the session queue distributing graphs, we no longer need the invocation queue or processor.

Instead, the session processor dequeues the next session and processes it in a simple loop, greatly simplifying the app.

- Remove `graph_execution_manager` service.
- Remove `queue` (invocation queue) service.
- Remove `processor` (invocation processor) service.
- Remove queue-related logic from `Invoker`. It now only starts and stops the services, providing them with access to other services.
- Remove unused `invocation_retrieval_error` and `session_retrieval_error` events; these are no longer needed.
- Clean up stats service now that it is less coupled to the rest of the app.
- Refactor cancellation logic - cancellations now originate from session queue (i.e. HTTP cancel endpoint) and are emitted as events. Processor gets the events and sets the canceled event. Access to this event is provided to the invocation context for e.g. the step callback.
- Remove `sessions` router; it provided access to `graph_executions` but that no longer exists.
2024-02-29 13:28:20 -05:00
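A simplified sketch of the resulting loop (names and the polling mechanism are assumptions based on the description above):

```python
import threading

def process_loop(queue, stop_event: threading.Event, poll_event: threading.Event, polling_interval: float = 1.0):
    """The session processor dequeues the next session and processes it in a simple loop."""
    while not stop_event.is_set():
        queue_item = queue.dequeue()  # next queued session, or None if the queue is empty
        if queue_item is None:
            poll_event.wait(polling_interval)  # idle until new work arrives or the timeout elapses
            poll_event.clear()
            continue
        for invocation in queue_item.session:  # run the graph's nodes in execution order
            invocation.invoke()
```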
psychedelicious
69a176be92 tidy(nodes): remove commented tests 2024-02-29 13:28:20 -05:00
psychedelicious
d4a7f55c72 chore(ui): typegen 2024-02-29 13:28:20 -05:00
psychedelicious
0977a5e4aa tidy(nodes): remove no-op model_config
Because we now customize the JSON Schema creation for GraphExecutionState, the model_config did nothing.
2024-02-29 13:28:20 -05:00
psychedelicious
b711c46fa4 tidy(nodes): remove LibraryGraphs
The workflow library supersedes this unused feature.
2024-02-29 13:28:20 -05:00
psychedelicious
fb0fe06135 tidy(nodes): move node tests to parent dir
Thanks to the resolution of the import vs union issue, we can put tests anywhere.
2024-02-29 13:28:20 -05:00
psychedelicious
ab83fb2cea tidy(nodes): remove GraphInvocation
`GraphInvocation` is a node that can contain a whole graph. It is removed for a number of reasons:

1. This feature was unused (the UI doesn't support it) and there is no plan for it to be used.

The use-case it served is known in other node execution engines as "node groups" or "blocks" - a self-contained group of nodes, which has group inputs and outputs. This is a planned feature that will be handled client-side.

2. It adds substantial complexity to the graph processing logic. It's probably not enough to have a measurable performance impact but it does make it harder to work in the graph logic.

3. It allows for graphs to be recursive, and the improved invocations union handling does not play well with it. Actually, it works fine within `graph.py` but not in the tests for some reason. I do not understand why. There's probably a workaround, but I took this as encouragement to remove `GraphInvocation` from the app since we don't use it.
2024-02-29 13:28:20 -05:00
psychedelicious
d7adab89bd fix(nodes): fix OpenAPI schema generation
The change to `Graph.nodes` and `GraphExecutionState.results` validation requires some finagling to get the OpenAPI schema generation to work. See the new comments for details.
2024-02-29 13:28:20 -05:00
psychedelicious
0ad904d2b3 feat(nodes): JIT graph nodes validation
We use pydantic to validate a union of valid invocations when instantiating a graph.

Previously, we constructed the union while creating the `Graph` class. This introduces a dependency on the order of imports.

For example, consider a setup where we have 3 invocations in the app:

- Python executes the module where `FirstInvocation` is defined, registering `FirstInvocation`.
- Python executes the module where `SecondInvocation` is defined, registering `SecondInvocation`.
- Python executes the module where `Graph` is defined. A union of invocations is created and used to define the `Graph.nodes` field. The union contains `FirstInvocation` and `SecondInvocation`.
- Python executes the module where `ThirdInvocation` is defined, registering `ThirdInvocation`.
- A graph is created that includes `ThirdInvocation`. Pydantic validates the graph using the union, which does not know about `ThirdInvocation`, raising a `ValidationError` about an unknown invocation type.

This scenario has been particularly problematic in tests, where we may create invocations dynamically. The test files have to be structured in such a way that the imports happen in the right order. It's a major pain.

This PR refactors the validation of graph nodes to resolve this issue:

- `BaseInvocation` gets a new method `get_typeadapter`. This builds a pydantic `TypeAdapter` for the union of all registered invocations, caching it after the first call.
- `Graph.nodes`'s type is widened to `dict[str, BaseInvocation]`. This actually is a nice bonus, because we get better type hints whenever we reference `some_graph.nodes`.
- A "plain" field validator takes over the validation logic for `Graph.nodes`. "Plain" validators totally override pydantic's own validation logic. The validator grabs the `TypeAdapter` from `BaseInvocation`, then validates each node with it. The validation is identical to the previous implementation - we get the same errors.

`BaseInvocationOutput` gets the same treatment.
2024-02-29 13:28:20 -05:00
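A compact sketch of the pattern (registration via `__init_subclass__` is an illustrative stand-in for however invocations are actually registered):

```python
from typing import Any, ClassVar, Optional, Union
from pydantic import BaseModel, TypeAdapter

class BaseInvocation(BaseModel):
    _registry: ClassVar[list[type]] = []
    _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        BaseInvocation._registry.append(cls)  # register each invocation as its module executes
        BaseInvocation._typeadapter = None    # invalidate the cached union

    @classmethod
    def get_typeadapter(cls) -> TypeAdapter[Any]:
        # Built lazily, after all imports have run, then cached
        if cls._typeadapter is None:
            cls._typeadapter = TypeAdapter(Union[tuple(cls._registry)])
        return cls._typeadapter
```

The plain validator on `Graph.nodes` would then call `BaseInvocation.get_typeadapter().validate_python(...)` on each node.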
Lincoln Stein
75aa93fabb remove errant def that was crashing invokeai-configure 2024-02-29 13:28:20 -05:00
dunkeroni
0bd0cfc025 one more redundant RGB convert removed 2024-02-29 13:28:20 -05:00
dunkeroni
9c2dd21256 chore: ruff formatting 2024-02-29 13:28:20 -05:00
dunkeroni
4b33589def chore(invocations): remove redundant RGB conversions 2024-02-29 13:28:20 -05:00
dunkeroni
3e5a91e3bf chore(invocations): use IMAGE_MODES constant literal 2024-02-29 13:28:20 -05:00
dunkeroni
2dd2f19b46 fix: removed custom module 2024-02-29 13:28:20 -05:00
dunkeroni
5e14c90f94 fix(nodes): canny preprocessor uses RGBA again 2024-02-29 13:28:20 -05:00
dunkeroni
d17a0779cc feat(nodes): format option for get_image method
Also default CNet preprocessors to "RGB"
2024-02-29 13:28:20 -05:00
blessedcoolant
ee2ef470a7 fix: Alpha channel causing issue with DW Processor 2024-02-29 13:28:20 -05:00
psychedelicious
4191ca1a46 final tidying before marking PR as ready for review
- Replace AnyModelLoader with ModelLoaderRegistry
- Fix type check errors in multiple files
- Remove apparently unneeded `get_model_config_enum()` method from model manager
- Remove last vestiges of old model manager
- Updated tests and documentation

resolve conflict with seamless.py
2024-02-29 13:28:20 -05:00
Lincoln Stein
ab46865e5b Tidy names and locations of modules
- Rename old "model_management" directory to "model_management_OLD" in order to catch
  dangling references to original model manager.
- Caught and fixed most dangling references (still checking)
- Rename lora, textual_inversion and model_patcher modules
- Introduce a RawModel base class to simplify the Union returned by the
  model loaders.
- Tidy up the model manager 2-related tests. Add useful fixtures, and
  a finalizer to the queue and installer fixtures that will stop the
  services and release threads.
2024-02-29 13:28:20 -05:00
Lincoln Stein
3c1b0d01ac Fix issues identified during PR review by RyanjDick and brandonrising
- ModelMetadataStoreService is now injected into ModelRecordStoreService
  (these two services are really joined at the hip, and should someday be merged)
- ModelRecordStoreService is now injected into ModelManagerService
- Reduced timeout value for the various installer and download wait*() methods
- Introduced a Mock modelmanager for testing
- Replaced a bare print() statement with _logger in the install helper backend.
- Removed unused code from model loader init file
- Made `locker` a private variable in the `LoadedModel` object.
- Fixed up model merge frontend (will be deprecated anyway!)
2024-02-29 13:28:20 -05:00
psychedelicious
8a8e862a5f chore(ui): lint 2024-02-29 13:28:20 -05:00
psychedelicious
ed860ae851 feat(ui): fix main model & control adapter model selects 2024-02-29 13:28:18 -05:00
psychedelicious
eb27951b8c refactor(ui): url builders for each router
The MM2 router is at `api/v2/models`. URL builder utils make this a bit easier to manage.
2024-02-29 13:21:15 -05:00
psychedelicious
527f76250a feat(ui): update model identifier to be key (wip)
- Update most model identifiers to be `{key: string}` instead of name/base/type. Doesn't change the model select components yet.
- Update model _parameters_, stored in redux, to be `{key: string, base: BaseModel}` - we need to store the base model to be able to check model compatibility. May want to store the whole config? Not sure...
2024-02-29 13:16:37 -05:00
psychedelicious
c53d73ddfa fix(nodes): fix t2i adapter model loading 2024-02-29 13:16:37 -05:00
psychedelicious
2d953fe0cc fix(ui): update model types 2024-02-29 13:16:37 -05:00
psychedelicious
c6be4f5b9f tests(ui): add type tests 2024-02-29 13:16:37 -05:00
psychedelicious
ac1382abed tests(ui): enable vitest type testing
This is useful for the zod schemas and types we have created to match the backend.
2024-02-29 13:16:37 -05:00
psychedelicious
f0dcd70515 chore(ui): typegen 2024-02-29 13:16:37 -05:00
psychedelicious
87b0f7d04a feat(ui): export components type 2024-02-29 13:16:37 -05:00
psychedelicious
535350ebce fix(ui): fix type issues 2024-02-29 13:16:37 -05:00
psychedelicious
5873900410 chore: lint 2024-02-29 13:16:37 -05:00
psychedelicious
0f335bef5a chore: ruff 2024-02-29 13:16:37 -05:00
psychedelicious
8958e820c8 feat(nodes): update invocation context for mm2, update nodes model usage 2024-02-29 13:16:37 -05:00
Brandon Rising
7a36cd2832 Raise InvalidModelConfigException when unable to detect load class in ModelLoader 2024-02-29 13:16:37 -05:00
Brandon Rising
5728da5132 Update _get_hf_load_class to support clipvision models 2024-02-29 13:16:37 -05:00
Brandon Rising
aa5d124d70 References to context.services.model_manager.store.get_model can only accept keys, remove invalid assertion 2024-02-29 13:16:37 -05:00
Brandon Rising
5cc73ec5dd Remove references to model_records service, change submodel property on ModelInfo to submodel_type to support new params in model manager 2024-02-29 13:16:37 -05:00
Lincoln Stein
ad9f8542f2 improve swagger documentation 2024-02-29 13:16:37 -05:00
Lincoln Stein
1d95fe6116 fix a number of typechecking errors 2024-02-29 13:16:37 -05:00
Lincoln Stein
6e91d5baaf add route for model conversion from safetensors to diffusers
- Begin to add SwaggerUI documentation for AnyModelConfig and other
  discriminated Unions.
2024-02-29 13:16:37 -05:00
Lincoln Stein
93fb2d1a55 add a JIT download_and_cache() call to the model installer 2024-02-29 13:16:37 -05:00
Lincoln Stein
195768c9ee add back the heuristic_import() method and extend repo_ids to arbitrary file paths 2024-02-29 13:16:37 -05:00
Lincoln Stein
d56337f2d8 make model manager v2 ready for PR review
- Replace legacy model manager service with the v2 manager.

- Update invocations to use new load interface.

- Fixed many but not all type checking errors in the invocations. Most
  were unrelated to model manager

- Updated routes. All the new routes live under the route tag
  `model_manager_v2`. To avoid confusion with the old routes,
  they have the URL prefix `/api/v2/models`. The old routes
  have been de-registered.

- Added a pytest for the loader.

- Updated documentation in contributing/MODEL_MANAGER.md
2024-02-29 13:16:37 -05:00
Lincoln Stein
721ff58e44 consolidate model manager parts into a single class 2024-02-29 13:16:37 -05:00
Lincoln Stein
8f1b7355df probe for required encoder for IPAdapters and add to config 2024-02-29 13:16:37 -05:00
Lincoln Stein
dbd2f8dc5f fix invokeai_configure script to work with new mm; rename CLIs 2024-02-29 13:16:37 -05:00
Lincoln Stein
49df4fa120 BREAKING CHANGES: invocations now require model key, not base/type/name
- Implement new model loader and modify invocations and embeddings

- Finish implementation loaders for all models currently supported by
  InvokeAI.

- Move lora, textual_inversion, and model patching support into
  backend/embeddings.

- Restore support for model cache statistics collection (a little ugly,
  needs work).

- Fixed up invocations that load and patch models.

- Move seamless and silencewarnings utils into better location
2024-02-29 13:16:37 -05:00
Lincoln Stein
92843d55eb Multiple refinements on loaders:
- Cache stat collection enabled.
- Implemented ONNX loading.
- Add ability to specify the repo version variant in installer CLI.
- If caller asks for a repo version that doesn't exist, will fall back
  to empty version rather than raising an error.
2024-02-29 13:16:37 -05:00
Lincoln Stein
fdbd288956 added textual inversion and lora loaders 2024-02-29 13:16:36 -05:00
Lincoln Stein
c0dabb5255 loaders for main, controlnet, ip-adapter, clipvision and t2i 2024-02-29 13:16:36 -05:00
Lincoln Stein
e242fe41f2 model loading and conversion implemented for vaes 2024-02-29 13:16:36 -05:00
Lincoln Stein
231c12fd1e add ram cache module and support files 2024-02-29 13:16:36 -05:00
Lincoln Stein
66e2d1b346 add concept of repo variant 2024-02-29 13:16:36 -05:00
psychedelicious
55147fbb7e tests(ui): add parseFieldType.test.ts 2024-02-29 13:16:36 -05:00
psychedelicious
c99e264bde feat(ui): add more types of FieldParseError
Unfortunately you cannot test for both a specific type of error and match its message. Splitting the error classes makes it easier to test expected error conditions.
2024-02-29 13:16:36 -05:00
psychedelicious
62c3687a9a feat(ui): add vitest
- Add vitest.
- Consolidate vite configs into single file (easier to config everything based on env for testing)
2024-02-29 13:16:36 -05:00
psychedelicious
c7f1fad398 feat(ui): workflow schema v3 (WIP)
The changes aim to deduplicate data between workflows and node templates, decoupling workflows from internal implementation details. A good amount of data that was needlessly duplicated from the node template to the workflow is removed.

These changes substantially reduce the file size of workflows (and therefore the images with embedded workflows):

- Default T2I SD1.5 workflow JSON is reduced from 23.7kb (798 lines) to 10.9kb (407 lines).
- Default tiled upscale workflow JSON is reduced from 102.7kb (3341 lines) to 51.9kb (1774 lines).

The trade-off is that we need to reference node templates to get things like the field type and other things. In practice, this is a non-issue, because we need a node template to do anything with a node anyways.

- Field types are not included in the workflow. They are always pulled from the node templates.

The field type is now properly an internal implementation detail and we can change it as needed. Previously this would require a migration for the workflow itself. With the v3 schema, the structure of a field type is an internal implementation detail that we are free to change as we see fit.

- Workflow nodes no longer have an `outputs` property and there is no longer such a thing as a `FieldOutputInstance`. These are only on the templates.

These were never referenced at a time when we didn't also have the templates available, and there'd be no reason to do so.

- Node width and height are no longer stored in the node.

These weren't used. Also, per https://reactflow.dev/api-reference/types/node, we shouldn't be programmatically changing these properties. A future enhancement can properly add node resizing.

- `nodeTemplates` slice is merged back into `nodesSlice` as `nodes.templates`. Turns out it's just a hassle having these separate in separate slices.

- Workflow migration logic updated to support the new schema. V1 workflows migrate all the way to v3 now.

- Changes throughout the nodes code to accommodate the above changes.
2024-02-29 13:16:36 -05:00
psychedelicious
0540e6fb0d chore(ui): regen types 2024-02-29 13:16:36 -05:00
psychedelicious
179aa1de63 feat(nodes): add more missing exports to invocation_api
Crawled through a few custom nodes to figure out what I had missed.
2024-02-29 13:16:36 -05:00
psychedelicious
acc50d9bd2 chore(nodes): "SAMPLER_NAME_VALUES" -> "SCHEDULER_NAME_VALUES"
This was named inaccurately.
2024-02-29 13:16:36 -05:00
psychedelicious
54d92cb246 chore(nodes): remove deprecation logic for nodes API 2024-02-29 13:16:36 -05:00
psychedelicious
17ed6cc82f chore(nodes): export model-related objects from invocation_api 2024-02-29 13:16:36 -05:00
psychedelicious
5927ab9c36 chore(backend): rename ModelInfo -> LoadedModelInfo
We have two different classes named `ModelInfo` which might need to be used by API consumers. We need to export both but have to deal with this naming collision.

The `ModelInfo` I've renamed here is the one that is returned when a model is loaded. It's the object least likely to be used by API consumers.
2024-02-29 13:16:36 -05:00
psychedelicious
08636e42af feat(nodes): use LATENT_SCALE_FACTOR in primitives.py, noise.py
- LatentsOutput.build
- NoiseOutput.build
- Noise.width, Noise.height multiple_of
2024-02-29 13:16:36 -05:00
psychedelicious
2d74a39810 feat(nodes): extract LATENT_SCALE_FACTOR to constants.py 2024-02-29 13:16:36 -05:00
psychedelicious
bcc57dc886 feat(nodes): use TemporaryDirectory to handle ephemeral storage in ObjectSerializerDisk
Replace `delete_on_startup: bool` & associated logic with `ephemeral: bool` and `TemporaryDirectory`.

The temp dir is created inside of `output_dir`. For example, if `output_dir` is `invokeai/outputs/tensors/`, then the temp dir might be `invokeai/outputs/tensors/tmpvj35ht7b/`.

The temp dir is cleaned up when the service is stopped, or when it is GC'd if not properly stopped.

In the event of a catastrophic crash where the temp files are not cleaned up, the user can delete the tempdir themselves.

This situation may not occur in normal use, but if you kill the process, python cannot clean up the temp dir itself. This includes running the app in a debugger and killing the debugger process - something I do relatively often.

Tests updated.
2024-02-29 13:16:36 -05:00
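A minimal sketch of the `ephemeral` behaviour described above:

```python
import tempfile
from pathlib import Path

class ObjectSerializerDisk:
    def __init__(self, output_dir: Path, ephemeral: bool = False):
        self._tempdir: tempfile.TemporaryDirectory | None = None
        if ephemeral:
            # temp dir is created inside output_dir, e.g. outputs/tensors/tmpvj35ht7b/
            output_dir.mkdir(parents=True, exist_ok=True)
            self._tempdir = tempfile.TemporaryDirectory(dir=output_dir)
            output_dir = Path(self._tempdir.name)
        self._output_dir = output_dir

    def stop(self) -> None:
        # explicit cleanup on service stop; GC finalization covers the unstopped case
        if self._tempdir is not None:
            self._tempdir.cleanup()
```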
psychedelicious
cda9ab7933 tests: test ObjectSerializerDisk class name extraction 2024-02-29 13:16:36 -05:00
psychedelicious
dc003a4bac chore(nodes): update ObjectSerializerForwardCache docstring 2024-02-29 13:16:36 -05:00
psychedelicious
e464804696 chore(nodes): fix pyright ignore 2024-02-29 13:16:36 -05:00
psychedelicious
6b5f01ed3f tidy(nodes): "latents" -> "obj" 2024-02-29 13:16:36 -05:00
psychedelicious
c05c3e5a7b tidy(nodes): do not unnecessarily store invoker 2024-02-29 13:16:36 -05:00
psychedelicious
d202243c62 feat(nodes): make delete on startup configurable for obj serializer
- The default is to not delete on startup - feels safer.
- The two services using this class _do_ delete on startup.
- The class has "ephemeral" removed from its name.
- Tests & app updated for this change.
2024-02-29 13:16:36 -05:00
psychedelicious
7e04f2bff9 fix(nodes): use metadata/board_id if provided by user, overriding WithMetadata/WithBoard-provided values 2024-02-29 13:16:36 -05:00
psychedelicious
0282f477b6 tidy(nodes): clarify comment 2024-02-29 13:16:36 -05:00
psychedelicious
9983eddc01 Revert "feat(nodes): use LATENT_SCALE_FACTOR const in tensor output builders"
This reverts commit ef18fc546560277302f3886e456da9a47e8edce0.
2024-02-29 13:16:36 -05:00
psychedelicious
b62be80762 feat(nodes): use LATENT_SCALE_FACTOR const in tensor output builders 2024-02-29 13:16:36 -05:00
psychedelicious
62db617b41 tests: fix broken tests 2024-02-29 13:16:36 -05:00
psychedelicious
6905f18c08 tidy(nodes): minor spelling correction 2024-02-29 13:16:36 -05:00
psychedelicious
62bbfa7b5a tests: add object serializer tests
These test both object serializer and its forward cache implementation.
2024-02-29 13:16:36 -05:00
psychedelicious
27d7a1731b feat(nodes): allow _delete_all in obj serializer to be called at any time
`_delete_all` logged how many items it deleted, and had to be called _after_ service start because it needed access to the logger.

Move the logger call to the startup method and return the deleted stats from `_delete_all`. This lets `_delete_all` be called at any time.
2024-02-29 13:16:36 -05:00
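Sketched, the split looks roughly like this (names assumed):

```python
from pathlib import Path

def _delete_all(directory: Path) -> int:
    """Delete all stored items and return the count; no logging, so callable at any time."""
    deleted = 0
    for path in directory.iterdir():
        if path.is_file():
            path.unlink()
            deleted += 1
    return deleted

# The startup method, which has logger access, reports the stats:
# self._invoker.services.logger.info(f"Deleted {_delete_all(self._dir)} stale items")
```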
psychedelicious
787510a65b tidy(nodes): remove object serializer on_saved
It's unused.
2024-02-29 13:16:36 -05:00
psychedelicious
d9dc5d58be revert(nodes): revert making tensors/conditioning use item storage
Turns out they are just different enough in purpose that the implementations would be rather unintuitive. I've made a separate ObjectSerializer service to handle tensors and conditioning.

Refined the class a bit too.
2024-02-29 13:16:36 -05:00
psychedelicious
614f0e8086 feat(nodes): support custom exception in ephemeral disk storage 2024-02-29 13:16:36 -05:00
psychedelicious
723009e163 feat(nodes): support custom save and load functions in ItemStorageEphemeralDisk 2024-02-29 13:16:36 -05:00
psychedelicious
abdc87d5fc feat(nodes): create helper function to generate the item ID 2024-02-29 13:16:36 -05:00
psychedelicious
7cb8e29726 feat(nodes): use ItemStorageABC for tensors and conditioning
Turns out `ItemStorageABC` was almost identical to `PickleStorageBase`. Instead of maintaining separate classes, we can use `ItemStorageABC` for both.

There's only one change needed - the `ItemStorageABC.set` method must return the newly stored item's ID. This allows us to let the service handle the responsibility of naming the item, but still create the requisite output objects during node execution.

The naming implementation is improved here. It extracts the name of the generic and appends a UUID to that string when saving items.
2024-02-29 13:16:36 -05:00
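The key API change, sketched (naming scheme per the description above; the details are assumptions):

```python
from abc import ABC, abstractmethod
from typing import Generic, TypeVar
from uuid import uuid4

T = TypeVar("T")

class ItemStorageABC(ABC, Generic[T]):
    @abstractmethod
    def set(self, item: T) -> str:
        """Store the item and return its newly assigned ID."""

    @staticmethod
    def _new_item_id(item: T) -> str:
        # type name + UUID, e.g. "Tensor-8f0c9a..." - lets the service own naming
        return f"{type(item).__name__}-{uuid4()}"
```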
psychedelicious
f593959bea tidy(nodes): do not refer to files as latents in PickleStorageTorch (again) 2024-02-29 13:16:36 -05:00
psychedelicious
c7218dc130 feat(nodes): ItemStorageABC typevar no longer bound to pydantic.BaseModel
This bound is totally unnecessary. There's no requirement for any implementation of `ItemStorageABC` to work only on pydantic models.
2024-02-29 13:16:36 -05:00
psychedelicious
ebc3a24d0d fix(nodes): add super init to PickleStorageTorch 2024-02-29 13:16:36 -05:00
psychedelicious
315681b491 tidy(nodes): do not refer to files as latents in PickleStorageTorch 2024-02-29 13:16:36 -05:00
psychedelicious
0c149cbd3b feat(nodes): replace latents service with tensors and conditioning services
- New generic class `PickleStorageBase`, implements the same API as `LatentsStorageBase`, use for storing non-serializable data via pickling
- Implementation `PickleStorageTorch` uses `torch.save` and `torch.load`, same as `LatentsStorageDisk`
- Add `tensors: PickleStorageBase[torch.Tensor]` to `InvocationServices`
- Add `conditioning: PickleStorageBase[ConditioningFieldData]` to `InvocationServices`
- Remove `latents` service and all `LatentsStorage` classes
- Update `InvocationContext` and all usage of old `latents` service to use the new services/context wrapper methods
2024-02-29 13:16:36 -05:00
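A minimal sketch of the torch-backed implementation described above:

```python
from pathlib import Path
import torch

class PickleStorageTorch:
    """Stores arbitrary non-serializable objects via torch.save/torch.load."""

    def __init__(self, output_dir: Path):
        self._dir = output_dir
        self._dir.mkdir(parents=True, exist_ok=True)

    def save(self, name: str, obj) -> None:
        torch.save(obj, self._dir / f"{name}.pt")

    def get(self, name: str):
        return torch.load(self._dir / f"{name}.pt")
```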
psychedelicious
3563d4ecf7 tidy(nodes): delete onnx.py
It doesn't work and keeping it updated to prevent the app from starting was getting tedious. Deleted.
2024-02-29 13:16:36 -05:00
psychedelicious
c4ea96dec2 fix(nodes): rearrange fields.py to avoid needing forward refs 2024-02-29 13:16:36 -05:00
psychedelicious
06d0232841 tidy(nodes): remove unnecessary, shadowing class attr declarations 2024-02-29 13:16:36 -05:00
psychedelicious
bb8c71f706 feat(ui): revise graphs to not use LinearUIOutputInvocation
See this comment for context: https://github.com/invoke-ai/InvokeAI/pull/5491#discussion_r1480760629

- Remove this now-unnecessary node from all graphs
- Update graphs' terminal image-outputting nodes' `is_intermediate` and `board` fields appropriately
- Add util function to prepare the `board` field, tidy the utils
- Update `socketInvocationComplete` listener to work correctly with this change

I've manually tested all graph permutations that were changed (I think this is all...) to ensure images go to the gallery as expected:
- ad-hoc upscaling
- t2i w/ sd1.5
- t2i w/ sd1.5 & hrf
- t2i w/ sdxl
- t2i w/ sdxl + refiner
- i2i w/ sd1.5
- i2i w/ sdxl
- i2i w/ sdxl + refiner
- canvas t2i w/ sd1.5
- canvas t2i w/ sdxl
- canvas t2i w/ sdxl + refiner
- canvas i2i w/ sd1.5
- canvas i2i w/ sdxl
- canvas i2i w/ sdxl + refiner
- canvas inpaint w/ sd1.5
- canvas inpaint w/ sdxl
- canvas inpaint w/ sdxl + refiner
- canvas outpaint w/ sd1.5
- canvas outpaint w/ sdxl
- canvas outpaint w/ sdxl + refiner
2024-02-29 13:16:36 -05:00
psychedelicious
c16f77bb23 chore(ui): regen types 2024-02-29 13:16:36 -05:00
psychedelicious
1e4b953ccd feat(nodes): add WithBoard field helper class
This class works the same way as `WithMetadata` - it simply adds a `board` field to the node. The context wrapper function is able to pull the board id from this. This allows image-outputting nodes to get a board field "for free", and have their outputs automatically saved to it.

This is a breaking change for node authors who may have a field called `board`, because it makes `board` a reserved field name. I'll look into how to avoid this - maybe by naming this invoke-managed field `_board` to avoid collisions?

Supporting changes:
- `WithBoard` is added to all image-outputting nodes, giving them the ability to save to board.
- Unused, duplicate `WithMetadata` and `WithWorkflow` classes are deleted from `baseinvocation.py`. The "real" versions are in `fields.py`.
- Remove `LinearUIOutputInvocation`. Now that all nodes that output images also have a `board` field by default, this node is no longer necessary. See comment here for context: https://github.com/invoke-ai/InvokeAI/pull/5491#discussion_r1480760629
- Without `LinearUIOutputInvocation`, the `ImagesInferface.update` method is no longer needed, and removed.

Note: This commit does not bump all node versions. I will ensure that is done correctly before merging the PR of which this commit is a part.

Note: A followup commit will implement the frontend changes to support this change.
2024-02-29 13:16:36 -05:00
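The mixin pattern, sketched (field and class names per the description; the example node is hypothetical):

```python
from typing import Optional
from pydantic import BaseModel, Field

class BoardField(BaseModel):
    board_id: str

class WithBoard(BaseModel):
    # Mixing this in gives a node a `board` field; the context wrapper reads
    # board.board_id when saving the node's image output.
    board: Optional[BoardField] = Field(default=None, description="The board to save the image to")

class SaveImageInvocation(WithBoard, BaseModel):  # real nodes also inherit BaseInvocation
    image_name: str = ""
```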
psychedelicious
d6ce901ad2 remove unused configdict import 2024-02-29 13:16:36 -05:00
psychedelicious
060038a8c0 fix(ui): remove original l2i node in HRF graph 2024-02-29 13:16:36 -05:00
psychedelicious
e38d275e20 fix(nodes): do not freeze or cache config in context wrapper
- The config is already cached by the config class's `get_config()` method.
- The config mutates itself in its `root_path` property getter. Freezing the class makes any attempt to grab a path from the config error. Unfortunately this means we cannot easily freeze the class without fiddling with the inner workings of `InvokeAIAppConfig`, which is outside the scope here.
2024-02-29 13:16:36 -05:00
psychedelicious
3fea7a6f00 feat(nodes): context.data -> context._data 2024-02-29 13:16:36 -05:00
psychedelicious
57ae23b222 feat(nodes): context.__services -> context._services 2024-02-29 13:16:36 -05:00
psychedelicious
10a4f1df8a feat(nodes): cache invocation interface config 2024-02-29 13:16:36 -05:00
psychedelicious
0fde0d1ff7 feat(nodes): do not hide services in invocation context interfaces 2024-02-29 13:16:36 -05:00
psychedelicious
0ff466ebc4 fix(nodes): restore missing context type annotations 2024-02-29 13:16:36 -05:00
psychedelicious
c3bfb4a38f tests(nodes): fix mock InvocationContext 2024-02-29 13:16:36 -05:00
psychedelicious
a976130899 chore(nodes): add comments for ConfigInterface 2024-02-29 13:16:36 -05:00
psychedelicious
fcfc0c9a94 feat(nodes): export more things from `invocation_api` 2024-02-29 13:16:36 -05:00
psychedelicious
42c99efddf feat(nodes): add boards interface to invocation context 2024-02-29 13:16:36 -05:00
psychedelicious
87d28b2519 fix(nodes): restore type annotations for InvocationContext 2024-02-29 13:16:36 -05:00
psychedelicious
e600f495a2 feat(nodes): do not freeze InvocationContextData, as freezing prevents it from being subclassed 2024-02-29 13:16:36 -05:00
psychedelicious
e4bf66ef5b feat: tweak pyright config 2024-02-29 13:16:36 -05:00
psychedelicious
4e266e7466 feat(nodes): create invocation_api.py
This is the public API for invocations.

Everything a custom node might need should be re-exported from this file.
2024-02-29 13:16:36 -05:00
psychedelicious
26b17d778d feat(nodes): move ConditioningFieldData to conditioning_data.py 2024-02-29 13:16:35 -05:00
psychedelicious
1ca6c0798f tests: fix missing arg for InvocationContext 2024-02-29 13:16:35 -05:00
psychedelicious
2515a2dffd feat(nodes): restore previous invocation context methods with deprecation warnings 2024-02-29 13:16:35 -05:00
psychedelicious
cc4a4f1275 chore: ruff 2024-02-29 13:16:35 -05:00
psychedelicious
c58951dfcc feat(nodes): tidy invocation_context.py, improve comments 2024-02-29 13:16:35 -05:00
psychedelicious
8dc1207790 tests: fix tests for new invocation context 2024-02-29 13:16:35 -05:00
psychedelicious
248176604f docs: update INVOCATIONS.md 2024-02-29 13:16:35 -05:00
psychedelicious
2254a0c078 feat(nodes): update all invocations to use new invocation context
Update all invocations to use the new context. The changes are all fairly simple, but there are a lot of them.

Supporting minor changes:
- Patch bump for all nodes that use the context
- Update invocation processor to provide new context
- Minor change to `EventServiceBase` to accept a node's ID instead of the dict version of a node
- Minor change to `ModelManagerService` to support the new wrapped context
- Finagling of imports to avoid circular dependencies
2024-02-29 13:16:35 -05:00
psychedelicious
ff30ec4746 feat: add pyright config
I was having issues with mypy both over- and under-reporting certain problems. I've added a pyright config.
2024-02-29 13:16:35 -05:00
psychedelicious
7657c361a3 feat(nodes): restricts invocation context power
Creates a low-power `InvocationContext` with simplified methods and data.

See `invocation_context.py` for detailed comments.
2024-02-29 13:16:35 -05:00
psychedelicious
c3f0d2e273 tidy(nodes): move all field things to fields.py
Unfortunately, this is necessary to prevent circular imports at runtime.
2024-02-29 13:16:35 -05:00
Васянатор
63ab5ff5a2 translationBot(ui): update translation (Russian)
Currently translated at 98.3% (1398 of 1422 strings)

Co-authored-by: Васянатор <ilabulanov339@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ru/
Translation: InvokeAI/Web UI
2024-02-29 23:27:36 +11:00
Samantha Morello
9a8a9c5848 translationBot(ui): update translation (Italian)
Currently translated at 98.0% (1441 of 1470 strings)

Co-authored-by: Samantha Morello <tildsart@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-02-29 23:27:36 +11:00
Alexander Eichhorn
1a3ffb6e94 translationBot(ui): update translation (German)
Currently translated at 80.4% (1183 of 1470 strings)

Co-authored-by: Alexander Eichhorn <pfannkuchensack@einfach-doof.de>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-02-29 23:27:36 +11:00
skunkworxdark
3a09bceea4 Update communityNodes.md
Updated description of metadata nodes
2024-02-26 14:20:09 -05:00
Riccardo Giovanetti
2ec6b51d8b translationBot(ui): update translation (Italian)
Currently translated at 97.2% (1430 of 1470 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-02-26 17:41:00 +11:00
B N
34b0ea20dc translationBot(ui): update translation (German)
Currently translated at 80.3% (1181 of 1470 strings)

translationBot(ui): update translation (German)

Currently translated at 80.1% (1178 of 1470 strings)

Co-authored-by: B N <berndnieschalk@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-02-26 17:41:00 +11:00
Alexander Eichhorn
9986fce1a6 translationBot(ui): update translation (German)
Currently translated at 80.0% (1176 of 1470 strings)

Co-authored-by: Alexander Eichhorn <pfannkuchensack@einfach-doof.de>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-02-23 07:57:15 +11:00
Riccardo Giovanetti
228f1d7f62 translationBot(ui): update translation (Italian)
Currently translated at 95.6% (1406 of 1470 strings)

translationBot(ui): update translation (Italian)

Currently translated at 93.9% (1381 of 1470 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-02-23 07:57:15 +11:00
B N
01a6378dc1 translationBot(ui): update translation (German)
Currently translated at 78.8% (1159 of 1470 strings)

Co-authored-by: B N <berndnieschalk@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-02-23 07:57:15 +11:00
Hosted Weblate
e01769294f translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2024-02-20 22:33:03 +11:00
chainchompa
16aa261e28 updated tooltip popovers (#5751)
## What type of PR is this? (check all applicable)

- [ ] Refactor
- [ ] Feature
- [ ] Bug Fix
- [X] Optimization
- [ ] Documentation Update
- [ ] Community Node Submission


## Have you discussed this change with the InvokeAI team?
- [X] Yes
- [ ] No, because:

      
## Have you updated all relevant documentation?
- [ ] Yes
- [ ] No


## Description
Added new tooltip popovers and updated copy of existing ones

## Related Tickets & Documents

<!--
For pull requests that relate or close an issue, please include them
below. 

For example having the text: "closes #1234" would connect the current
pull
request to issue 1234.  And when we merge the pull request, Github will
automatically close the issue.
-->

- Related Issue #
- Closes #

## QA Instructions, Screenshots, Recordings

<!-- 
Please provide steps on how to test changes, any hardware or 
software specifications as well as any other pertinent information. 
-->

## Merge Plan

<!--
A merge plan describes how this PR should be handled after it is
approved.

Example merge plans:
- "This PR can be merged when approved"
- "This must be squash-merged when approved"
- "DO NOT MERGE - I will rebase and tidy commits before merging"
- "#dev-chat on discord needs to be advised of this change when it is
merged"

A merge plan is particularly important for large PRs or PRs that touch
the
database in any way.
-->

## Added/updated tests?

- [ ] Yes
- [ ] No : _please replace this line with details on why tests
      have not been included_

## [optional] Are there any post deployment tasks we need to perform?
2024-02-19 13:12:47 -05:00
chainchompa
1dabf18d14 Merge branch 'main' into chainchompa/tooltip-popovers 2024-02-19 13:04:15 -05:00
Jennifer Player
115d92b1ae updated copy 2024-02-19 12:50:35 -05:00
Jennifer Player
f0d4c71960 updated tooltip popovers 2024-02-19 12:50:11 -05:00
gogurtenjoyer
3e48edda6f add latent-upscale to communityNodes.md (#5728)
Adds the 'latent upscale' community node
2024-02-19 16:53:35 +00:00
Riccardo Giovanetti
716b584f03 translationBot(ui): update translation (Italian)
Currently translated at 97.1% (1384 of 1424 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-02-19 08:18:33 +11:00
B N
d43b843c23 translationBot(ui): update translation (German)
Currently translated at 80.2% (1143 of 1424 strings)

Co-authored-by: B N <berndnieschalk@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-02-18 01:47:01 +11:00
psychedelicious
f36b5990ed fix(ui): do not provide auth headers for openapi.json 2024-02-15 10:38:26 -05:00
Millun Atluri
5706237ec7 {release} 3.7.0 (#5727)
## What type of PR is this? (check all applicable)

Release - Invoke 3.7.0

## Have you discussed this change with the InvokeAI team?
- [X] Yes
- [ ] No, because:

      
## Have you updated all relevant documentation?
- [X] Yes
- [ ] No


## Description
Invoke 3.7.0 Release

## QA Instructions, Screenshots, Recordings
Test Installer: 

[InvokeAI-installer-v3.7.0.zip](https://github.com/invoke-ai/InvokeAI/files/14298200/InvokeAI-installer-v3.7.0.zip)

<!-- 
Please provide steps on how to test changes, any hardware or 
software specifications as well as any other pertinent information. 
-->

## Merge Plan
Merge once approved
<!--
A merge plan describes how this PR should be handled after it is
approved.

Example merge plans:
- "This PR can be merged when approved"
- "This must be squash-merged when approved"
- "DO NOT MERGE - I will rebase and tidy commits before merging"
- "#dev-chat on discord needs to be advised of this change when it is
merged"

A merge plan is particularly important for large PRs or PRs that touch
the
database in any way.
-->

## Added/updated tests?

- [ ] Yes
- [X] No : _please replace this line with details on why tests
      have not been included_

## [optional] Are there any post deployment tasks we need to perform?
1. Release on PyPi
2. Release on GitHub
3. Announce on Discord
2024-02-15 07:59:20 -07:00
Millun Atluri
163b22a7b3 {release} 3.7.0 2024-02-15 07:34:31 -07:00
418 changed files with 12068 additions and 15449 deletions

View File

@@ -36,8 +36,10 @@ jobs:
- name: Typescript
run: 'pnpm run lint:tsc'
- name: Madge
run: 'pnpm run lint:madge'
run: 'pnpm run lint:dpdm'
- name: ESLint
run: 'pnpm run lint:eslint'
- name: Prettier
run: 'pnpm run lint:prettier'
- name: Knip
run: 'pnpm run lint:knip'

View File

@@ -6,33 +6,44 @@ default: help
help:
@echo Developer commands:
@echo
@echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
@echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
@echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
@echo "frontend-build Build the frontend in order to run on localhost:9090"
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
@echo "installer-zip Build the installer .zip file for the current version"
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
@echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
@echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
@echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
@echo "test" Run the unit tests.
@echo "frontend-install" Install the pnpm modules needed for the front end
@echo "frontend-build Build the frontend in order to run on localhost:9090"
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
@echo "installer-zip Build the installer .zip file for the current version"
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
# Runs ruff, fixing any safely-fixable errors and formatting
ruff:
ruff check . --fix
ruff format .
ruff check . --fix
ruff format .
# Runs ruff, fixing all errors it can fix and formatting
ruff-unsafe:
ruff check . --fix --unsafe-fixes
ruff format .
ruff check . --fix --unsafe-fixes
ruff format .
# Runs mypy, using the config in pyproject.toml
mypy:
mypy scripts/invokeai-web.py
mypy scripts/invokeai-web.py
# Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
# (many files are ignored by the config, so this is useful for checking all files)
mypy-all:
mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
# Run the unit tests
test:
pytest ./tests
# Install the pnpm modules needed for the front end
frontend-install:
rm -rf invokeai/frontend/web/node_modules
cd invokeai/frontend/web && pnpm install
# Build the frontend
frontend-build:

View File

@@ -32,6 +32,7 @@ To use a community workflow, download the the `.json` node graph file and load i
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
+ [Image Picker](#image-picker)
+ [Image Resize Plus](#image-resize-plus)
+ [Latent Upscale](#latent-upscale)
+ [Load Video Frame](#load-video-frame)
+ [Make 3D](#make-3d)
+ [Mask Operations](#mask-operations)
@@ -290,6 +291,13 @@ View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />
--------------------------------
### Latent Upscale
**Description:** This node uses a small (~2.4 MB) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of latent upscaling.
**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale)
--------------------------------
### Load Video Frame
@@ -346,12 +354,21 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
**Description:** A set of nodes for Metadata. Collect Metadata from within an `iterate` node & extract metadata from an image.
- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node.
- `Metadata From Image` - Provides Metadata from an image.
- `Metadata To String` - Extracts a String value of a label from metadata.
- `Metadata To Integer` - Extracts an Integer value of a label from metadata.
- `Metadata To Float` - Extracts a Float value of a label from metadata.
- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata.
- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node
- `Metadata From Image` - Provides Metadata from an image
- `Metadata To String` - Extracts a String value of a label from metadata
- `Metadata To Integer` - Extracts an Integer value of a label from metadata
- `Metadata To Float` - Extracts a Float value of a label from metadata
- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata
- `Metadata To Bool` - Extracts Bool types from metadata
- `Metadata To Model` - Extracts model types from metadata
- `Metadata To SDXL Model` - Extracts SDXL model types from metadata
- `Metadata To LoRAs` - Extracts LoRAs from metadata
- `Metadata To SDXL LoRAs` - Extracts SDXL LoRAs from metadata
- `Metadata To ControlNets` - Extracts ControlNets from metadata
- `Metadata To IP-Adapters` - Extracts IP-Adapters from metadata
- `Metadata To T2I-Adapters` - Extracts T2I-Adapters from metadata
- `Denoise Latents + Metadata` - This is an inherited version of the existing `Denoise Latents` node but with a metadata input and output.
**Node Link:** https://github.com/skunkworxdark/metadata-linked-nodes

View File

@@ -4,7 +4,6 @@ from logging import Logger
import torch
from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
@@ -16,14 +15,13 @@ from ..services.board_image_records.board_image_records_sqlite import SqliteBoar
from ..services.board_images.board_images_default import BoardImagesService
from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage
from ..services.boards.boards_default import BoardService
from ..services.bulk_download.bulk_download_default import BulkDownloadService
from ..services.config import InvokeAIAppConfig
from ..services.download import DownloadQueueService
from ..services.image_files.image_files_disk import DiskImageFileStorage
from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage
from ..services.images.images_default import ImageService
from ..services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
from ..services.invocation_processor.invocation_processor_default import DefaultInvocationProcessor
from ..services.invocation_queue.invocation_queue_memory import MemoryInvocationQueue
from ..services.invocation_services import InvocationServices
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
from ..services.invoker import Invoker
@@ -33,7 +31,6 @@ from ..services.model_records import ModelRecordServiceSQL
from ..services.names.names_default import SimpleNameService
from ..services.session_processor.session_processor_default import DefaultSessionProcessor
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
from ..services.shared.graph import GraphExecutionState
from ..services.urls.urls_default import LocalUrlService
from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
from .events import FastAPIEventService
@@ -85,7 +82,7 @@ class ApiDependencies:
board_records = SqliteBoardRecordStorage(db=db)
boards = BoardService()
events = FastAPIEventService(event_handler_id)
graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
bulk_download = BulkDownloadService()
image_records = SqliteImageRecordStorage(db=db)
images = ImageService()
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
@@ -105,8 +102,6 @@ class ApiDependencies:
)
names = SimpleNameService()
performance_statistics = InvocationStatsService()
processor = DefaultInvocationProcessor()
queue = MemoryInvocationQueue()
session_processor = DefaultSessionProcessor()
session_queue = SqliteSessionQueue(db=db)
urls = LocalUrlService()
@@ -117,9 +112,9 @@ class ApiDependencies:
board_images=board_images,
board_records=board_records,
boards=boards,
bulk_download=bulk_download,
configuration=configuration,
events=events,
graph_execution_manager=graph_execution_manager,
image_files=image_files,
image_records=image_records,
images=images,
@@ -129,8 +124,6 @@ class ApiDependencies:
download_queue=download_queue_service,
names=names,
performance_statistics=performance_statistics,
processor=processor,
queue=queue,
session_processor=session_processor,
session_queue=session_queue,
urls=urls,

View File

@@ -2,7 +2,7 @@ import io
import traceback
from typing import Optional
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
@@ -375,16 +375,67 @@ async def unstar_images_in_list(
class ImagesDownloaded(BaseModel):
response: Optional[str] = Field(
description="If defined, the message to display to the user when images begin downloading"
default=None, description="The message to display to the user when images begin downloading"
)
bulk_download_item_name: Optional[str] = Field(
default=None, description="The name of the bulk download item for which events will be emitted"
)
@images_router.post("/download", operation_id="download_images_from_list", response_model=ImagesDownloaded)
@images_router.post(
"/download", operation_id="download_images_from_list", response_model=ImagesDownloaded, status_code=202
)
async def download_images_from_list(
image_names: list[str] = Body(description="The list of names of images to download", embed=True),
background_tasks: BackgroundTasks,
image_names: Optional[list[str]] = Body(
default=None, description="The list of names of images to download", embed=True
),
board_id: Optional[str] = Body(
default=None, description="The board from which image should be downloaded from", embed=True
default=None, description="The board from which image should be downloaded", embed=True
),
) -> ImagesDownloaded:
# return ImagesDownloaded(response="Your images are downloading")
raise HTTPException(status_code=501, detail="Endpoint is not yet implemented")
if (image_names is None or len(image_names) == 0) and board_id is None:
raise HTTPException(status_code=400, detail="No images or board id specified.")
bulk_download_item_id: str = ApiDependencies.invoker.services.bulk_download.generate_item_id(board_id)
background_tasks.add_task(
ApiDependencies.invoker.services.bulk_download.handler,
image_names,
board_id,
bulk_download_item_id,
)
return ImagesDownloaded(bulk_download_item_name=bulk_download_item_id + ".zip")
@images_router.api_route(
"/download/{bulk_download_item_name}",
methods=["GET"],
operation_id="get_bulk_download_item",
response_class=Response,
responses={
200: {
"description": "Return the complete bulk download item",
"content": {"application/zip": {}},
},
404: {"description": "Image not found"},
},
)
async def get_bulk_download_item(
background_tasks: BackgroundTasks,
bulk_download_item_name: str = Path(description="The bulk_download_item_name of the bulk download item to get"),
) -> FileResponse:
"""Gets a bulk download zip file"""
try:
path = ApiDependencies.invoker.services.bulk_download.get_path(bulk_download_item_name)
response = FileResponse(
path,
media_type="application/zip",
filename=bulk_download_item_name,
content_disposition_type="inline",
)
response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
background_tasks.add_task(ApiDependencies.invoker.services.bulk_download.delete, bulk_download_item_name)
return response
except Exception:
raise HTTPException(status_code=404)
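A hypothetical client flow for the two endpoints above, using `requests`; the host and the `/api/v1/images` prefix are assumptions about how this router is mounted:

```
# Sketch of the bulk-download round trip, assuming the router prefix.
import requests

BASE = "http://localhost:9090/api/v1/images"

# Start a bulk download; the server answers 202 with the zip's item name.
r = requests.post(f"{BASE}/download", json={"image_names": ["img_1.png", "img_2.png"]})
item_name = r.json()["bulk_download_item_name"]  # e.g. "<id>.zip"

# Fetch the finished archive once the bulk_download events report completion;
# note the server schedules the file for deletion after serving it.
zip_bytes = requests.get(f"{BASE}/download/{item_name}").content
with open(item_name, "wb") as f:
    f.write(zip_bytes)
```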

View File

@@ -9,11 +9,11 @@ from typing import Any, Dict, List, Optional, Set
from fastapi import Body, Path, Query, Response
from fastapi.routing import APIRouter
from pydantic import BaseModel, ConfigDict
from pydantic import BaseModel, ConfigDict, Field
from starlette.exceptions import HTTPException
from typing_extensions import Annotated
from invokeai.app.services.model_install import ModelInstallJob, ModelSource
from invokeai.app.services.model_install import ModelInstallJob
from invokeai.app.services.model_records import (
DuplicateModelException,
InvalidModelException,
@@ -32,6 +32,7 @@ from invokeai.backend.model_manager.config import (
)
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
from invokeai.backend.model_manager.search import ModelSearch
from ..dependencies import ApiDependencies
@@ -164,6 +165,27 @@ async def list_model_records(
return ModelsList(models=found_models)
@model_manager_router.get(
"/get_by_attrs",
operation_id="get_model_records_by_attrs",
response_model=AnyModelConfig,
)
async def get_model_records_by_attrs(
name: str = Query(description="The name of the model"),
type: ModelType = Query(description="The type of the model"),
base: BaseModelType = Query(description="The base model of the model"),
) -> AnyModelConfig:
"""Gets a model by its attributes. The main use of this route is to provide backwards compatibility with the old
model manager, which identified models by a combination of name, base and type."""
configs = ApiDependencies.invoker.services.model_manager.store.search_by_attr(
base_model=base, model_type=type, model_name=name
)
if not configs:
raise HTTPException(status_code=404, detail="No model found with these attributes")
return configs[0]
@model_manager_router.get(
"/i/{key}",
operation_id="get_model_record",
@@ -201,7 +223,7 @@ async def list_model_summary(
@model_manager_router.get(
"/meta/i/{key}",
"/i/{key}/metadata",
operation_id="get_model_metadata",
responses={
200: {
@@ -209,7 +231,6 @@ async def list_model_summary(
"content": {"application/json": {"example": example_model_metadata}},
},
400: {"description": "Bad request"},
404: {"description": "No metadata available"},
},
)
async def get_model_metadata(
@@ -218,8 +239,7 @@ async def get_model_metadata(
"""Get a model metadata object."""
record_store = ApiDependencies.invoker.services.model_manager.store
result: Optional[AnyModelRepoMetadata] = record_store.get_metadata(key)
if not result:
raise HTTPException(status_code=404, detail="No metadata for a model with this key")
return result
@@ -234,6 +254,75 @@ async def list_tags() -> Set[str]:
return result
class FoundModel(BaseModel):
path: str = Field(description="Path to the model")
is_installed: bool = Field(description="Whether or not the model is already installed")
@model_manager_router.get(
"/scan_folder",
operation_id="scan_for_models",
responses={
200: {"description": "Directory scanned successfully"},
400: {"description": "Invalid directory path"},
},
status_code=200,
response_model=List[FoundModel],
)
async def scan_for_models(
scan_path: str = Query(description="Directory path to search for models", default=None),
) -> List[FoundModel]:
path = pathlib.Path(scan_path)
if not scan_path or not path.is_dir():
raise HTTPException(
status_code=400,
detail=f"The search path '{scan_path}' does not exist or is not directory",
)
search = ModelSearch()
try:
found_model_paths = search.search(path)
models_path = ApiDependencies.invoker.services.configuration.models_path
# If the search path includes the main models directory, we need to exclude core models from the list.
# TODO(MM2): Core models should be handled by the model manager so we can determine if they are installed
# without needing to crawl the filesystem.
core_models_path = pathlib.Path(models_path, "core").resolve()
non_core_model_paths = [p for p in found_model_paths if not p.is_relative_to(core_models_path)]
installed_models = ApiDependencies.invoker.services.model_manager.store.search_by_attr()
resolved_installed_model_paths: list[str] = []
installed_model_sources: list[str] = []
# This call lists all installed models.
for model in installed_models:
path = pathlib.Path(model.path)
# If the model has a source, we need to add it to the list of installed sources.
if model.source:
installed_model_sources.append(model.source)
# If the path is not absolute, that means it is in the app models directory, and we need to join it with
# the models path before resolving.
if not path.is_absolute():
resolved_installed_model_paths.append(str(pathlib.Path(models_path, path).resolve()))
continue
resolved_installed_model_paths.append(str(path.resolve()))
scan_results: list[FoundModel] = []
# Check if the model is installed by comparing the resolved paths, appending to the scan result.
for p in non_core_model_paths:
path = str(p)
is_installed = path in resolved_installed_model_paths or path in installed_model_sources
found_model = FoundModel(path=path, is_installed=is_installed)
scan_results.append(found_model)
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"An error occurred while searching the directory: {e}",
)
return scan_results
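A hypothetical call to the new scan endpoint; the `/api/v2/models` prefix is an assumption, while the `path`/`is_installed` response fields come from `FoundModel` above:

```
# Sketch of a scan_folder request, assuming the router's mount point.
import requests

found = requests.get(
    "http://localhost:9090/api/v2/models/scan_folder",
    params={"scan_path": "/data/models"},
).json()

for model in found:
    print(model["path"], "(installed)" if model["is_installed"] else "")
```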
@model_manager_router.get(
"/tags/search",
operation_id="search_by_metadata_tags",
@@ -350,8 +439,8 @@ async def add_model_record(
@model_manager_router.post(
"/heuristic_import",
operation_id="heuristic_import_model",
"/install",
operation_id="install_model",
responses={
201: {"description": "The model imported successfully"},
415: {"description": "Unrecognized file/folder format"},
@@ -360,12 +449,13 @@ async def add_model_record(
},
status_code=201,
)
async def heuristic_import(
source: str,
async def install_model(
source: str = Query(description="Model source to install, can be a local path, repo_id, or remote URL"),
# TODO(MM2): Can we type this?
config: Optional[Dict[str, Any]] = Body(
description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
default=None,
example={"name": "modelT", "description": "antique cars"},
example={"name": "string", "description": "string"},
),
access_token: Optional[str] = None,
) -> ModelInstallJob:
@@ -402,106 +492,7 @@ async def heuristic_import(
result: ModelInstallJob = installer.heuristic_import(
source=source,
config=config,
)
logger.info(f"Started installation of {source}")
except UnknownModelException as e:
logger.error(str(e))
raise HTTPException(status_code=424, detail=str(e))
except InvalidModelException as e:
logger.error(str(e))
raise HTTPException(status_code=415)
except ValueError as e:
logger.error(str(e))
raise HTTPException(status_code=409, detail=str(e))
return result
@model_manager_router.post(
"/install",
operation_id="import_model",
responses={
201: {"description": "The model imported successfully"},
415: {"description": "Unrecognized file/folder format"},
424: {"description": "The model appeared to import successfully, but could not be found in the model manager"},
409: {"description": "There is already a model corresponding to this path or repo_id"},
},
status_code=201,
)
async def import_model(
source: ModelSource,
config: Optional[Dict[str, Any]] = Body(
description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
default=None,
),
) -> ModelInstallJob:
"""Install a model using its local path, repo_id, or remote URL.
Models will be downloaded, probed, configured and installed in a
series of background threads. The return object has `status` attribute
that can be used to monitor progress.
The source object is a discriminated Union of LocalModelSource,
HFModelSource and URLModelSource. Set the "type" field to the
appropriate value:
* To install a local path using LocalModelSource, pass a source of form:
```
{
"type": "local",
"path": "/path/to/model",
"inplace": false
}
```
The "inplace" flag, if true, will register the model in place in its
current filesystem location. Otherwise, the model will be copied
into the InvokeAI models directory.
* To install a HuggingFace repo_id using HFModelSource, pass a source of form:
```
{
"type": "hf",
"repo_id": "stabilityai/stable-diffusion-2.0",
"variant": "fp16",
"subfolder": "vae",
"access_token": "f5820a918aaf01"
}
```
The `variant`, `subfolder` and `access_token` fields are optional.
* To install a remote model using an arbitrary URL, pass:
```
{
"type": "url",
"url": "http://www.civitai.com/models/123456",
"access_token": "f5820a918aaf01"
}
```
The `access_token` field is optional
The model's configuration record will be probed and filled in
automatically. To override the default guesses, pass "metadata"
with a Dict containing the attributes you wish to override.
Installation occurs in the background. Either use list_model_install_jobs()
to poll for completion, or listen on the event bus for the following events:
* "model_install_running"
* "model_install_completed"
* "model_install_error"
On successful completion, the event's payload will contain the field "key"
containing the installed ID of the model. On an error, the event's payload
will contain the fields "error_type" and "error" describing the nature of the
error and its traceback, respectively.
"""
logger = ApiDependencies.invoker.services.logger
try:
installer = ApiDependencies.invoker.services.model_manager.install
result: ModelInstallJob = installer.import_model(
source=source,
config=config,
access_token=access_token,
)
logger.info(f"Started installation of {source}")
except UnknownModelException as e:
@@ -637,6 +628,7 @@ async def convert_model(
Note that during the conversion process the key and model hash will change.
The return value is the model configuration for the converted model.
"""
model_manager = ApiDependencies.invoker.services.model_manager
logger = ApiDependencies.invoker.services.logger
loader = ApiDependencies.invoker.services.model_manager.load
store = ApiDependencies.invoker.services.model_manager.store
@@ -653,7 +645,7 @@ async def convert_model(
raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.")
# loading the model will convert it into a cached diffusers file
loader.load_model_by_config(model_config, submodel_type=SubModelType.Scheduler)
model_manager.load_model_by_config(model_config, submodel_type=SubModelType.Scheduler)
# Get the path of the converted model from the loader
cache_path = loader.convert_cache.cache_path(key)

View File

@@ -1,276 +0,0 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from fastapi import HTTPException, Path
from fastapi.routing import APIRouter
from ...services.shared.graph import GraphExecutionState
from ..dependencies import ApiDependencies
session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"])
# @session_router.post(
# "/",
# operation_id="create_session",
# responses={
# 200: {"model": GraphExecutionState},
# 400: {"description": "Invalid json"},
# },
# deprecated=True,
# )
# async def create_session(
# queue_id: str = Query(default="", description="The id of the queue to associate the session with"),
# graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"),
# ) -> GraphExecutionState:
# """Creates a new session, optionally initializing it with an invocation graph"""
# session = ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph)
# return session
# @session_router.get(
# "/",
# operation_id="list_sessions",
# responses={200: {"model": PaginatedResults[GraphExecutionState]}},
# deprecated=True,
# )
# async def list_sessions(
# page: int = Query(default=0, description="The page of results to get"),
# per_page: int = Query(default=10, description="The number of results per page"),
# query: str = Query(default="", description="The query string to search for"),
# ) -> PaginatedResults[GraphExecutionState]:
# """Gets a list of sessions, optionally searching"""
# if query == "":
# result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page)
# else:
# result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page)
# return result
@session_router.get(
"/{session_id}",
operation_id="get_session",
responses={
200: {"model": GraphExecutionState},
404: {"description": "Session not found"},
},
)
async def get_session(
session_id: str = Path(description="The id of the session to get"),
) -> GraphExecutionState:
"""Gets a session"""
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
if session is None:
raise HTTPException(status_code=404)
else:
return session
# @session_router.post(
# "/{session_id}/nodes",
# operation_id="add_node",
# responses={
# 200: {"model": str},
# 400: {"description": "Invalid node or link"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def add_node(
# session_id: str = Path(description="The id of the session"),
# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore
# description="The node to add"
# ),
# ) -> str:
# """Adds a node to the graph"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# try:
# session.add_node(node)
# ApiDependencies.invoker.services.graph_execution_manager.set(
# session
# ) # TODO: can this be done automatically, or add node through an API?
# return session.id
# except NodeAlreadyExecutedError:
# raise HTTPException(status_code=400)
# except IndexError:
# raise HTTPException(status_code=400)
# @session_router.put(
# "/{session_id}/nodes/{node_path}",
# operation_id="update_node",
# responses={
# 200: {"model": GraphExecutionState},
# 400: {"description": "Invalid node or link"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def update_node(
# session_id: str = Path(description="The id of the session"),
# node_path: str = Path(description="The path to the node in the graph"),
# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore
# description="The new node"
# ),
# ) -> GraphExecutionState:
# """Updates a node in the graph and removes all linked edges"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# try:
# session.update_node(node_path, node)
# ApiDependencies.invoker.services.graph_execution_manager.set(
# session
# ) # TODO: can this be done automatically, or add node through an API?
# return session
# except NodeAlreadyExecutedError:
# raise HTTPException(status_code=400)
# except IndexError:
# raise HTTPException(status_code=400)
# @session_router.delete(
# "/{session_id}/nodes/{node_path}",
# operation_id="delete_node",
# responses={
# 200: {"model": GraphExecutionState},
# 400: {"description": "Invalid node or link"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def delete_node(
# session_id: str = Path(description="The id of the session"),
# node_path: str = Path(description="The path to the node to delete"),
# ) -> GraphExecutionState:
# """Deletes a node in the graph and removes all linked edges"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# try:
# session.delete_node(node_path)
# ApiDependencies.invoker.services.graph_execution_manager.set(
# session
# ) # TODO: can this be done automatically, or add node through an API?
# return session
# except NodeAlreadyExecutedError:
# raise HTTPException(status_code=400)
# except IndexError:
# raise HTTPException(status_code=400)
# @session_router.post(
# "/{session_id}/edges",
# operation_id="add_edge",
# responses={
# 200: {"model": GraphExecutionState},
# 400: {"description": "Invalid node or link"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def add_edge(
# session_id: str = Path(description="The id of the session"),
# edge: Edge = Body(description="The edge to add"),
# ) -> GraphExecutionState:
# """Adds an edge to the graph"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# try:
# session.add_edge(edge)
# ApiDependencies.invoker.services.graph_execution_manager.set(
# session
# ) # TODO: can this be done automatically, or add node through an API?
# return session
# except NodeAlreadyExecutedError:
# raise HTTPException(status_code=400)
# except IndexError:
# raise HTTPException(status_code=400)
# # TODO: the edge being in the path here is really ugly, find a better solution
# @session_router.delete(
# "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}",
# operation_id="delete_edge",
# responses={
# 200: {"model": GraphExecutionState},
# 400: {"description": "Invalid node or link"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def delete_edge(
# session_id: str = Path(description="The id of the session"),
# from_node_id: str = Path(description="The id of the node the edge is coming from"),
# from_field: str = Path(description="The field of the node the edge is coming from"),
# to_node_id: str = Path(description="The id of the node the edge is going to"),
# to_field: str = Path(description="The field of the node the edge is going to"),
# ) -> GraphExecutionState:
# """Deletes an edge from the graph"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# try:
# edge = Edge(
# source=EdgeConnection(node_id=from_node_id, field=from_field),
# destination=EdgeConnection(node_id=to_node_id, field=to_field),
# )
# session.delete_edge(edge)
# ApiDependencies.invoker.services.graph_execution_manager.set(
# session
# ) # TODO: can this be done automatically, or add node through an API?
# return session
# except NodeAlreadyExecutedError:
# raise HTTPException(status_code=400)
# except IndexError:
# raise HTTPException(status_code=400)
# @session_router.put(
# "/{session_id}/invoke",
# operation_id="invoke_session",
# responses={
# 200: {"model": None},
# 202: {"description": "The invocation is queued"},
# 400: {"description": "The session has no invocations ready to invoke"},
# 404: {"description": "Session not found"},
# },
# deprecated=True,
# )
# async def invoke_session(
# queue_id: str = Query(description="The id of the queue to associate the session with"),
# session_id: str = Path(description="The id of the session to invoke"),
# all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"),
# ) -> Response:
# """Invokes a session"""
# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
# if session is None:
# raise HTTPException(status_code=404)
# if session.is_complete():
# raise HTTPException(status_code=400)
# ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all)
# return Response(status_code=202)
# @session_router.delete(
# "/{session_id}/invoke",
# operation_id="cancel_session_invoke",
# responses={202: {"description": "The invocation is canceled"}},
# deprecated=True,
# )
# async def cancel_session_invoke(
# session_id: str = Path(description="The id of the session to cancel"),
# ) -> Response:
# """Invokes a session"""
# ApiDependencies.invoker.cancel(session_id)
# return Response(status_code=202)

View File

@@ -12,16 +12,26 @@ class SocketIO:
__sio: AsyncServer
__app: ASGIApp
__sub_queue: str = "subscribe_queue"
__unsub_queue: str = "unsubscribe_queue"
__sub_bulk_download: str = "subscribe_bulk_download"
__unsub_bulk_download: str = "unsubscribe_bulk_download"
def __init__(self, app: FastAPI):
self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
app.mount("/ws", self.__app)
self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)
self.__sio.on("unsubscribe_queue", handler=self._handle_unsub_queue)
self.__sio.on(self.__sub_queue, handler=self._handle_sub_queue)
self.__sio.on(self.__unsub_queue, handler=self._handle_unsub_queue)
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._handle_queue_event)
local_handler.register(event_name=EventServiceBase.model_event, _func=self._handle_model_event)
self.__sio.on(self.__sub_bulk_download, handler=self._handle_sub_bulk_download)
self.__sio.on(self.__unsub_bulk_download, handler=self._handle_unsub_bulk_download)
local_handler.register(event_name=EventServiceBase.bulk_download_event, _func=self._handle_bulk_download_event)
async def _handle_queue_event(self, event: Event):
await self.__sio.emit(
event=event[1]["event"],
@@ -39,3 +49,18 @@ class SocketIO:
async def _handle_model_event(self, event: Event) -> None:
await self.__sio.emit(event=event[1]["event"], data=event[1]["data"])
async def _handle_bulk_download_event(self, event: Event):
await self.__sio.emit(
event=event[1]["event"],
data=event[1]["data"],
room=event[1]["data"]["bulk_download_id"],
)
async def _handle_sub_bulk_download(self, sid, data, *args, **kwargs):
if "bulk_download_id" in data:
await self.__sio.enter_room(sid, data["bulk_download_id"])
async def _handle_unsub_bulk_download(self, sid, data, *args, **kwargs):
if "bulk_download_id" in data:
await self.__sio.leave_room(sid, data["bulk_download_id"])
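A sketch of a client joining a bulk-download room with the `python-socketio` client; the server's event names are not shown in this diff, so a catch-all handler stands in, and the `bulk_download_id` value is illustrative:

```
# Hypothetical subscriber for the room-based events registered above.
import socketio

sio = socketio.Client()
sio.connect("http://localhost:9090", socketio_path="/ws/socket.io")

@sio.on("*")
def on_any_event(event, *args):
    # progress/completion events emitted to the room arrive here
    print(event, args)

# Join the room keyed by bulk_download_id; _handle_bulk_download_event above
# emits only to that room.
sio.emit("subscribe_bulk_download", {"bulk_download_id": "abc123"})
sio.wait()
```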

View File

@@ -2,6 +2,7 @@
# which are imported/used before parse_args() is called will get the default config values instead of the
# values from the command line or config file.
import sys
from contextlib import asynccontextmanager
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
from invokeai.version.invokeai_version import __version__
@@ -50,7 +51,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
images,
model_manager,
session_queue,
sessions,
utilities,
workflows,
)
@@ -72,9 +72,25 @@ logger = InvokeAILogger.get_logger(config=app_config)
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
@asynccontextmanager
async def lifespan(app: FastAPI):
# Add startup event to load dependencies
ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger)
yield
# Shut down threads
ApiDependencies.shutdown()
# Create the app
# TODO: create this all in a method so configuration/etc. can be passed in?
app = FastAPI(title="Invoke - Community Edition", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
app = FastAPI(
title="Invoke - Community Edition",
docs_url=None,
redoc_url=None,
separate_input_output_schemas=False,
lifespan=lifespan,
)
# Add event handler
event_handler_id: int = id(app)
@@ -97,21 +113,7 @@ app.add_middleware(
app.add_middleware(GZipMiddleware, minimum_size=1000)
# Add startup event to load dependencies
@app.on_event("startup")
async def startup_event() -> None:
ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger)
# Shut down threads
@app.on_event("shutdown")
async def shutdown_event() -> None:
ApiDependencies.shutdown()
# Include all routers
app.include_router(sessions.session_router, prefix="/api")
app.include_router(utilities.utilities_router, prefix="/api")
app.include_router(model_manager.model_manager_router, prefix="/api")
app.include_router(download_queue.download_queue_router, prefix="/api")
@@ -151,6 +153,8 @@ def custom_openapi() -> dict[str, Any]:
# TODO: note that we assume the schema_key here is the TYPE.__name__
# This could break in some cases, figure out a better way to do it
output_type_titles[schema_key] = output_schema["title"]
openapi_schema["components"]["schemas"][schema_key] = output_schema
openapi_schema["components"]["schemas"][schema_key]["class"] = "output"
# Add Node Editor UI helper schemas
ui_config_schemas = models_json_schema(
@@ -173,7 +177,6 @@ def custom_openapi() -> dict[str, Any]:
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
invoker_schema["output"] = outputs_ref
invoker_schema["class"] = "invocation"
openapi_schema["components"]["schemas"][f"{output_type_title}"]["class"] = "output"
# This code no longer seems to be necessary?
# Leave it here just in case

View File

@@ -8,13 +8,26 @@ import warnings
from abc import ABC, abstractmethod
from enum import Enum
from inspect import signature
from types import UnionType
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union, cast
from typing import (
TYPE_CHECKING,
Annotated,
Any,
Callable,
ClassVar,
Iterable,
Literal,
Optional,
Type,
TypeVar,
Union,
cast,
)
import semver
from pydantic import BaseModel, ConfigDict, Field, create_model
from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model
from pydantic.fields import FieldInfo
from pydantic_core import PydanticUndefined
from typing_extensions import TypeAliasType
from invokeai.app.invocations.fields import (
FieldKind,
@@ -84,6 +97,7 @@ class BaseInvocationOutput(BaseModel):
"""
_output_classes: ClassVar[set[BaseInvocationOutput]] = set()
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
@classmethod
def register_output(cls, output: BaseInvocationOutput) -> None:
@@ -96,10 +110,14 @@ class BaseInvocationOutput(BaseModel):
return cls._output_classes
@classmethod
def get_outputs_union(cls) -> UnionType:
"""Gets a union of all invocation outputs."""
outputs_union = Union[tuple(cls._output_classes)] # type: ignore [valid-type]
return outputs_union # type: ignore [return-value]
def get_typeadapter(cls) -> TypeAdapter[Any]:
"""Gets a pydantc TypeAdapter for the union of all invocation output types."""
if not cls._typeadapter:
InvocationOutputsUnion = TypeAliasType(
"InvocationOutputsUnion", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
)
cls._typeadapter = TypeAdapter(InvocationOutputsUnion)
return cls._typeadapter
@classmethod
def get_output_types(cls) -> Iterable[str]:
@@ -148,6 +166,7 @@ class BaseInvocation(ABC, BaseModel):
"""
_invocation_classes: ClassVar[set[BaseInvocation]] = set()
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
@classmethod
def get_type(cls) -> str:
@@ -160,10 +179,14 @@ class BaseInvocation(ABC, BaseModel):
cls._invocation_classes.add(invocation)
@classmethod
def get_invocations_union(cls) -> UnionType:
"""Gets a union of all invocation types."""
invocations_union = Union[tuple(cls._invocation_classes)] # type: ignore [valid-type]
return invocations_union # type: ignore [return-value]
def get_typeadapter(cls) -> TypeAdapter[Any]:
"""Gets a pydantc TypeAdapter for the union of all invocation types."""
if not cls._typeadapter:
InvocationsUnion = TypeAliasType(
"InvocationsUnion", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
)
cls._typeadapter = TypeAdapter(InvocationsUnion)
return cls._typeadapter
@classmethod
def get_invocations(cls) -> Iterable[BaseInvocation]:
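This is the general pydantic v2 pattern the two `get_typeadapter` methods apply: build a tagged union once, cache a `TypeAdapter`, and use it to parse dicts into the right subclass. Shown standalone here with toy models rather than the real invocation classes:

```
# Discriminated-union parsing with TypeAdapter, mirroring get_typeadapter().
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter
from typing_extensions import TypeAliasType

class Add(BaseModel):
    type: Literal["add"] = "add"
    a: int
    b: int

class Mul(BaseModel):
    type: Literal["mul"] = "mul"
    a: int
    b: int

# Tagged union keyed on the "type" field, wrapped in a cached-style adapter.
NodesUnion = TypeAliasType("NodesUnion", Annotated[Union[Add, Mul], Field(discriminator="type")])
adapter = TypeAdapter(NodesUnion)

node = adapter.validate_python({"type": "mul", "a": 2, "b": 3})
assert isinstance(node, Mul)
```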

View File

@@ -1,24 +1,15 @@
from typing import Iterator, List, Optional, Tuple, Union
from typing import Iterator, List, Optional, Tuple, Union, cast
import torch
from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from transformers import CLIPTokenizer
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
InputField,
OutputField,
UIComponent,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIComponent
from invokeai.app.invocations.primitives import ConditioningOutput
from invokeai.app.services.model_records import UnknownModelException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.ti_utils import extract_ti_triggers_from_prompt
from invokeai.app.util.ti_utils import generate_ti_list
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_manager import ModelType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
@@ -26,15 +17,9 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
ExtraConditioningInfo,
SDXLConditioningInfo,
)
from invokeai.backend.textual_inversion import TextualInversionModelRaw
from invokeai.backend.util.devices import torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
invocation,
invocation_output,
)
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from .model import ClipField
# unconditioned: Optional[torch.Tensor]
@@ -70,7 +55,11 @@ class CompelInvocation(BaseInvocation):
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ConditioningOutput:
tokenizer_info = context.models.load(**self.clip.tokenizer.model_dump())
tokenizer_model = tokenizer_info.model
assert isinstance(tokenizer_model, CLIPTokenizer)
text_encoder_info = context.models.load(**self.clip.text_encoder.model_dump())
text_encoder_model = text_encoder_info.model
assert isinstance(text_encoder_model, CLIPTextModel)
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
for lora in self.clip.loras:
@@ -82,21 +71,10 @@ class CompelInvocation(BaseInvocation):
# loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
ti_list = []
for trigger in extract_ti_triggers_from_prompt(self.prompt):
name = trigger[1:-1]
try:
loaded_model = context.models.load(**self.clip.text_encoder.model_dump()).model
assert isinstance(loaded_model, TextualInversionModelRaw)
ti_list.append((name, loaded_model))
except UnknownModelException:
# print(e)
# import traceback
# print(traceback.format_exc())
print(f'Warn: trigger: "{trigger}" not found')
ti_list = generate_ti_list(self.prompt, text_encoder_info.config.base, context)
with (
ModelPatcher.apply_ti(tokenizer_info.model, text_encoder_info.model, ti_list) as (
ModelPatcher.apply_ti(tokenizer_model, text_encoder_model, ti_list) as (
tokenizer,
ti_manager,
),
@@ -104,8 +82,9 @@ class CompelInvocation(BaseInvocation):
# Apply the LoRA after text_encoder has been moved to its target device for faster patching.
ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder_info.model, self.clip.skipped_layers),
ModelPatcher.apply_clip_skip(text_encoder_model, self.clip.skipped_layers),
):
assert isinstance(text_encoder, CLIPTextModel)
compel = Compel(
tokenizer=tokenizer,
text_encoder=text_encoder,
@@ -155,7 +134,11 @@ class SDXLPromptInvocationBase:
zero_on_empty: bool,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[ExtraConditioningInfo]]:
tokenizer_info = context.models.load(**clip_field.tokenizer.model_dump())
tokenizer_model = tokenizer_info.model
assert isinstance(tokenizer_model, CLIPTokenizer)
text_encoder_info = context.models.load(**clip_field.text_encoder.model_dump())
text_encoder_model = text_encoder_info.model
assert isinstance(text_encoder_model, (CLIPTextModel, CLIPTextModelWithProjection))
# return zero on empty
if prompt == "" and zero_on_empty:
@@ -189,25 +172,10 @@ class SDXLPromptInvocationBase:
# loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
ti_list = []
for trigger in extract_ti_triggers_from_prompt(prompt):
name = trigger[1:-1]
try:
ti_model = context.models.load_by_attrs(
model_name=name, base_model=text_encoder_info.config.base, model_type=ModelType.TextualInversion
).model
assert isinstance(ti_model, TextualInversionModelRaw)
ti_list.append((name, ti_model))
except UnknownModelException:
# print(e)
# import traceback
# print(traceback.format_exc())
logger.warning(f'trigger: "{trigger}" not found')
except ValueError:
logger.warning(f'trigger: "{trigger}" matched more than one similarly-named textual inversion model')
ti_list = generate_ti_list(prompt, text_encoder_info.config.base, context)
with (
ModelPatcher.apply_ti(tokenizer_info.model, text_encoder_info.model, ti_list) as (
ModelPatcher.apply_ti(tokenizer_model, text_encoder_model, ti_list) as (
tokenizer,
ti_manager,
),
@@ -215,8 +183,10 @@ class SDXLPromptInvocationBase:
# Apply the LoRA after text_encoder has been moved to its target device for faster patching.
ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder_info.model, clip_field.skipped_layers),
ModelPatcher.apply_clip_skip(text_encoder_model, clip_field.skipped_layers),
):
assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
text_encoder = cast(CLIPTextModel, text_encoder)
compel = Compel(
tokenizer=tokenizer,
text_encoder=text_encoder,
@@ -417,7 +387,7 @@ class ClipSkipInvocation(BaseInvocation):
"""Skip layers in clip text_encoder model."""
clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP")
skipped_layers: int = InputField(default=0, description=FieldDescriptions.skipped_layers)
skipped_layers: int = InputField(default=0, ge=0, description=FieldDescriptions.skipped_layers)
def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput:
self.clip.skipped_layers += self.skipped_layers

View File

@@ -199,6 +199,7 @@ class DenoiseMaskField(BaseModel):
mask_name: str = Field(description="The name of the mask image")
masked_latents_name: Optional[str] = Field(default=None, description="The name of the masked image latents")
gradient: bool = Field(default=False, description="Used for gradient inpainting")
class LatentsField(BaseModel):

View File

@@ -22,11 +22,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker
from .baseinvocation import (
BaseInvocation,
Classification,
invocation,
)
from .baseinvocation import BaseInvocation, Classification, invocation
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.1")
@@ -934,3 +930,40 @@ class SaveImageInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=image)
return ImageOutput.build(image_dto)
@invocation(
"canvas_paste_back",
title="Canvas Paste Back",
tags=["image", "combine"],
category="image",
version="1.0.0",
)
class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Combines two images by using the mask provided. Intended for use on the Unified Canvas."""
source_image: ImageField = InputField(description="The source image")
target_image: ImageField = InputField(default=None, description="The target image")
mask: ImageField = InputField(
description="The mask to use when pasting",
)
mask_blur: int = InputField(default=0, ge=0, description="The amount to blur the mask by")
def _prepare_mask(self, mask: Image.Image) -> Image.Image:
mask_array = numpy.array(mask)
kernel = numpy.ones((self.mask_blur, self.mask_blur), numpy.uint8)
dilated_mask_array = cv2.erode(mask_array, kernel, iterations=3)
dilated_mask = Image.fromarray(dilated_mask_array)
if self.mask_blur > 0:
mask = dilated_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
return ImageOps.invert(mask.convert("L"))
def invoke(self, context: InvocationContext) -> ImageOutput:
source_image = context.images.get_pil(self.source_image.image_name)
target_image = context.images.get_pil(self.target_image.image_name)
mask = self._prepare_mask(context.images.get_pil(self.mask.image_name))
source_image.paste(target_image, (0, 0), mask)
image_dto = context.images.save(image=source_image)
return ImageOutput.build(image_dto)

View File

@@ -23,7 +23,7 @@ from diffusers.models.attention_processor import (
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers import DPMSolverSDEScheduler
from diffusers.schedulers import SchedulerMixin as Scheduler
from PIL import Image
from PIL import Image, ImageFilter
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
@@ -128,7 +128,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
ui_order=4,
)
def prep_mask_tensor(self, mask_image: Image) -> torch.Tensor:
def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
if mask_image.mode != "L":
mask_image = mask_image.convert("L")
mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
@@ -169,6 +169,62 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
return DenoiseMaskOutput.build(
mask_name=mask_name,
masked_latents_name=masked_latents_name,
gradient=False,
)
@invocation(
"create_gradient_mask",
title="Create Gradient Mask",
tags=["mask", "denoise"],
category="latents",
version="1.0.0",
)
class CreateGradientMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""
mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
edge_radius: int = InputField(
default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
)
coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
minimum_denoise: float = InputField(
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
if self.coherence_mode == "Box Blur":
blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
else: # Gaussian Blur OR Staged
# Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
# redistribute blur so that the edges are 0 and blur out to 1
blur_tensor = (blur_tensor - 0.5) * 2
threshold = 1 - self.minimum_denoise
if self.coherence_mode == "Staged":
# wherever the blur_tensor is masked to any degree, convert it to threshold
blur_tensor = torch.where((blur_tensor < 1), threshold, blur_tensor)
else:
# wherever the blur_tensor is above threshold but less than 1, drop it to threshold
blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
# multiply original mask to force actually masked regions to 0
blur_tensor = mask_tensor * blur_tensor
mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
return DenoiseMaskOutput.build(
mask_name=mask_name,
masked_latents_name=None,
gradient=True,
)
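The tensor math above maps the blurred mask from [0, 1] into [-1, 1], so fully masked pixels land at -1 and untouched pixels stay at 1, then clamps the feathered band to 1 - minimum_denoise. A small torch sketch of just that redistribution step (names are illustrative, not from the source):

import torch

def redistribute(blur: torch.Tensor, minimum_denoise: float, staged: bool) -> torch.Tensor:
    # Rescale [0, 1] -> [-1, 1]: fully masked pixels go to -1, untouched pixels stay at 1.
    blur = (blur - 0.5) * 2
    threshold = 1 - minimum_denoise
    if staged:
        # Anything the mask touches at all is pinned to the threshold.
        return torch.where(blur < 1, torch.tensor(threshold), blur)
    # Only the feathered band between threshold and 1 is pinned to the threshold.
    return torch.where((blur > threshold) & (blur < 1), torch.tensor(threshold), blur)

blur = torch.tensor([0.0, 0.4, 0.8, 0.95, 1.0])
# With minimum_denoise=0.2 the threshold is 0.8: only 0.9 in the rescaled
# band is clamped, giving tensor([-1.0000, -0.2000, 0.6000, 0.8000, 1.0000]).
print(redistribute(blur, minimum_denoise=0.2, staged=False))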
@@ -606,9 +662,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
def prep_inpaint_mask(
self, context: InvocationContext, latents: torch.Tensor
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], bool]:
if self.denoise_mask is None:
return None, None
return None, None, False
mask = context.tensors.load(self.denoise_mask.mask_name)
mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
@@ -617,7 +673,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
else:
masked_latents = None
return 1 - mask, masked_latents
return 1 - mask, masked_latents, self.denoise_mask.gradient
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
@@ -644,7 +700,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
if seed is None:
seed = 0
mask, masked_latents = self.prep_inpaint_mask(context, latents)
mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, latents)
# TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets,
# below. Investigate whether this is appropriate.
@@ -732,6 +788,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
seed=seed,
mask=mask,
masked_latents=masked_latents,
gradient_mask=gradient_mask,
num_inference_steps=num_inference_steps,
conditioning_data=conditioning_data,
control_data=controlnet_data,

View File

@@ -33,7 +33,7 @@ class MetadataItemField(BaseModel):
class LoRAMetadataField(BaseModel):
"""LoRA Metadata Field"""
lora: LoRAModelField = Field(description=FieldDescriptions.lora_model)
model: LoRAModelField = Field(description=FieldDescriptions.lora_model)
weight: float = Field(description=FieldDescriptions.lora_weight)
@@ -114,7 +114,7 @@ GENERATION_MODES = Literal[
]
@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.1")
@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.1.1")
class CoreMetadataInvocation(BaseInvocation):
"""Collects core generation metadata into a MetadataField"""

View File

@@ -299,9 +299,13 @@ class DenoiseMaskOutput(BaseInvocationOutput):
denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run")
@classmethod
def build(cls, mask_name: str, masked_latents_name: Optional[str] = None) -> "DenoiseMaskOutput":
def build(
cls, mask_name: str, masked_latents_name: Optional[str] = None, gradient: bool = False
) -> "DenoiseMaskOutput":
return cls(
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name),
denoise_mask=DenoiseMaskField(
mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=gradient
),
)
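The widened build signature keeps old call sites working: gradient defaults to False, so only the new Create Gradient Mask node needs to pass it. A self-contained pydantic sketch mirroring the changed field and builder (hypothetical stand-ins for the real output classes):

from typing import Optional
from pydantic import BaseModel

class DenoiseMaskField(BaseModel):
    mask_name: str
    masked_latents_name: Optional[str] = None
    gradient: bool = False

class DenoiseMaskOutput(BaseModel):
    denoise_mask: DenoiseMaskField

    @classmethod
    def build(
        cls, mask_name: str, masked_latents_name: Optional[str] = None, gradient: bool = False
    ) -> "DenoiseMaskOutput":
        return cls(
            denoise_mask=DenoiseMaskField(
                mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=gradient
            )
        )

# Old call sites are unchanged; gradient masks opt in explicitly.
print(DenoiseMaskOutput.build(mask_name="mask_1").denoise_mask.gradient)                  # False
print(DenoiseMaskOutput.build(mask_name="mask_2", gradient=True).denoise_mask.gradient)   # True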

View File

@@ -0,0 +1,44 @@
from abc import ABC, abstractmethod
from typing import Optional
class BulkDownloadBase(ABC):
"""Responsible for creating a zip file containing the images specified by the given image names or board id."""
@abstractmethod
def handler(
self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str]
) -> None:
"""
Create a zip file containing the images specified by the given image names or board id.
:param image_names: A list of image names to include in the zip file.
:param board_id: The ID of the board. If provided, all images associated with the board will be included in the zip file.
:param bulk_download_item_id: The bulk_download_item_id that will be used to retrieve the bulk download item when it is prepared; if none is provided, a UUID will be generated.
"""
@abstractmethod
def get_path(self, bulk_download_item_name: str) -> str:
"""
Get the path to the bulk download file.
:param bulk_download_item_name: The name of the bulk download item.
:return: The path to the bulk download file.
"""
@abstractmethod
def generate_item_id(self, board_id: Optional[str]) -> str:
"""
Generate an item ID for a bulk download item.
:param board_id: The ID of the board whose name is to be included in the item ID.
:return: The generated item ID.
"""
@abstractmethod
def delete(self, bulk_download_item_name: str) -> None:
"""
Delete the bulk download file.
:param bulk_download_item_name: The name of the bulk download item.
"""

View File

@@ -0,0 +1,25 @@
DEFAULT_BULK_DOWNLOAD_ID = "default"
class BulkDownloadException(Exception):
"""Exception raised when a bulk download fails."""
def __init__(self, message="Bulk download failed"):
super().__init__(message)
self.message = message
class BulkDownloadTargetException(BulkDownloadException):
"""Exception raised when a bulk download target is not found."""
def __init__(self, message="The bulk download target was not found"):
super().__init__(message)
self.message = message
class BulkDownloadParametersException(BulkDownloadException):
"""Exception raised when a bulk download parameter is invalid."""
def __init__(self, message="No image names or board ID provided"):
super().__init__(message)
self.message = message
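Because both specific exceptions subclass BulkDownloadException, a caller can catch the base class alone; the service's handler below lists BulkDownloadParametersException alongside the base, which is redundant but more explicit. A quick demonstration of the hierarchy, assuming the import path shown in this diff:

from invokeai.app.services.bulk_download.bulk_download_common import (
    BulkDownloadException,
    BulkDownloadParametersException,
    BulkDownloadTargetException,
)

for exc in (BulkDownloadParametersException(), BulkDownloadTargetException()):
    try:
        raise exc
    except BulkDownloadException as e:  # the base class catches both subclasses
        print(type(e).__name__, "->", e.message)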

View File

@@ -0,0 +1,157 @@
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional, Union
from zipfile import ZipFile
from invokeai.app.services.board_records.board_records_common import BoardRecordNotFoundException
from invokeai.app.services.bulk_download.bulk_download_common import (
DEFAULT_BULK_DOWNLOAD_ID,
BulkDownloadException,
BulkDownloadParametersException,
BulkDownloadTargetException,
)
from invokeai.app.services.image_records.image_records_common import ImageRecordNotFoundException
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invoker import Invoker
from invokeai.app.util.misc import uuid_string
from .bulk_download_base import BulkDownloadBase
class BulkDownloadService(BulkDownloadBase):
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
def __init__(self):
self._temp_directory = TemporaryDirectory()
self._bulk_downloads_folder = Path(self._temp_directory.name) / "bulk_downloads"
self._bulk_downloads_folder.mkdir(parents=True, exist_ok=True)
def handler(
self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str]
) -> None:
bulk_download_id: str = DEFAULT_BULK_DOWNLOAD_ID
bulk_download_item_id = bulk_download_item_id or uuid_string()
bulk_download_item_name = bulk_download_item_id + ".zip"
self._signal_job_started(bulk_download_id, bulk_download_item_id, bulk_download_item_name)
try:
image_dtos: list[ImageDTO] = []
if board_id:
image_dtos = self._board_handler(board_id)
elif image_names:
image_dtos = self._image_handler(image_names)
else:
raise BulkDownloadParametersException()
bulk_download_item_name: str = self._create_zip_file(image_dtos, bulk_download_item_id)
self._signal_job_completed(bulk_download_id, bulk_download_item_id, bulk_download_item_name)
except (
ImageRecordNotFoundException,
BoardRecordNotFoundException,
BulkDownloadException,
BulkDownloadParametersException,
) as e:
self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e)
except Exception as e:
self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e)
self._invoker.services.logger.error("Problem bulk downloading images.")
raise e
def _image_handler(self, image_names: list[str]) -> list[ImageDTO]:
return [self._invoker.services.images.get_dto(image_name) for image_name in image_names]
def _board_handler(self, board_id: str) -> list[ImageDTO]:
image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(board_id)
return self._image_handler(image_names)
def generate_item_id(self, board_id: Optional[str]) -> str:
return uuid_string() if board_id is None else self._get_clean_board_name(board_id) + "_" + uuid_string()
def _get_clean_board_name(self, board_id: str) -> str:
if board_id == "none":
return "Uncategorized"
return self._clean_string_to_path_safe(self._invoker.services.board_records.get(board_id).board_name)
def _create_zip_file(self, image_dtos: list[ImageDTO], bulk_download_item_id: str) -> str:
"""
Create a zip file containing the images specified by the given image names or board id.
If a download with the same bulk_download_item_id already exists, it will be overwritten.
:return: The name of the zip file.
"""
zip_file_name = bulk_download_item_id + ".zip"
zip_file_path = self._bulk_downloads_folder / zip_file_name
with ZipFile(zip_file_path, "w") as zip_file:
for image_dto in image_dtos:
image_zip_path = Path(image_dto.image_category.value) / image_dto.image_name
image_disk_path = self._invoker.services.images.get_path(image_dto.image_name)
zip_file.write(image_disk_path, arcname=image_zip_path)
return str(zip_file_name)
# from https://stackoverflow.com/questions/7406102/create-sane-safe-filename-from-any-unsafe-string
def _clean_string_to_path_safe(self, s: str) -> str:
"""Clean a string to be path safe."""
return "".join([c for c in s if c.isalpha() or c.isdigit() or c == " " or c == "_" or c == "-"]).rstrip()
def _signal_job_started(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
) -> None:
"""Signal that a bulk download job has started."""
if self._invoker:
assert bulk_download_id is not None
self._invoker.services.events.emit_bulk_download_started(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
)
def _signal_job_completed(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
) -> None:
"""Signal that a bulk download job has completed."""
if self._invoker:
assert bulk_download_id is not None
assert bulk_download_item_name is not None
self._invoker.services.events.emit_bulk_download_completed(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
)
def _signal_job_failed(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, exception: Exception
) -> None:
"""Signal that a bulk download job has failed."""
if self._invoker:
assert bulk_download_id is not None
assert exception is not None
self._invoker.services.events.emit_bulk_download_failed(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
error=str(exception),
)
def stop(self, *args, **kwargs):
self._temp_directory.cleanup()
def delete(self, bulk_download_item_name: str) -> None:
path = self.get_path(bulk_download_item_name)
Path(path).unlink()
def get_path(self, bulk_download_item_name: str) -> str:
path = str(self._bulk_downloads_folder / bulk_download_item_name)
if not self._is_valid_path(path):
raise BulkDownloadTargetException()
return path
def _is_valid_path(self, path: Union[str, Path]) -> bool:
"""Validates the path given for a bulk download."""
path = path if isinstance(path, Path) else Path(path)
return path.exists()
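The zip name is derived from the board name via _clean_string_to_path_safe, which whitelists alphanumerics, spaces, underscores, and hyphens. A standalone sketch of the same cleaning plus the item-id pattern used by generate_item_id (uuid.uuid4().hex[:8] stands in for the service's uuid_string helper):

import uuid

def clean_string_to_path_safe(s: str) -> str:
    # Keep only characters that are safe in a filename.
    return "".join(c for c in s if c.isalpha() or c.isdigit() or c in " _-").rstrip()

board_name = 'My Board: "best/worst" <2024>'
item_id = clean_string_to_path_safe(board_name) + "_" + uuid.uuid4().hex[:8]
print(item_id + ".zip")  # e.g. "My Board bestworst 2024_1a2b3c4d.zip"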

View File

@@ -156,6 +156,7 @@ class InvokeAISettings(BaseSettings):
"lora_dir",
"embedding_dir",
"controlnet_dir",
"conf_path",
]
@classmethod

View File

@@ -30,7 +30,6 @@ InvokeAI:
lora_dir: null
embedding_dir: null
controlnet_dir: null
conf_path: configs/models.yaml
models_dir: models
legacy_conf_dir: configs/stable-diffusion
db_dir: databases
@@ -123,7 +122,6 @@ a Path object:
root_path - path to InvokeAI root
output_path - path to default outputs directory
model_conf_path - path to models.yaml
conf - alias for the above
embedding_path - path to the embeddings directory
lora_path - path to the LoRA directory
@@ -163,7 +161,6 @@ two configs are kept in separate sections of the config file:
InvokeAI:
Paths:
root: /home/lstein/invokeai-main
conf_path: configs/models.yaml
legacy_conf_dir: configs/stable-diffusion
outdir: outputs
...
@@ -237,7 +234,6 @@ class InvokeAIAppConfig(InvokeAISettings):
# PATHS
root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths)
autoimport_dir : Path = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths)
conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
models_dir : Path = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths)
convert_cache_dir : Path = Field(default=Path('models/.cache'), description='Path to the converted models cache directory', json_schema_extra=Categories.Paths)
legacy_conf_dir : Path = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths)
@@ -301,6 +297,7 @@ class InvokeAIAppConfig(InvokeAISettings):
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
# this is not referred to in the source code and can be removed entirely
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)

View File

@@ -3,7 +3,7 @@
from typing import Any, Dict, List, Optional, Union
from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.session_queue.session_queue_common import (
BatchStatus,
EnqueueBatchResult,
@@ -16,6 +16,7 @@ from invokeai.backend.model_manager import AnyModelConfig
class EventServiceBase:
queue_event: str = "queue_event"
bulk_download_event: str = "bulk_download_event"
download_event: str = "download_event"
model_event: str = "model_event"
@@ -24,6 +25,14 @@ class EventServiceBase:
def dispatch(self, event_name: str, payload: Any) -> None:
pass
def _emit_bulk_download_event(self, event_name: str, payload: dict) -> None:
"""Bulk download events are emitted to a room with queue_id as the room name"""
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.bulk_download_event,
payload={"event": event_name, "data": payload},
)
def __emit_queue_event(self, event_name: str, payload: dict) -> None:
"""Queue events are emitted to a room with queue_id as the room name"""
payload["timestamp"] = get_timestamp()
@@ -204,52 +213,6 @@ class EventServiceBase:
},
)
def emit_session_retrieval_error(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
error_type: str,
error: str,
) -> None:
"""Emitted when session retrieval fails"""
self.__emit_queue_event(
event_name="session_retrieval_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"error_type": error_type,
"error": error,
},
)
def emit_invocation_retrieval_error(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
node_id: str,
error_type: str,
error: str,
) -> None:
"""Emitted when invocation retrieval fails"""
self.__emit_queue_event(
event_name="invocation_retrieval_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node_id": node_id,
"error_type": error_type,
"error": error,
},
)
def emit_session_canceled(
self,
queue_id: str,
@@ -394,6 +357,7 @@ class EventServiceBase:
bytes: int,
total_bytes: int,
parts: List[Dict[str, Union[str, int]]],
id: int,
) -> None:
"""
Emit at intervals while the install job is in progress (remote models only).
@@ -413,6 +377,7 @@ class EventServiceBase:
"bytes": bytes,
"total_bytes": total_bytes,
"parts": parts,
"id": id,
},
)
@@ -427,7 +392,7 @@ class EventServiceBase:
payload={"source": source},
)
def emit_model_install_completed(self, source: str, key: str, total_bytes: Optional[int] = None) -> None:
def emit_model_install_completed(self, source: str, key: str, id: int, total_bytes: Optional[int] = None) -> None:
"""
Emit when an install job is completed successfully.
@@ -437,11 +402,7 @@ class EventServiceBase:
"""
self.__emit_model_event(
event_name="model_install_completed",
payload={
"source": source,
"total_bytes": total_bytes,
"key": key,
},
payload={"source": source, "total_bytes": total_bytes, "key": key, "id": id},
)
def emit_model_install_cancelled(self, source: str) -> None:
@@ -455,12 +416,7 @@ class EventServiceBase:
payload={"source": source},
)
def emit_model_install_error(
self,
source: str,
error_type: str,
error: str,
) -> None:
def emit_model_install_error(self, source: str, error_type: str, error: str, id: int) -> None:
"""
Emit when an install job encounters an exception.
@@ -470,9 +426,45 @@ class EventServiceBase:
"""
self.__emit_model_event(
event_name="model_install_error",
payload={"source": source, "error_type": error_type, "error": error, "id": id},
)
def emit_bulk_download_started(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
) -> None:
"""Emitted when a bulk download starts"""
self._emit_bulk_download_event(
event_name="bulk_download_started",
payload={
"source": source,
"error_type": error_type,
"bulk_download_id": bulk_download_id,
"bulk_download_item_id": bulk_download_item_id,
"bulk_download_item_name": bulk_download_item_name,
},
)
def emit_bulk_download_completed(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
) -> None:
"""Emitted when a bulk download completes"""
self._emit_bulk_download_event(
event_name="bulk_download_completed",
payload={
"bulk_download_id": bulk_download_id,
"bulk_download_item_id": bulk_download_item_id,
"bulk_download_item_name": bulk_download_item_name,
},
)
def emit_bulk_download_failed(
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, error: str
) -> None:
"""Emitted when a bulk download fails"""
self._emit_bulk_download_event(
event_name="bulk_download_failed",
payload={
"bulk_download_id": bulk_download_id,
"bulk_download_item_id": bulk_download_item_id,
"bulk_download_item_name": bulk_download_item_name,
"error": error,
},
)
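All three bulk download emitters funnel through _emit_bulk_download_event, which stamps a timestamp onto the payload and wraps it in an {event, data} envelope under the bulk_download_event channel. A standalone sketch of that envelope pattern, with print standing in for the real dispatch and int(time.time()) for the module's get_timestamp helper:

import time
from typing import Any

BULK_DOWNLOAD_EVENT = "bulk_download_event"

def dispatch(event_name: str, payload: Any) -> None:
    print(event_name, payload)

def emit_bulk_download_event(event_name: str, payload: dict) -> None:
    # Stamp and wrap, exactly as the service-side emitters do.
    payload["timestamp"] = int(time.time())
    dispatch(BULK_DOWNLOAD_EVENT, {"event": event_name, "data": payload})

emit_bulk_download_event(
    "bulk_download_started",
    {
        "bulk_download_id": "default",
        "bulk_download_item_id": "abc123",
        "bulk_download_item_name": "abc123.zip",
    },
)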

View File

@@ -1,5 +0,0 @@
from abc import ABC
class InvocationProcessorABC(ABC): # noqa: B024
pass

View File

@@ -1,15 +0,0 @@
from pydantic import BaseModel, Field
class ProgressImage(BaseModel):
"""The progress image sent intermittently during processing"""
width: int = Field(description="The effective width of the image in pixels")
height: int = Field(description="The effective height of the image in pixels")
dataURL: str = Field(description="The image data as a b64 data URL")
class CanceledException(Exception):
"""Execution canceled by user."""
pass

View File

@@ -1,243 +0,0 @@
import time
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Event, Thread
from typing import Optional
import invokeai.backend.util.logging as logger
from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
from invokeai.app.services.invocation_stats.invocation_stats_common import (
GESStatsNotFoundError,
)
from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context
from invokeai.app.util.profiler import Profiler
from ..invoker import Invoker
from .invocation_processor_base import InvocationProcessorABC
from .invocation_processor_common import CanceledException
class DefaultInvocationProcessor(InvocationProcessorABC):
__invoker_thread: Thread
__stop_event: Event
__invoker: Invoker
__threadLimit: BoundedSemaphore
def start(self, invoker: Invoker) -> None:
# LS - this will probably break
# but the idea is to enable multithreading up to the number of available
# GPUs. Nodes will block on model loading if no GPU is free.
self.__threadLimit = BoundedSemaphore(invoker.services.model_manager.gpu_count)
self.__invoker = invoker
self.__stop_event = Event()
self.__invoker_thread = Thread(
name="invoker_processor",
target=self.__process,
kwargs={"stop_event": self.__stop_event},
)
self.__invoker_thread.daemon = True # TODO: make async and do not use threads
self.__invoker_thread.start()
def stop(self, *args, **kwargs) -> None:
self.__stop_event.set()
def __process(self, stop_event: Event):
try:
self.__threadLimit.acquire()
queue_item: Optional[InvocationQueueItem] = None
profiler = (
Profiler(
logger=self.__invoker.services.logger,
output_dir=self.__invoker.services.configuration.profiles_path,
prefix=self.__invoker.services.configuration.profile_prefix,
)
if self.__invoker.services.configuration.profile_graphs
else None
)
def stats_cleanup(graph_execution_state_id: str) -> None:
if profiler:
profile_path = profiler.stop()
stats_path = profile_path.with_suffix(".json")
self.__invoker.services.performance_statistics.dump_stats(
graph_execution_state_id=graph_execution_state_id, output_path=stats_path
)
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id)
while not stop_event.is_set():
try:
queue_item = self.__invoker.services.queue.get()
except Exception as e:
self.__invoker.services.logger.error("Exception while getting from queue:\n%s" % e)
if not queue_item: # Probably stopping
# do not hammer the queue
time.sleep(0.5)
continue
if profiler and profiler.profile_id != queue_item.graph_execution_state_id:
profiler.start(profile_id=queue_item.graph_execution_state_id)
try:
graph_execution_state = self.__invoker.services.graph_execution_manager.get(
queue_item.graph_execution_state_id
)
except Exception as e:
self.__invoker.services.logger.error("Exception while retrieving session:\n%s" % e)
self.__invoker.services.events.emit_session_retrieval_error(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=queue_item.graph_execution_state_id,
error_type=e.__class__.__name__,
error=traceback.format_exc(),
)
continue
try:
invocation = graph_execution_state.execution_graph.get_node(queue_item.invocation_id)
except Exception as e:
self.__invoker.services.logger.error("Exception while retrieving invocation:\n%s" % e)
self.__invoker.services.events.emit_invocation_retrieval_error(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=queue_item.graph_execution_state_id,
node_id=queue_item.invocation_id,
error_type=e.__class__.__name__,
error=traceback.format_exc(),
)
continue
# get the source node id to provide to clients (the prepared node id is not as useful)
source_node_id = graph_execution_state.prepared_source_mapping[invocation.id]
# Send starting event
self.__invoker.services.events.emit_invocation_started(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
node=invocation.model_dump(),
source_node_id=source_node_id,
)
# Invoke
try:
graph_id = graph_execution_state.id
with self.__invoker.services.performance_statistics.collect_stats(invocation, graph_id):
# use the internal invoke_internal(), which wraps the node's invoke() method,
# which handles a few things:
# - nodes that require a value, but get it only from a connection
# - referencing the invocation cache instead of executing the node
context_data = InvocationContextData(
invocation=invocation,
session_id=graph_id,
workflow=queue_item.workflow,
source_node_id=source_node_id,
queue_id=queue_item.session_queue_id,
queue_item_id=queue_item.session_queue_item_id,
batch_id=queue_item.session_queue_batch_id,
)
context = build_invocation_context(
services=self.__invoker.services,
context_data=context_data,
)
outputs = invocation.invoke_internal(context=context, services=self.__invoker.services)
# Check queue to see if this is canceled, and skip if so
if self.__invoker.services.queue.is_canceled(graph_execution_state.id):
continue
# Save outputs and history
graph_execution_state.complete(invocation.id, outputs)
# Save the state changes
self.__invoker.services.graph_execution_manager.set(graph_execution_state)
# Send complete event
self.__invoker.services.events.emit_invocation_complete(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
node=invocation.model_dump(),
source_node_id=source_node_id,
result=outputs.model_dump(),
)
except KeyboardInterrupt:
pass
except CanceledException:
stats_cleanup(graph_execution_state.id)
pass
except Exception as e:
error = traceback.format_exc()
logger.error(error)
# Save error
graph_execution_state.set_node_error(invocation.id, error)
# Save the state changes
self.__invoker.services.graph_execution_manager.set(graph_execution_state)
self.__invoker.services.logger.error("Error while invoking:\n%s" % e)
# Send error event
self.__invoker.services.events.emit_invocation_error(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
node=invocation.model_dump(),
source_node_id=source_node_id,
error_type=e.__class__.__name__,
error=error,
)
pass
# Check queue to see if this is canceled, and skip if so
if self.__invoker.services.queue.is_canceled(graph_execution_state.id):
continue
# Queue any further commands if invoking all
is_complete = graph_execution_state.is_complete()
if queue_item.invoke_all and not is_complete:
try:
self.__invoker.invoke(
session_queue_batch_id=queue_item.session_queue_batch_id,
session_queue_item_id=queue_item.session_queue_item_id,
session_queue_id=queue_item.session_queue_id,
graph_execution_state=graph_execution_state,
workflow=queue_item.workflow,
invoke_all=True,
)
except Exception as e:
self.__invoker.services.logger.error("Error while invoking:\n%s" % e)
self.__invoker.services.events.emit_invocation_error(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
node=invocation.model_dump(),
source_node_id=source_node_id,
error_type=e.__class__.__name__,
error=traceback.format_exc(),
)
elif is_complete:
self.__invoker.services.events.emit_graph_execution_complete(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
)
stats_cleanup(graph_execution_state.id)
except KeyboardInterrupt:
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor
finally:
self.__threadLimit.release()

View File

@@ -1,26 +0,0 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from abc import ABC, abstractmethod
from typing import Optional
from .invocation_queue_common import InvocationQueueItem
class InvocationQueueABC(ABC):
"""Abstract base class for all invocation queues"""
@abstractmethod
def get(self) -> InvocationQueueItem:
pass
@abstractmethod
def put(self, item: Optional[InvocationQueueItem]) -> None:
pass
@abstractmethod
def cancel(self, graph_execution_state_id: str) -> None:
pass
@abstractmethod
def is_canceled(self, graph_execution_state_id: str) -> bool:
pass

View File

@@ -1,23 +0,0 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import time
from typing import Optional
from pydantic import BaseModel, Field
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
class InvocationQueueItem(BaseModel):
graph_execution_state_id: str = Field(description="The ID of the graph execution state")
invocation_id: str = Field(description="The ID of the node being invoked")
session_queue_id: str = Field(description="The ID of the session queue from which this invocation queue item came")
session_queue_item_id: int = Field(
description="The ID of session queue item from which this invocation queue item came"
)
session_queue_batch_id: str = Field(
description="The ID of the session batch from which this invocation queue item came"
)
workflow: Optional[WorkflowWithoutID] = Field(description="The workflow associated with this queue item")
invoke_all: bool = Field(default=False)
timestamp: float = Field(default_factory=time.time)

View File

@@ -1,44 +0,0 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import time
from queue import Queue
from typing import Optional
from .invocation_queue_base import InvocationQueueABC
from .invocation_queue_common import InvocationQueueItem
class MemoryInvocationQueue(InvocationQueueABC):
__queue: Queue
__cancellations: dict[str, float]
def __init__(self):
self.__queue = Queue()
self.__cancellations = {}
def get(self) -> InvocationQueueItem:
item = self.__queue.get()
while (
isinstance(item, InvocationQueueItem)
and item.graph_execution_state_id in self.__cancellations
and self.__cancellations[item.graph_execution_state_id] > item.timestamp
):
item = self.__queue.get()
# Clear old items
for graph_execution_state_id in list(self.__cancellations.keys()):
if self.__cancellations[graph_execution_state_id] < item.timestamp:
del self.__cancellations[graph_execution_state_id]
return item
def put(self, item: Optional[InvocationQueueItem]) -> None:
self.__queue.put(item)
def cancel(self, graph_execution_state_id: str) -> None:
if graph_execution_state_id not in self.__cancellations:
self.__cancellations[graph_execution_state_id] = time.time()
def is_canceled(self, graph_execution_state_id: str) -> bool:
return graph_execution_state_id in self.__cancellations
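The removed queue implemented cancellation with timestamps rather than by scanning the queue: an item is skipped if a cancellation for its graph was recorded after the item was enqueued. The core predicate, as a standalone sketch of the deleted design:

import time

cancellations: dict[str, float] = {}

def cancel(graph_id: str) -> None:
    cancellations.setdefault(graph_id, time.time())

def is_skipped(graph_id: str, enqueued_at: float) -> bool:
    # Skip only items enqueued before the cancellation was recorded.
    return graph_id in cancellations and cancellations[graph_id] > enqueued_at

t0 = time.time()
cancel("graph-1")
print(is_skipped("graph-1", enqueued_at=t0))           # True: enqueued before cancel
print(is_skipped("graph-1", enqueued_at=time.time()))  # False: enqueued after cancel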

View File

@@ -16,6 +16,7 @@ if TYPE_CHECKING:
from .board_images.board_images_base import BoardImagesServiceABC
from .board_records.board_records_base import BoardRecordStorageBase
from .boards.boards_base import BoardServiceABC
from .bulk_download.bulk_download_base import BulkDownloadBase
from .config import InvokeAIAppConfig
from .download import DownloadQueueServiceBase
from .events.events_base import EventServiceBase
@@ -23,15 +24,11 @@ if TYPE_CHECKING:
from .image_records.image_records_base import ImageRecordStorageBase
from .images.images_base import ImageServiceABC
from .invocation_cache.invocation_cache_base import InvocationCacheBase
from .invocation_processor.invocation_processor_base import InvocationProcessorABC
from .invocation_queue.invocation_queue_base import InvocationQueueABC
from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase
from .item_storage.item_storage_base import ItemStorageABC
from .model_manager.model_manager_base import ModelManagerServiceBase
from .names.names_base import NameServiceBase
from .session_processor.session_processor_base import SessionProcessorBase
from .session_queue.session_queue_base import SessionQueueBase
from .shared.graph import GraphExecutionState
from .urls.urls_base import UrlServiceBase
from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase
@@ -45,18 +42,16 @@ class InvocationServices:
board_image_records: "BoardImageRecordStorageBase",
boards: "BoardServiceABC",
board_records: "BoardRecordStorageBase",
bulk_download: "BulkDownloadBase",
configuration: "InvokeAIAppConfig",
events: "EventServiceBase",
graph_execution_manager: "ItemStorageABC[GraphExecutionState]",
images: "ImageServiceABC",
image_files: "ImageFileStorageBase",
image_records: "ImageRecordStorageBase",
logger: "Logger",
model_manager: "ModelManagerServiceBase",
download_queue: "DownloadQueueServiceBase",
processor: "InvocationProcessorABC",
performance_statistics: "InvocationStatsServiceBase",
queue: "InvocationQueueABC",
session_queue: "SessionQueueBase",
session_processor: "SessionProcessorBase",
invocation_cache: "InvocationCacheBase",
@@ -70,18 +65,16 @@ class InvocationServices:
self.board_image_records = board_image_records
self.boards = boards
self.board_records = board_records
self.bulk_download = bulk_download
self.configuration = configuration
self.events = events
self.graph_execution_manager = graph_execution_manager
self.images = images
self.image_files = image_files
self.image_records = image_records
self.logger = logger
self.model_manager = model_manager
self.download_queue = download_queue
self.processor = processor
self.performance_statistics = performance_statistics
self.queue = queue
self.session_queue = session_queue
self.session_processor = session_processor
self.invocation_cache = invocation_cache

View File

@@ -3,7 +3,7 @@
Usage:
statistics = InvocationStatsService(graph_execution_manager)
statistics = InvocationStatsService()
with statistics.collect_stats(invocation, graph_execution_state.id):
... execute graphs...
statistics.log_stats()
@@ -30,7 +30,7 @@ writes to the system log is stored in InvocationServices.performance_statistics.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Iterator
from typing import ContextManager
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary
@@ -50,7 +50,7 @@ class InvocationStatsServiceBase(ABC):
self,
invocation: BaseInvocation,
graph_execution_state_id: str,
) -> Iterator[None]:
) -> ContextManager[None]:
"""
Return a context object that will capture the statistics on the execution
of the invocation. Use it in a with: statement around the part of the code that executes the invocation.
@@ -60,12 +60,8 @@ class InvocationStatsServiceBase(ABC):
pass
@abstractmethod
def reset_stats(self, graph_execution_state_id: str) -> None:
"""
Reset all statistics for the indicated graph.
:param graph_execution_state_id: The id of the session whose stats to reset.
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
"""
def reset_stats(self):
"""Reset all stored statistics."""
pass
@abstractmethod

View File

@@ -2,7 +2,7 @@ import json
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
from typing import Generator
import psutil
import torch
@@ -10,7 +10,6 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.item_storage.item_storage_common import ItemNotFoundError
from invokeai.backend.model_manager.load.model_cache import CacheStats
from .invocation_stats_base import InvocationStatsServiceBase
@@ -42,7 +41,7 @@ class InvocationStatsService(InvocationStatsServiceBase):
self._invoker = invoker
@contextmanager
def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str) -> Iterator[None]:
def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str) -> Generator[None, None, None]:
# This is to handle case of the model manager not being initialized, which happens
# during some tests.
services = self._invoker.services
@@ -51,9 +50,6 @@ class InvocationStatsService(InvocationStatsServiceBase):
self._stats[graph_execution_state_id] = GraphExecutionStats()
self._cache_stats[graph_execution_state_id] = CacheStats()
# Prune stale stats. There should be none since we're starting a new graph, but just in case.
self._prune_stale_stats()
# Record state before the invocation.
start_time = time.time()
start_ram = psutil.Process().memory_info().rss
@@ -78,42 +74,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
)
self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)
def _prune_stale_stats(self) -> None:
"""Check all graphs being tracked and prune any that have completed/errored.
This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so
for now we call this function periodically to prevent them from accumulating.
"""
to_prune: list[str] = []
for graph_execution_state_id in self._stats:
try:
graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id)
except ItemNotFoundError:
# TODO(ryand): What would cause this? Should this exception just be allowed to propagate?
logger.warning(f"Failed to get graph state for {graph_execution_state_id}.")
continue
if not graph_execution_state.is_complete():
# The graph is still running, don't prune it.
continue
to_prune.append(graph_execution_state_id)
for graph_execution_state_id in to_prune:
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
if len(to_prune) > 0:
logger.info(f"Pruned stale graph stats for {to_prune}.")
def reset_stats(self, graph_execution_state_id: str):
try:
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
except KeyError as e:
raise GESStatsNotFoundError(
f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
) from e
def reset_stats(self):
self._stats = {}
self._cache_stats = {}
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
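The annotation change reflects how @contextmanager actually works: the decorated function is a generator, so Generator[None, None, None] is the accurate annotation for its body, while the ContextManager[None] return type belongs on the abstract declaration changed above. A minimal stdlib sketch of the same pattern:

import time
from contextlib import contextmanager
from typing import Generator

@contextmanager
def collect_stats(node_name: str) -> Generator[None, None, None]:
    start = time.time()
    try:
        yield
    finally:
        print(f"{node_name} took {time.time() - start:.3f}s")

with collect_stats("demo_node"):
    time.sleep(0.01)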

View File

@@ -1,12 +1,7 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from typing import Optional
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from .invocation_queue.invocation_queue_common import InvocationQueueItem
from .invocation_services import InvocationServices
from .shared.graph import Graph, GraphExecutionState
class Invoker:
@@ -18,51 +13,6 @@ class Invoker:
self.services = services
self._start()
def invoke(
self,
session_queue_id: str,
session_queue_item_id: int,
session_queue_batch_id: str,
graph_execution_state: GraphExecutionState,
workflow: Optional[WorkflowWithoutID] = None,
invoke_all: bool = False,
) -> Optional[str]:
"""Determines the next node to invoke and enqueues it, preparing if needed.
Returns the id of the queued node, or `None` if there are no nodes left to enqueue."""
# Get the next invocation
invocation = graph_execution_state.next()
if not invocation:
return None
# Save the execution state
self.services.graph_execution_manager.set(graph_execution_state)
# Queue the invocation
self.services.queue.put(
InvocationQueueItem(
session_queue_id=session_queue_id,
session_queue_item_id=session_queue_item_id,
session_queue_batch_id=session_queue_batch_id,
graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id,
workflow=workflow,
invoke_all=invoke_all,
)
)
return invocation.id
def create_execution_state(self, graph: Optional[Graph] = None) -> GraphExecutionState:
"""Creates a new execution state for the given graph"""
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
self.services.graph_execution_manager.set(new_state)
return new_state
def cancel(self, graph_execution_state_id: str) -> None:
"""Cancels the given execution state"""
self.services.queue.cancel(graph_execution_state_id)
def __start_service(self, service) -> None:
# Call start() method on any services that have it
start_op = getattr(service, "start", None)
@@ -85,5 +35,3 @@ class Invoker:
# First stop all services
for service in vars(self.services):
self.__stop_service(getattr(self.services, service))
self.services.queue.put(None)

View File

@@ -156,6 +156,7 @@ class ModelInstallJob(BaseModel):
id: int = Field(description="Unique ID for this job")
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
error_reason: Optional[str] = Field(default=None, description="Information about why the job failed")
config_in: Dict[str, Any] = Field(
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
)
@@ -177,6 +178,12 @@ class ModelInstallJob(BaseModel):
download_parts: Set[DownloadJob] = Field(
default_factory=set, description="Download jobs contributing to this install"
)
error: Optional[str] = Field(
default=None, description="On an error condition, this field will contain the text of the exception"
)
error_traceback: Optional[str] = Field(
default=None, description="On an error condition, this field will contain the exception traceback"
)
# internal flags and transitory settings
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
_exception: Optional[Exception] = PrivateAttr(default=None)
@@ -184,7 +191,10 @@ class ModelInstallJob(BaseModel):
def set_error(self, e: Exception) -> None:
"""Record the error and traceback from an exception."""
self._exception = e
self.error = str(e)
self.error_traceback = self._format_error(e)
self.status = InstallStatus.ERROR
self.error_reason = self._exception.__class__.__name__ if self._exception else None
def cancel(self) -> None:
"""Call to cancel the job."""
@@ -195,10 +205,9 @@ class ModelInstallJob(BaseModel):
"""Class name of the exception that led to status==ERROR."""
return self._exception.__class__.__name__ if self._exception else None
@property
def error(self) -> Optional[str]:
def _format_error(self, exception: Exception) -> str:
"""Error traceback."""
return "".join(traceback.format_exception(self._exception)) if self._exception else None
return "".join(traceback.format_exception(exception))
@property
def cancelled(self) -> bool:
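Storing the formatted traceback at set_error time, rather than rebuilding it from the private _exception on each access, means the text survives serialization of the job. The formatting itself is plain stdlib; note that the single-argument form of traceback.format_exception requires Python 3.10+ (on 3.9 the (etype, value, tb) form is needed):

import traceback

def format_error(exception: Exception) -> str:
    # Single-argument form: Python 3.10+.
    return "".join(traceback.format_exception(exception))

try:
    raise ValueError("bad model config")
except ValueError as e:
    print(format_error(e))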

View File

@@ -154,8 +154,12 @@ class ModelInstallService(ModelInstallServiceBase):
info: AnyModelConfig = self._probe_model(Path(model_path), config)
old_hash = info.current_hash
if preferred_name := config.get("name"):
preferred_name = Path(preferred_name).with_suffix(model_path.suffix)
dest_path = (
self.app_config.models_path / info.base.value / info.type.value / (config.get("name") or model_path.name)
self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
)
try:
new_path = self._copy_model(model_path, dest_path)
@@ -538,8 +542,10 @@ class ModelInstallService(ModelInstallServiceBase):
def _register(
self, model_path: Path, config: Optional[Dict[str, Any]] = None, info: Optional[AnyModelConfig] = None
) -> str:
info = info or ModelProbe.probe(model_path, config)
key = self._create_key()
if config and not config.get("key", None):
config["key"] = key
info = info or ModelProbe.probe(model_path, config)
model_path = model_path.absolute()
if model_path.is_relative_to(self.app_config.models_path):
@@ -552,8 +558,8 @@ class ModelInstallService(ModelInstallServiceBase):
# make config relative to our root
legacy_conf = (self.app_config.root_dir / self.app_config.legacy_conf_dir / info.config).resolve()
info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
self.record_store.add_model(key, info)
return key
self.record_store.add_model(info.key, info)
return info.key
def _next_id(self) -> int:
with self._lock:
@@ -737,6 +743,7 @@ class ModelInstallService(ModelInstallServiceBase):
self._signal_job_downloading(install_job)
def _download_complete_callback(self, download_job: DownloadJob) -> None:
self._logger.info(f"{download_job.source}: model download complete")
with self._lock:
install_job = self._download_cache[download_job.source]
self._download_cache.pop(download_job.source, None)
@@ -769,7 +776,7 @@ class ModelInstallService(ModelInstallServiceBase):
if not install_job:
return
self._downloads_changed_event.set()
self._logger.warning(f"Download {download_job.source} cancelled.")
self._logger.warning(f"{download_job.source}: model download cancelled")
# if install job has already registered an error, then do not replace its status with cancelled
if not install_job.errored:
install_job.cancel()
@@ -816,6 +823,7 @@ class ModelInstallService(ModelInstallServiceBase):
parts=parts,
bytes=job.bytes,
total_bytes=job.total_bytes,
id=job.id,
)
def _signal_job_completed(self, job: ModelInstallJob) -> None:
@@ -828,7 +836,7 @@ class ModelInstallService(ModelInstallServiceBase):
assert job.local_path is not None
assert job.config_out is not None
key = job.config_out.key
self._event_bus.emit_model_install_completed(str(job.source), key)
self._event_bus.emit_model_install_completed(str(job.source), key, id=job.id)
def _signal_job_errored(self, job: ModelInstallJob) -> None:
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}")
@@ -837,7 +845,7 @@ class ModelInstallService(ModelInstallServiceBase):
error = job.error
assert error_type is not None
assert error is not None
self._event_bus.emit_model_install_error(str(job.source), error_type, error)
self._event_bus.emit_model_install_error(str(job.source), error_type, error, id=job.id)
def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
self._logger.info(f"{job.source}: model installation was cancelled")

View File

@@ -38,8 +38,3 @@ class ModelLoadServiceBase(ABC):
@abstractmethod
def convert_cache(self) -> ModelConvertCacheBase:
"""Return the checkpoint convert cache used by this loader."""
@property
@abstractmethod
def gpu_count(self) -> int:
"""Return the number of GPUs we are configured to use."""

View File

@@ -4,7 +4,6 @@
from typing import Optional, Type
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.shared.invocation_context import InvocationContextData
from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
@@ -40,7 +39,6 @@ class ModelLoadService(ModelLoadServiceBase):
self._registry = registry
def start(self, invoker: Invoker) -> None:
"""Start the service."""
self._invoker = invoker
@property
@@ -48,11 +46,6 @@ class ModelLoadService(ModelLoadServiceBase):
"""Return the RAM cache used by this loader."""
return self._ram_cache
@property
def gpu_count(self) -> int:
"""Return the number of GPUs available for our uses."""
return len(self._ram_cache.execution_devices)
@property
def convert_cache(self) -> ModelConvertCacheBase:
"""Return the checkpoint convert cache used by this loader."""
@@ -101,22 +94,20 @@ class ModelLoadService(ModelLoadServiceBase):
) -> None:
if not self._invoker:
return
if self._invoker.services.queue.is_canceled(context_data.session_id):
raise CanceledException()
if not loaded:
self._invoker.services.events.emit_model_load_started(
queue_id=context_data.queue_id,
queue_item_id=context_data.queue_item_id,
queue_batch_id=context_data.batch_id,
graph_execution_state_id=context_data.session_id,
queue_id=context_data.queue_item.queue_id,
queue_item_id=context_data.queue_item.item_id,
queue_batch_id=context_data.queue_item.batch_id,
graph_execution_state_id=context_data.queue_item.session_id,
model_config=model_config,
)
else:
self._invoker.services.events.emit_model_load_completed(
queue_id=context_data.queue_id,
queue_item_id=context_data.queue_item_id,
queue_batch_id=context_data.batch_id,
graph_execution_state_id=context_data.session_id,
queue_id=context_data.queue_item.queue_id,
queue_item_id=context_data.queue_item.item_id,
queue_batch_id=context_data.queue_item.batch_id,
graph_execution_state_id=context_data.queue_item.session_id,
model_config=model_config,
)

View File

@@ -3,6 +3,7 @@
from abc import ABC, abstractmethod
from typing import Optional
import torch
from typing_extensions import Self
from invokeai.app.services.invoker import Invoker
@@ -16,7 +17,6 @@ from ..events.events_base import EventServiceBase
from ..model_install import ModelInstallServiceBase
from ..model_load import ModelLoadServiceBase
from ..model_records import ModelRecordServiceBase
from ..shared.sqlite.sqlite_database import SqliteDatabase
class ModelManagerServiceBase(ABC):
@@ -32,9 +32,10 @@ class ModelManagerServiceBase(ABC):
def build_model_manager(
cls,
app_config: InvokeAIAppConfig,
db: SqliteDatabase,
model_record_service: ModelRecordServiceBase,
download_queue: DownloadQueueServiceBase,
events: EventServiceBase,
execution_device: torch.device,
) -> Self:
"""
Construct the model manager service instance.
@@ -98,8 +99,3 @@ class ModelManagerServiceBase(ABC):
context_data: Optional[InvocationContextData] = None,
) -> LoadedModel:
pass
@property
@abstractmethod
def gpu_count(self) -> int:
"""Return the number of GPUs we are configured to use."""

View File

@@ -3,12 +3,14 @@
from typing import Optional
import torch
from typing_extensions import Self
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.shared.invocation_context import InvocationContextData
from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, LoadedModel, ModelType, SubModelType
from invokeai.backend.model_manager.load import ModelCache, ModelConvertCache, ModelLoaderRegistry
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from ..config import InvokeAIAppConfig
@@ -112,11 +114,6 @@ class ModelManagerService(ModelManagerServiceBase):
else:
return self.load.load_model(configs[0], submodel, context_data)
@property
def gpu_count(self) -> int:
"""Return the number of GPUs we are using."""
return self.load.gpu_count
@classmethod
def build_model_manager(
cls,
@@ -124,6 +121,7 @@ class ModelManagerService(ModelManagerServiceBase):
model_record_service: ModelRecordServiceBase,
download_queue: DownloadQueueServiceBase,
events: EventServiceBase,
execution_device: torch.device = choose_torch_device(),
) -> Self:
"""
Construct the model manager service instance.
@@ -134,7 +132,10 @@ class ModelManagerService(ModelManagerServiceBase):
logger.setLevel(app_config.log_level.upper())
ram_cache = ModelCache(
max_cache_size=app_config.ram_cache_size, max_vram_cache_size=app_config.vram_cache_size, logger=logger
max_cache_size=app_config.ram_cache_size,
max_vram_cache_size=app_config.vram_cache_size,
logger=logger,
execution_device=execution_device,
)
convert_cache = ModelConvertCache(
cache_path=app_config.models_convert_cache_path, max_size=app_config.convert_cache_size
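build_model_manager now accepts an execution_device, defaulting to choose_torch_device(); presumably that helper picks the best available backend. A hedged sketch of that kind of selection, not the actual implementation:

import torch

def choose_device() -> torch.device:
    # Prefer CUDA, then Apple Metal, then CPU.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

print(choose_device())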

View File

@@ -4,3 +4,17 @@ from pydantic import BaseModel, Field
class SessionProcessorStatus(BaseModel):
is_started: bool = Field(description="Whether the session processor is started")
is_processing: bool = Field(description="Whether a session is being processed")
class CanceledException(Exception):
"""Execution canceled by user."""
pass
class ProgressImage(BaseModel):
"""The progress image sent intermittently during processing"""
width: int = Field(description="The effective width of the image in pixels")
height: int = Field(description="The effective height of the image in pixels")
dataURL: str = Field(description="The image data as a b64 data URL")

View File

@@ -1,4 +1,5 @@
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Thread
from threading import Event as ThreadEvent
from typing import Optional
@@ -6,136 +7,270 @@ from typing import Optional
from fastapi_events.handlers.local import local_handler
from fastapi_events.typing import Event as FastAPIEvent
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context
from invokeai.app.util.profiler import Profiler
from ..invoker import Invoker
from .session_processor_base import SessionProcessorBase
from .session_processor_common import SessionProcessorStatus
POLLING_INTERVAL = 1
THREAD_LIMIT = 1
class DefaultSessionProcessor(SessionProcessorBase):
def start(self, invoker: Invoker) -> None:
self.__invoker: Invoker = invoker
self.__queue_item: Optional[SessionQueueItem] = None
def start(self, invoker: Invoker, thread_limit: int = 1, polling_interval: int = 1) -> None:
self._invoker: Invoker = invoker
self._queue_item: Optional[SessionQueueItem] = None
self._invocation: Optional[BaseInvocation] = None
self.__resume_event = ThreadEvent()
self.__stop_event = ThreadEvent()
self.__poll_now_event = ThreadEvent()
self._resume_event = ThreadEvent()
self._stop_event = ThreadEvent()
self._poll_now_event = ThreadEvent()
self._cancel_event = ThreadEvent()
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_queue_event)
self.__threadLimit = BoundedSemaphore(THREAD_LIMIT)
self.__thread = Thread(
self._thread_limit = thread_limit
self._thread_semaphore = BoundedSemaphore(thread_limit)
self._polling_interval = polling_interval
# If profiling is enabled, create a profiler. The same profiler will be used for all sessions. Internally,
# the profiler will create a new profile for each session.
self._profiler = (
Profiler(
logger=self._invoker.services.logger,
output_dir=self._invoker.services.configuration.profiles_path,
prefix=self._invoker.services.configuration.profile_prefix,
)
if self._invoker.services.configuration.profile_graphs
else None
)
self._thread = Thread(
name="session_processor",
target=self.__process,
target=self._process,
kwargs={
"stop_event": self.__stop_event,
"poll_now_event": self.__poll_now_event,
"resume_event": self.__resume_event,
"stop_event": self._stop_event,
"poll_now_event": self._poll_now_event,
"resume_event": self._resume_event,
"cancel_event": self._cancel_event,
},
)
self.__thread.start()
self._thread.start()
def stop(self, *args, **kwargs) -> None:
self.__stop_event.set()
self._stop_event.set()
def _poll_now(self) -> None:
self.__poll_now_event.set()
self._poll_now_event.set()
async def _on_queue_event(self, event: FastAPIEvent) -> None:
event_name = event[1]["event"]
# This was a match statement, but match is not supported on python 3.9
if event_name in [
"graph_execution_state_complete",
"invocation_error",
"session_retrieval_error",
"invocation_retrieval_error",
]:
self.__queue_item = None
self._poll_now()
elif (
event_name == "session_canceled"
and self.__queue_item is not None
and self.__queue_item.session_id == event[1]["data"]["graph_execution_state_id"]
):
self.__queue_item = None
if event_name == "session_canceled" or event_name == "queue_cleared":
# These both mean we should cancel the current session.
self._cancel_event.set()
self._poll_now()
elif event_name == "batch_enqueued":
self._poll_now()
elif event_name == "queue_cleared":
self.__queue_item = None
self._poll_now()
def resume(self) -> SessionProcessorStatus:
if not self.__resume_event.is_set():
self.__resume_event.set()
if not self._resume_event.is_set():
self._resume_event.set()
return self.get_status()
def pause(self) -> SessionProcessorStatus:
if self.__resume_event.is_set():
self.__resume_event.clear()
if self._resume_event.is_set():
self._resume_event.clear()
return self.get_status()
def get_status(self) -> SessionProcessorStatus:
return SessionProcessorStatus(
is_started=self.__resume_event.is_set(),
is_processing=self.__queue_item is not None,
is_started=self._resume_event.is_set(),
is_processing=self._queue_item is not None,
)
def __process(
def _process(
self,
stop_event: ThreadEvent,
poll_now_event: ThreadEvent,
resume_event: ThreadEvent,
cancel_event: ThreadEvent,
):
# Outermost processor try block; any unhandled exception is a fatal processor error
try:
self._thread_semaphore.acquire()
stop_event.clear()
resume_event.set()
self.__threadLimit.acquire()
queue_item: Optional[SessionQueueItem] = None
cancel_event.clear()
while not stop_event.is_set():
poll_now_event.clear()
# Middle processor try block; any unhandled exception is a non-fatal processor error
try:
# do not dequeue if there is already a session running
if self.__queue_item is None and resume_event.is_set():
queue_item = self.__invoker.services.session_queue.dequeue()
# Get the next session to process
self._queue_item = self._invoker.services.session_queue.dequeue()
if self._queue_item is not None and resume_event.is_set():
self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}")
cancel_event.clear()
if queue_item is not None:
self.__invoker.services.logger.debug(f"Executing queue item {queue_item.item_id}")
self.__queue_item = queue_item
self.__invoker.services.graph_execution_manager.set(queue_item.session)
self.__invoker.invoke(
session_queue_batch_id=queue_item.batch_id,
session_queue_id=queue_item.queue_id,
session_queue_item_id=queue_item.item_id,
graph_execution_state=queue_item.session,
workflow=queue_item.workflow,
invoke_all=True,
# If profiling is enabled, start the profiler
if self._profiler is not None:
self._profiler.start(profile_id=self._queue_item.session_id)
# Prepare invocations and take the first
self._invocation = self._queue_item.session.next()
# Loop over invocations until the session is complete or canceled
while self._invocation is not None and not cancel_event.is_set():
# get the source node id to provide to clients (the prepared node id is not as useful)
source_invocation_id = self._queue_item.session.prepared_source_mapping[self._invocation.id]
# Send starting event
self._invoker.services.events.emit_invocation_started(
queue_batch_id=self._queue_item.batch_id,
queue_item_id=self._queue_item.item_id,
queue_id=self._queue_item.queue_id,
graph_execution_state_id=self._queue_item.session_id,
node=self._invocation.model_dump(),
source_node_id=source_invocation_id,
)
queue_item = None
if queue_item is None:
self.__invoker.services.logger.debug("Waiting for next polling interval or event")
poll_now_event.wait(POLLING_INTERVAL)
# Innermost processor try block; any unhandled exception is an invocation error & will fail the graph
try:
with self._invoker.services.performance_statistics.collect_stats(
self._invocation, self._queue_item.session.id
):
# Build invocation context (the node-facing API)
data = InvocationContextData(
invocation=self._invocation,
source_invocation_id=source_invocation_id,
queue_item=self._queue_item,
)
context = build_invocation_context(
data=data,
services=self._invoker.services,
cancel_event=self._cancel_event,
)
# Invoke the node
outputs = self._invocation.invoke_internal(
context=context, services=self._invoker.services
)
# Save outputs and history
self._queue_item.session.complete(self._invocation.id, outputs)
# Send complete event
self._invoker.services.events.emit_invocation_complete(
queue_batch_id=self._queue_item.batch_id,
queue_item_id=self._queue_item.item_id,
queue_id=self._queue_item.queue_id,
graph_execution_state_id=self._queue_item.session.id,
node=self._invocation.model_dump(),
source_node_id=source_invocation_id,
result=outputs.model_dump(),
)
except KeyboardInterrupt:
# TODO(MM2): Create an event for this
pass
except CanceledException:
# When the user cancels the graph, we first set the cancel event. The event is checked
# between invocations, in this loop. Some invocations are long-running, and we need to
# be able to cancel them mid-execution.
#
# For example, denoising is a long-running invocation with many steps. A step callback
# is executed after each step. This step callback checks if the canceled event is set,
# then raises a CanceledException to stop execution immediately.
#
# When we get a CanceledException, we don't need to do anything - just pass and let the
# loop go to its next iteration, and the cancel event will be handled correctly.
pass
except Exception as e:
error = traceback.format_exc()
# Save error
self._queue_item.session.set_node_error(self._invocation.id, error)
self._invoker.services.logger.error(
f"Error while invoking session {self._queue_item.session_id}, invocation {self._invocation.id} ({self._invocation.get_type()}):\n{e}"
)
# Send error event
self._invoker.services.events.emit_invocation_error(
queue_batch_id=self._queue_item.session_id,
queue_item_id=self._queue_item.item_id,
queue_id=self._queue_item.queue_id,
graph_execution_state_id=self._queue_item.session.id,
node=self._invocation.model_dump(),
source_node_id=source_invocation_id,
error_type=e.__class__.__name__,
error=error,
)
pass
# The session is complete if all the invocations are complete or there was an error
if self._queue_item.session.is_complete() or cancel_event.is_set():
# Send complete event
self._invoker.services.events.emit_graph_execution_complete(
queue_batch_id=self._queue_item.batch_id,
queue_item_id=self._queue_item.item_id,
queue_id=self._queue_item.queue_id,
graph_execution_state_id=self._queue_item.session.id,
)
# If we are profiling, stop the profiler and dump the profile & stats
if self._profiler:
profile_path = self._profiler.stop()
stats_path = profile_path.with_suffix(".json")
self._invoker.services.performance_statistics.dump_stats(
graph_execution_state_id=self._queue_item.session.id, output_path=stats_path
)
# We'll get a GESStatsNotFoundError if we try to log stats for an untracked graph, but in the processor
# we don't care about that - suppress the error.
with suppress(GESStatsNotFoundError):
self._invoker.services.performance_statistics.log_stats(self._queue_item.session.id)
self._invoker.services.performance_statistics.reset_stats()
# Set the invocation to None to prepare for the next session
self._invocation = None
else:
# Prepare the next invocation
self._invocation = self._queue_item.session.next()
# The session is complete, immediately poll for next session
self._queue_item = None
poll_now_event.set()
else:
# The queue was empty, wait for next polling interval or event to try again
self._invoker.services.logger.debug("Waiting for next polling interval or event")
poll_now_event.wait(self._polling_interval)
continue
except Exception as e:
self.__invoker.services.logger.error(f"Error in session processor: {e}")
if queue_item is not None:
self.__invoker.services.session_queue.cancel_queue_item(
queue_item.item_id, error=traceback.format_exc()
except Exception:
# Non-fatal error in processor
self._invoker.services.logger.error(
f"Non-fatal error in session processor:\n{traceback.format_exc()}"
)
# Cancel the queue item
if self._queue_item is not None:
self._invoker.services.session_queue.cancel_queue_item(
self._queue_item.item_id, error=traceback.format_exc()
)
poll_now_event.wait(POLLING_INTERVAL)
# Reset the invocation to None to prepare for the next session
self._invocation = None
# Immediately poll for next queue item
poll_now_event.wait(self._polling_interval)
continue
except Exception as e:
self.__invoker.services.logger.error(f"Fatal Error in session processor: {e}")
except Exception:
# Fatal error in processor, log and pass - we're done here
self._invoker.services.logger.error(f"Fatal Error in session processor:\n{traceback.format_exc()}")
pass
finally:
stop_event.clear()
poll_now_event.clear()
self.__queue_item = None
self.__threadLimit.release()
self._queue_item = None
self._thread_semaphore.release()
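The refactored processor layers three try blocks, as the comments above describe. Stripped of the real services, the control flow reduces to the following sketch; every name here is a toy stand-in, not the actual implementation:

from threading import Event

def dequeue():
    # toy stand-in for session_queue.dequeue(); returns a list of callables or None
    return None

def process(stop_event: Event, poll_now_event: Event, polling_interval: float = 1.0) -> None:
    try:  # outermost tier: anything that escapes here is a fatal processor error
        while not stop_event.is_set():
            poll_now_event.clear()
            try:  # middle tier: non-fatal; cancel the queue item and keep polling
                queue_item = dequeue()
                if queue_item is None:
                    # sleep until the next poll, waking early if poll_now is set
                    poll_now_event.wait(polling_interval)
                    continue
                for invocation in queue_item:
                    try:  # innermost tier: an error here fails only this graph
                        invocation()
                    except Exception as e:
                        print(f"invocation error: {e}")
            except Exception as e:
                print(f"non-fatal processor error: {e}")
    except Exception as e:
        print(f"fatal processor error: {e}")

stop_event, poll_now_event = Event(), Event()
stop_event.set()  # exit immediately in this demo
process(stop_event, poll_now_event)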

View File

@@ -60,7 +60,7 @@ class SqliteSessionQueue(SessionQueueBase):
# This was a match statement, but match is not supported on python 3.9
if event_name == "graph_execution_state_complete":
await self._handle_complete_event(event)
elif event_name in ["invocation_error", "session_retrieval_error", "invocation_retrieval_error"]:
elif event_name == "invocation_error":
await self._handle_error_event(event)
elif event_name == "session_canceled":
await self._handle_cancel_event(event)
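For context, these handlers hang off fastapi-events' local handler. A minimal sketch of the registration and dispatch shape; the event name is an assumed stand-in for EventServiceBase.queue_event, and the payload shape is taken from the handlers above:

from fastapi_events.handlers.local import local_handler
from fastapi_events.typing import Event as FastAPIEvent

QUEUE_EVENT = "queue_event"  # assumed stand-in for EventServiceBase.queue_event

@local_handler.register(event_name=QUEUE_EVENT)
async def on_queue_event(event: FastAPIEvent) -> None:
    # fastapi-events hands handlers an (event_name, payload) tuple
    payload = event[1]
    event_name = payload["event"]
    if event_name == "graph_execution_state_complete":
        ...  # route to the matching _handle_* coroutine
    elif event_name == "invocation_error":
        ...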
@@ -429,7 +429,6 @@ class SqliteSessionQueue(SessionQueueBase):
if queue_item.status not in ["canceled", "failed", "completed"]:
status = "failed" if error is not None else "canceled"
queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here
self.__invoker.services.queue.cancel(queue_item.session_id)
self.__invoker.services.events.emit_session_canceled(
queue_item_id=queue_item.item_id,
queue_id=queue_item.queue_id,
@@ -471,7 +470,6 @@ class SqliteSessionQueue(SessionQueueBase):
)
self.__conn.commit()
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
self.__invoker.services.queue.cancel(current_queue_item.session_id)
self.__invoker.services.events.emit_session_canceled(
queue_item_id=current_queue_item.item_id,
queue_id=current_queue_item.queue_id,
@@ -523,7 +521,6 @@ class SqliteSessionQueue(SessionQueueBase):
)
self.__conn.commit()
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
self.__invoker.services.queue.cancel(current_queue_item.session_id)
self.__invoker.services.events.emit_session_canceled(
queue_item_id=current_queue_item.item_id,
queue_id=current_queue_item.queue_id,

View File

@@ -1,92 +0,0 @@
from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC
from ...invocations.compel import CompelInvocation
from ...invocations.image import ImageNSFWBlurInvocation
from ...invocations.latent import DenoiseLatentsInvocation, LatentsToImageInvocation
from ...invocations.noise import NoiseInvocation
from ...invocations.primitives import IntegerInvocation
from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph
default_text_to_image_graph_id = "539b2af5-2b4d-4d8c-8071-e54a3255fc74"
def create_text_to_image() -> LibraryGraph:
graph = Graph(
nodes={
"width": IntegerInvocation(id="width", value=512),
"height": IntegerInvocation(id="height", value=512),
"seed": IntegerInvocation(id="seed", value=-1),
"3": NoiseInvocation(id="3"),
"4": CompelInvocation(id="4"),
"5": CompelInvocation(id="5"),
"6": DenoiseLatentsInvocation(id="6"),
"7": LatentsToImageInvocation(id="7"),
"8": ImageNSFWBlurInvocation(id="8"),
},
edges=[
Edge(
source=EdgeConnection(node_id="width", field="value"),
destination=EdgeConnection(node_id="3", field="width"),
),
Edge(
source=EdgeConnection(node_id="height", field="value"),
destination=EdgeConnection(node_id="3", field="height"),
),
Edge(
source=EdgeConnection(node_id="seed", field="value"),
destination=EdgeConnection(node_id="3", field="seed"),
),
Edge(
source=EdgeConnection(node_id="3", field="noise"),
destination=EdgeConnection(node_id="6", field="noise"),
),
Edge(
source=EdgeConnection(node_id="6", field="latents"),
destination=EdgeConnection(node_id="7", field="latents"),
),
Edge(
source=EdgeConnection(node_id="4", field="conditioning"),
destination=EdgeConnection(node_id="6", field="positive_conditioning"),
),
Edge(
source=EdgeConnection(node_id="5", field="conditioning"),
destination=EdgeConnection(node_id="6", field="negative_conditioning"),
),
Edge(
source=EdgeConnection(node_id="7", field="image"),
destination=EdgeConnection(node_id="8", field="image"),
),
],
)
return LibraryGraph(
id=default_text_to_image_graph_id,
name="t2i",
description="Converts text to an image",
graph=graph,
exposed_inputs=[
ExposedNodeInput(node_path="4", field="prompt", alias="positive_prompt"),
ExposedNodeInput(node_path="5", field="prompt", alias="negative_prompt"),
ExposedNodeInput(node_path="width", field="value", alias="width"),
ExposedNodeInput(node_path="height", field="value", alias="height"),
ExposedNodeInput(node_path="seed", field="value", alias="seed"),
],
exposed_outputs=[ExposedNodeOutput(node_path="8", field="image", alias="image")],
)
def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[LibraryGraph]:
"""Creates the default system graphs, or adds new versions if the old ones don't match"""
# TODO: Uncomment this when we are ready to fix this up to prevent breaking changes
graphs: list[LibraryGraph] = []
text_to_image = graph_library.get(default_text_to_image_graph_id)
# TODO: Check if the graph is the same as the default one, and if not, update it
# if text_to_image is None:
text_to_image = create_text_to_image()
graph_library.set(text_to_image)
graphs.append(text_to_image)
return graphs

View File

@@ -5,8 +5,14 @@ import itertools
from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
import networkx as nx
from pydantic import BaseModel, ConfigDict, field_validator, model_validator
from pydantic import (
BaseModel,
GetJsonSchemaHandler,
field_validator,
)
from pydantic.fields import Field
from pydantic.json_schema import JsonSchemaValue
from pydantic_core import CoreSchema
# Importing * is bad karma but needed here for node detection
from invokeai.app.invocations import * # noqa: F401 F403
@@ -176,10 +182,6 @@ class NodeIdMismatchError(ValueError):
pass
class InvalidSubGraphError(ValueError):
pass
class CyclicalGraphError(ValueError):
pass
@@ -188,25 +190,6 @@ class UnknownGraphValidationError(ValueError):
pass
# TODO: Create and use an Empty output?
@invocation_output("graph_output")
class GraphInvocationOutput(BaseInvocationOutput):
pass
# TODO: Fill this out and move to invocations
@invocation("graph", version="1.0.0")
class GraphInvocation(BaseInvocation):
"""Execute a graph"""
# TODO: figure out how to create a default here
graph: "Graph" = InputField(description="The graph to run", default=None)
def invoke(self, context: InvocationContext) -> GraphInvocationOutput:
"""Invoke with provided services and return outputs."""
return GraphInvocationOutput()
@invocation_output("iterate_output")
class IterateInvocationOutput(BaseInvocationOutput):
"""Used to connect iteration outputs. Will be expanded to a specific output."""
@@ -260,21 +243,73 @@ class CollectInvocation(BaseInvocation):
return CollectInvocationOutput(collection=copy.copy(self.collection))
InvocationsUnion: Any = BaseInvocation.get_invocations_union()
InvocationOutputsUnion: Any = BaseInvocationOutput.get_outputs_union()
class Graph(BaseModel):
id: str = Field(description="The id of this graph", default_factory=uuid_string)
# TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me
nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
description="The nodes in this graph", default_factory=dict
)
nodes: dict[str, BaseInvocation] = Field(description="The nodes in this graph", default_factory=dict)
edges: list[Edge] = Field(
description="The connections between nodes and their fields in this graph",
default_factory=list,
)
@field_validator("nodes", mode="plain")
@classmethod
def validate_nodes(cls, v: dict[str, Any]):
"""Validates the nodes in the graph by retrieving a union of all node types and validating each node."""
# Invocations register themselves as their python modules are executed. The union of all invocations is
# constructed at runtime. We use pydantic to validate `Graph.nodes` using that union.
#
# It's possible that when `graph.py` is executed, not all invocation-containing modules will have executed. If
# we construct the invocation union as `graph.py` is executed, we may miss some invocations. Those missing
# invocations will cause a graph to fail if they are used.
#
# We can get around this by validating the nodes in the graph using a "plain" validator, which overrides the
# pydantic validation entirely. This allows us to validate the nodes using the union of invocations at runtime.
#
# This same pattern is used in `GraphExecutionState`.
nodes: dict[str, BaseInvocation] = {}
typeadapter = BaseInvocation.get_typeadapter()
for node_id, node in v.items():
nodes[node_id] = typeadapter.validate_python(node)
return nodes
@classmethod
def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
# We use a "plain" validator to validate the nodes in the graph. Pydantic is unable to create a JSON Schema for
# fields that use "plain" validators, so we have to hack around this. Also, we need to add all invocations to
# the generated schema as options for the `nodes` field.
#
# The workaround is to create a new BaseModel that has the same fields as `Graph` but without the validator and
# with the invocation union as the type for the `nodes` field. Pydantic then generates the JSON Schema as
# expected.
#
# You might be tempted to do something like this:
#
# ```py
# cloned_model = create_model(cls.__name__, __base__=cls, nodes=...)
# delattr(cloned_model, "validate_nodes")
# cloned_model.model_rebuild(force=True)
# json_schema = handler(cloned_model.__pydantic_core_schema__)
# ```
#
# Unfortunately, this does not work. Calling `handler` here results in infinite recursion as pydantic attempts
# to build the JSON Schema for the cloned model. Instead, we have to manually clone the model.
#
# This same pattern is used in `GraphExecutionState`.
class Graph(BaseModel):
id: Optional[str] = Field(default=None, description="The id of this graph")
nodes: dict[
str, Annotated[Union[tuple(BaseInvocation._invocation_classes)], Field(discriminator="type")]
] = Field(description="The nodes in this graph")
edges: list[Edge] = Field(description="The connections between nodes and their fields in this graph")
json_schema = handler(Graph.__pydantic_core_schema__)
json_schema = handler.resolve_ref_schema(json_schema)
return json_schema
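The same trick is easier to see in isolation. A self-contained toy mirroring the pattern above (pydantic v2 assumed; the node classes and union are illustrative, not invokeai's):

from typing import Annotated, Any, Literal, Union
from pydantic import BaseModel, Field, GetJsonSchemaHandler, TypeAdapter, field_validator
from pydantic.json_schema import JsonSchemaValue
from pydantic_core import CoreSchema

class NodeA(BaseModel):
    type: Literal["a"] = "a"

class NodeB(BaseModel):
    type: Literal["b"] = "b"

# In invokeai this union is assembled at runtime from registered invocations;
# here it is a fixed toy union.
NodeUnion = Annotated[Union[NodeA, NodeB], Field(discriminator="type")]

class ToyGraph(BaseModel):
    nodes: dict[str, Any] = Field(default_factory=dict)

    @field_validator("nodes", mode="plain")
    @classmethod
    def validate_nodes(cls, v: dict[str, Any]) -> dict[str, Any]:
        # "plain" validator: bypass pydantic's own validation, use the runtime union
        adapter = TypeAdapter(NodeUnion)
        return {node_id: adapter.validate_python(node) for node_id, node in v.items()}

    @classmethod
    def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
        # manually cloned model so the schema advertises the union, as above
        class ToyGraph(BaseModel):
            nodes: dict[str, NodeUnion] = Field(default_factory=dict)

        return handler.resolve_ref_schema(handler(ToyGraph.__pydantic_core_schema__))

print(ToyGraph(nodes={"n1": {"type": "a"}}).nodes["n1"])
print(ToyGraph.model_json_schema()["properties"]["nodes"])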
def add_node(self, node: BaseInvocation) -> None:
"""Adds a node to a graph
@@ -286,41 +321,21 @@ class Graph(BaseModel):
self.nodes[node.id] = node
def _get_graph_and_node(self, node_path: str) -> tuple["Graph", str]:
"""Returns the graph and node id for a node path."""
# Materialized graphs may have nodes at the top level
if node_path in self.nodes:
return (self, node_path)
node_id = node_path if "." not in node_path else node_path[: node_path.index(".")]
if node_id not in self.nodes:
raise NodeNotFoundError(f"Node {node_path} not found in graph")
node = self.nodes[node_id]
if not isinstance(node, GraphInvocation):
# There's more node path left but this isn't a graph - failure
raise NodeNotFoundError("Node path terminated early at a non-graph node")
return node.graph._get_graph_and_node(node_path[node_path.index(".") + 1 :])
def delete_node(self, node_path: str) -> None:
def delete_node(self, node_id: str) -> None:
"""Deletes a node from a graph"""
try:
graph, node_id = self._get_graph_and_node(node_path)
# Delete edges for this node
input_edges = self._get_input_edges_and_graphs(node_path)
output_edges = self._get_output_edges_and_graphs(node_path)
input_edges = self._get_input_edges(node_id)
output_edges = self._get_output_edges(node_id)
for edge_graph, _, edge in input_edges:
edge_graph.delete_edge(edge)
for edge in input_edges:
self.delete_edge(edge)
for edge_graph, _, edge in output_edges:
edge_graph.delete_edge(edge)
for edge in output_edges:
self.delete_edge(edge)
del graph.nodes[node_id]
del self.nodes[node_id]
except NodeNotFoundError:
pass # Ignore, node doesn't exist (should this throw?)
@@ -370,13 +385,6 @@ class Graph(BaseModel):
if k != v.id:
raise NodeIdMismatchError(f"Node ids must match, got {k} and {v.id}")
# Validate all subgraphs
for gn in (n for n in self.nodes.values() if isinstance(n, GraphInvocation)):
try:
gn.graph.validate_self()
except Exception as e:
raise InvalidSubGraphError(f"Subgraph {gn.id} is invalid") from e
# Validate that all edges match nodes and fields in the graph
for edge in self.edges:
source_node = self.nodes.get(edge.source.node_id, None)
@@ -438,7 +446,6 @@ class Graph(BaseModel):
except (
DuplicateNodeIdError,
NodeIdMismatchError,
InvalidSubGraphError,
NodeNotFoundError,
NodeFieldNotFoundError,
CyclicalGraphError,
@@ -459,7 +466,7 @@ class Graph(BaseModel):
def _validate_edge(self, edge: Edge):
"""Validates that a new edge doesn't create a cycle in the graph"""
# Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)
# Validate that the nodes exist
try:
from_node = self.get_node(edge.source.node_id)
to_node = self.get_node(edge.destination.node_id)
@@ -526,171 +533,90 @@ class Graph(BaseModel):
f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
)
def has_node(self, node_path: str) -> bool:
def has_node(self, node_id: str) -> bool:
"""Determines whether or not a node exists in the graph."""
try:
n = self.get_node(node_path)
if n is not None:
return True
else:
return False
_ = self.get_node(node_id)
return True
except NodeNotFoundError:
return False
def get_node(self, node_path: str) -> BaseInvocation:
"""Gets a node from the graph using a node path."""
# Materialized graphs may have nodes at the top level
graph, node_id = self._get_graph_and_node(node_path)
return graph.nodes[node_id]
def get_node(self, node_id: str) -> BaseInvocation:
"""Gets a node from the graph."""
try:
return self.nodes[node_id]
except KeyError as e:
raise NodeNotFoundError(f"Node {node_id} not found in graph") from e
def _get_node_path(self, node_id: str, prefix: Optional[str] = None) -> str:
return node_id if prefix is None or prefix == "" else f"{prefix}.{node_id}"
def update_node(self, node_path: str, new_node: BaseInvocation) -> None:
def update_node(self, node_id: str, new_node: BaseInvocation) -> None:
"""Updates a node in the graph."""
graph, node_id = self._get_graph_and_node(node_path)
node = graph.nodes[node_id]
node = self.nodes[node_id]
# Ensure the node type matches the new node
if type(node) is not type(new_node):
raise TypeError(f"Node {node_path} is type {type(node)} but new node is type {type(new_node)}")
raise TypeError(f"Node {node_id} is type {type(node)} but new node is type {type(new_node)}")
# Ensure the new id is either the same or is not in the graph
prefix = None if "." not in node_path else node_path[: node_path.rindex(".")]
new_path = self._get_node_path(new_node.id, prefix=prefix)
if new_node.id != node.id and self.has_node(new_path):
raise NodeAlreadyInGraphError("Node with id {new_node.id} already exists in graph")
if new_node.id != node.id and self.has_node(new_node.id):
raise NodeAlreadyInGraphError(f"Node with id {new_node.id} already exists in graph")
# Set the new node in the graph
graph.nodes[new_node.id] = new_node
self.nodes[new_node.id] = new_node
if new_node.id != node.id:
input_edges = self._get_input_edges_and_graphs(node_path)
output_edges = self._get_output_edges_and_graphs(node_path)
input_edges = self._get_input_edges(node_id)
output_edges = self._get_output_edges(node_id)
# Delete node and all edges
graph.delete_node(node_path)
self.delete_node(node_id)
# Create new edges for each input and output
for graph, _, edge in input_edges:
# Remove the graph prefix from the node path
new_graph_node_path = (
new_node.id
if "." not in edge.destination.node_id
else f'{edge.destination.node_id[edge.destination.node_id.rindex("."):]}.{new_node.id}'
)
graph.add_edge(
for edge in input_edges:
self.add_edge(
Edge(
source=edge.source,
destination=EdgeConnection(node_id=new_graph_node_path, field=edge.destination.field),
destination=EdgeConnection(node_id=new_node.id, field=edge.destination.field),
)
)
for graph, _, edge in output_edges:
# Remove the graph prefix from the node path
new_graph_node_path = (
new_node.id
if "." not in edge.source.node_id
else f'{edge.source.node_id[edge.source.node_id.rindex("."):]}.{new_node.id}'
)
graph.add_edge(
for edge in output_edges:
self.add_edge(
Edge(
source=EdgeConnection(node_id=new_graph_node_path, field=edge.source.field),
source=EdgeConnection(node_id=new_node.id, field=edge.source.field),
destination=edge.destination,
)
)
def _get_input_edges(self, node_path: str, field: Optional[str] = None) -> list[Edge]:
"""Gets all input edges for a node"""
edges = self._get_input_edges_and_graphs(node_path)
def _get_input_edges(self, node_id: str, field: Optional[str] = None) -> list[Edge]:
"""Gets all input edges for a node. If field is provided, only edges to that field are returned."""
# Filter to edges that match the field
filtered_edges = (e for e in edges if field is None or e[2].destination.field == field)
edges = [e for e in self.edges if e.destination.node_id == node_id]
# Create full node paths for each edge
return [
Edge(
source=EdgeConnection(
node_id=self._get_node_path(e.source.node_id, prefix=prefix),
field=e.source.field,
),
destination=EdgeConnection(
node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
field=e.destination.field,
),
)
for _, prefix, e in filtered_edges
]
if field is None:
return edges
def _get_input_edges_and_graphs(
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all input edges for a node along with the graph they are in and the graph's path"""
edges = []
filtered_edges = [e for e in edges if e.destination.field == field]
# Return any input edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path])
return filtered_edges
node_id = node_path if "." not in node_path else node_path[: node_path.index(".")]
node = self.nodes[node_id]
def _get_output_edges(self, node_id: str, field: Optional[str] = None) -> list[Edge]:
"""Gets all output edges for a node. If field is provided, only edges from that field are returned."""
edges = [e for e in self.edges if e.source.node_id == node_id]
if isinstance(node, GraphInvocation):
graph = node.graph
graph_path = node.id if prefix is None or prefix == "" else self._get_node_path(node.id, prefix=prefix)
graph_edges = graph._get_input_edges_and_graphs(node_path[(len(node_id) + 1) :], prefix=graph_path)
edges.extend(graph_edges)
if field is None:
return edges
return edges
filtered_edges = [e for e in edges if e.source.field == field]
def _get_output_edges(self, node_path: str, field: str) -> list[Edge]:
"""Gets all output edges for a node"""
edges = self._get_output_edges_and_graphs(node_path)
# Filter to edges that match the field
filtered_edges = (e for e in edges if e[2].source.field == field)
# Create full node paths for each edge
return [
Edge(
source=EdgeConnection(
node_id=self._get_node_path(e.source.node_id, prefix=prefix),
field=e.source.field,
),
destination=EdgeConnection(
node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
field=e.destination.field,
),
)
for _, prefix, e in filtered_edges
]
def _get_output_edges_and_graphs(
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", Union[str, None], Edge]]:
"""Gets all output edges for a node along with the graph they are in and the graph's path"""
edges = []
# Return any input edges that appear in this graph
edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path])
node_id = node_path if "." not in node_path else node_path[: node_path.index(".")]
node = self.nodes[node_id]
if isinstance(node, GraphInvocation):
graph = node.graph
graph_path = node.id if prefix is None or prefix == "" else self._get_node_path(node.id, prefix=prefix)
graph_edges = graph._get_output_edges_and_graphs(node_path[(len(node_id) + 1) :], prefix=graph_path)
edges.extend(graph_edges)
return edges
return filtered_edges
def _is_iterator_connection_valid(
self,
node_path: str,
node_id: str,
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "collection")]
outputs = [e.destination for e in self._get_output_edges(node_path, "item")]
inputs = [e.source for e in self._get_input_edges(node_id, "collection")]
outputs = [e.destination for e in self._get_output_edges(node_id, "item")]
if new_input is not None:
inputs.append(new_input)
@@ -718,12 +644,12 @@ class Graph(BaseModel):
def _is_collector_connection_valid(
self,
node_path: str,
node_id: str,
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = [e.source for e in self._get_input_edges(node_path, "item")]
outputs = [e.destination for e in self._get_output_edges(node_path, "collection")]
inputs = [e.source for e in self._get_input_edges(node_id, "item")]
outputs = [e.destination for e in self._get_output_edges(node_id, "collection")]
if new_input is not None:
inputs.append(new_input)
@@ -779,27 +705,17 @@ class Graph(BaseModel):
g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges})
return g
def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph:
def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None) -> nx.DiGraph:
"""Returns a flattened NetworkX DiGraph, including all subgraphs (but not with iterations expanded)"""
g = nx_graph or nx.DiGraph()
# Add all nodes from this graph except graph/iteration nodes
g.add_nodes_from(
[
self._get_node_path(n.id, prefix)
for n in self.nodes.values()
if not isinstance(n, GraphInvocation) and not isinstance(n, IterateInvocation)
]
)
# Expand graph nodes
for sgn in (gn for gn in self.nodes.values() if isinstance(gn, GraphInvocation)):
g = sgn.graph.nx_graph_flat(g, self._get_node_path(sgn.id, prefix))
g.add_nodes_from([n.id for n in self.nodes.values() if not isinstance(n, IterateInvocation)])
# TODO: figure out if iteration nodes need to be expanded
unique_edges = {(e.source.node_id, e.destination.node_id) for e in self.edges}
g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges])
g.add_edges_from([(e[0], e[1]) for e in unique_edges])
return g
@@ -824,9 +740,7 @@ class GraphExecutionState(BaseModel):
)
# The results of executed nodes
results: dict[str, Annotated[InvocationOutputsUnion, Field(discriminator="type")]] = Field(
description="The results of node executions", default_factory=dict
)
results: dict[str, BaseInvocationOutput] = Field(description="The results of node executions", default_factory=dict)
# Errors raised when executing nodes
errors: dict[str, str] = Field(description="Errors raised when executing nodes", default_factory=dict)
@@ -843,27 +757,51 @@ class GraphExecutionState(BaseModel):
default_factory=dict,
)
@field_validator("results", mode="plain")
@classmethod
def validate_results(cls, v: dict[str, BaseInvocationOutput]):
"""Validates the results in the GES by retrieving a union of all output types and validating each result."""
# See the comment in `Graph.validate_nodes` for an explanation of this logic.
results: dict[str, BaseInvocationOutput] = {}
typeadapter = BaseInvocationOutput.get_typeadapter()
for result_id, result in v.items():
results[result_id] = typeadapter.validate_python(result)
return results
@field_validator("graph")
def graph_is_valid(cls, v: Graph):
"""Validates that the graph is valid"""
v.validate_self()
return v
model_config = ConfigDict(
json_schema_extra={
"required": [
"id",
"graph",
"execution_graph",
"executed",
"executed_history",
"results",
"errors",
"prepared_source_mapping",
"source_prepared_mapping",
]
}
)
@classmethod
def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
# See the comment in `Graph.__get_pydantic_json_schema__` for an explanation of this logic.
class GraphExecutionState(BaseModel):
"""Tracks the state of a graph execution"""
id: str = Field(description="The id of the execution state")
graph: Graph = Field(description="The graph being executed")
execution_graph: Graph = Field(description="The expanded graph of activated and executed nodes")
executed: set[str] = Field(description="The set of node ids that have been executed")
executed_history: list[str] = Field(
description="The list of node ids that have been executed, in order of execution"
)
results: dict[
str, Annotated[Union[tuple(BaseInvocationOutput._output_classes)], Field(discriminator="type")]
] = Field(description="The results of node executions")
errors: dict[str, str] = Field(description="Errors raised when executing nodes")
prepared_source_mapping: dict[str, str] = Field(
description="The map of prepared nodes to original graph nodes"
)
source_prepared_mapping: dict[str, set[str]] = Field(
description="The map of original graph nodes to prepared nodes"
)
json_schema = handler(GraphExecutionState.__pydantic_core_schema__)
json_schema = handler.resolve_ref_schema(json_schema)
return json_schema
def next(self) -> Optional[BaseInvocation]:
"""Gets the next node ready to execute."""
@@ -919,17 +857,17 @@ class GraphExecutionState(BaseModel):
"""Returns true if the graph has any errors"""
return len(self.errors) > 0
def _create_execution_node(self, node_path: str, iteration_node_map: list[tuple[str, str]]) -> list[str]:
def _create_execution_node(self, node_id: str, iteration_node_map: list[tuple[str, str]]) -> list[str]:
"""Prepares an iteration node and connects all edges, returning the new node id"""
node = self.graph.get_node(node_path)
node = self.graph.get_node(node_id)
self_iteration_count = -1
# If this is an iterator node, we must create a copy for each iteration
if isinstance(node, IterateInvocation):
# Get input collection edge (should error if there are no inputs)
input_collection_edge = next(iter(self.graph._get_input_edges(node_path, "collection")))
input_collection_edge = next(iter(self.graph._get_input_edges(node_id, "collection")))
input_collection_prepared_node_id = next(
n[1] for n in iteration_node_map if n[0] == input_collection_edge.source.node_id
)
@@ -943,7 +881,7 @@ class GraphExecutionState(BaseModel):
return new_nodes
# Get all input edges
input_edges = self.graph._get_input_edges(node_path)
input_edges = self.graph._get_input_edges(node_id)
# Create new edges for this iteration
# For collect nodes, this may contain multiple inputs to the same field
@@ -970,10 +908,10 @@ class GraphExecutionState(BaseModel):
# Add to execution graph
self.execution_graph.add_node(new_node)
self.prepared_source_mapping[new_node.id] = node_path
if node_path not in self.source_prepared_mapping:
self.source_prepared_mapping[node_path] = set()
self.source_prepared_mapping[node_path].add(new_node.id)
self.prepared_source_mapping[new_node.id] = node_id
if node_id not in self.source_prepared_mapping:
self.source_prepared_mapping[node_id] = set()
self.source_prepared_mapping[node_id].add(new_node.id)
# Add new edges to execution graph
for edge in new_edges:
@@ -1077,13 +1015,13 @@ class GraphExecutionState(BaseModel):
def _get_iteration_node(
self,
source_node_path: str,
source_node_id: str,
graph: nx.DiGraph,
execution_graph: nx.DiGraph,
prepared_iterator_nodes: list[str],
) -> Optional[str]:
"""Gets the prepared version of the specified source node that matches every iteration specified"""
prepared_nodes = self.source_prepared_mapping[source_node_path]
prepared_nodes = self.source_prepared_mapping[source_node_id]
if len(prepared_nodes) == 1:
return next(iter(prepared_nodes))
@@ -1094,7 +1032,7 @@ class GraphExecutionState(BaseModel):
# Filter to only iterator nodes that are a parent of the specified node, in tuple format (prepared, source)
iterator_source_node_mapping = [(n, self.prepared_source_mapping[n]) for n in prepared_iterator_nodes]
parent_iterators = [itn for itn in iterator_source_node_mapping if nx.has_path(graph, itn[1], source_node_path)]
parent_iterators = [itn for itn in iterator_source_node_mapping if nx.has_path(graph, itn[1], source_node_id)]
return next(
(n for n in prepared_nodes if all(nx.has_path(execution_graph, pit[0], n) for pit in parent_iterators)),
@@ -1163,19 +1101,19 @@ class GraphExecutionState(BaseModel):
def add_node(self, node: BaseInvocation) -> None:
self.graph.add_node(node)
def update_node(self, node_path: str, new_node: BaseInvocation) -> None:
if not self._is_node_updatable(node_path):
def update_node(self, node_id: str, new_node: BaseInvocation) -> None:
if not self._is_node_updatable(node_id):
raise NodeAlreadyExecutedError(
f"Node {node_path} has already been prepared or executed and cannot be updated"
f"Node {node_id} has already been prepared or executed and cannot be updated"
)
self.graph.update_node(node_path, new_node)
self.graph.update_node(node_id, new_node)
def delete_node(self, node_path: str) -> None:
if not self._is_node_updatable(node_path):
def delete_node(self, node_id: str) -> None:
if not self._is_node_updatable(node_id):
raise NodeAlreadyExecutedError(
f"Node {node_path} has already been prepared or executed and cannot be deleted"
f"Node {node_id} has already been prepared or executed and cannot be deleted"
)
self.graph.delete_node(node_path)
self.graph.delete_node(node_id)
def add_edge(self, edge: Edge) -> None:
if not self._is_node_updatable(edge.destination.node_id):
@@ -1190,63 +1128,3 @@ class GraphExecutionState(BaseModel):
f"Destination node {edge.destination.node_id} has already been prepared or executed and cannot have a source edge deleted"
)
self.graph.delete_edge(edge)
class ExposedNodeInput(BaseModel):
node_path: str = Field(description="The node path to the node with the input")
field: str = Field(description="The field name of the input")
alias: str = Field(description="The alias of the input")
class ExposedNodeOutput(BaseModel):
node_path: str = Field(description="The node path to the node with the output")
field: str = Field(description="The field name of the output")
alias: str = Field(description="The alias of the output")
class LibraryGraph(BaseModel):
id: str = Field(description="The unique identifier for this library graph", default_factory=uuid_string)
graph: Graph = Field(description="The graph")
name: str = Field(description="The name of the graph")
description: str = Field(description="The description of the graph")
exposed_inputs: list[ExposedNodeInput] = Field(description="The inputs exposed by this graph", default_factory=list)
exposed_outputs: list[ExposedNodeOutput] = Field(
description="The outputs exposed by this graph", default_factory=list
)
@field_validator("exposed_inputs", "exposed_outputs")
def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]):
if len(v) != len({i.alias for i in v}):
raise ValueError("Duplicate exposed alias")
return v
@model_validator(mode="after")
def validate_exposed_nodes(cls, values):
graph = values.graph
# Validate exposed inputs
for exposed_input in values.exposed_inputs:
if not graph.has_node(exposed_input.node_path):
raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist")
node = graph.get_node(exposed_input.node_path)
if get_input_field(node, exposed_input.field) is None:
raise ValueError(
f"Exposed input field {exposed_input.field} does not exist on node {exposed_input.node_path}"
)
# Validate exposed outputs
for exposed_output in values.exposed_outputs:
if not graph.has_node(exposed_output.node_path):
raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist")
node = graph.get_node(exposed_output.node_path)
if get_output_field(node, exposed_output.field) is None:
raise ValueError(
f"Exposed output field {exposed_output.field} does not exist on node {exposed_output.node_path}"
)
return values
GraphInvocation.model_rebuild(force=True)
Graph.model_rebuild(force=True)
GraphExecutionState.model_rebuild(force=True)
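With subgraphs removed, nx_graph_flat is a direct projection of node ids and edges. A standalone illustration of execution ordering on such a projection, reusing toy ids in the spirit of the deleted text-to-image graph above:

import networkx as nx

# toy projection of Graph.nx_graph_flat: plain node ids plus (source, destination) pairs
g = nx.DiGraph()
g.add_nodes_from(["width", "height", "seed", "3", "6", "7"])
g.add_edges_from([("width", "3"), ("height", "3"), ("seed", "3"), ("3", "6"), ("6", "7")])

# one valid execution order for the session
print(list(nx.topological_sort(g)))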

View File

@@ -1,3 +1,4 @@
import threading
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Optional
@@ -12,7 +13,6 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType, SubModelType
from invokeai.backend.model_manager.load.load_base import LoadedModel
@@ -22,6 +22,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
if TYPE_CHECKING:
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
"""
The InvocationContext provides access to various services and data about the current invocation.
@@ -48,26 +49,18 @@ Note: The docstrings are in weird places, but that's where they must be to get I
@dataclass
class InvocationContextData:
queue_item: "SessionQueueItem"
"""The queue item that is being executed."""
invocation: "BaseInvocation"
"""The invocation that is being executed."""
session_id: str
"""The session that is being executed."""
queue_id: str
"""The queue in which the session is being executed."""
source_node_id: str
"""The ID of the node from which the currently executing invocation was prepared."""
queue_item_id: int
"""The ID of the queue item that is being executed."""
batch_id: str
"""The ID of the batch that is being executed."""
workflow: Optional[WorkflowWithoutID] = None
"""The workflow associated with this queue item, if any."""
source_invocation_id: str
"""The ID of the invocation from which the currently executing invocation was prepared."""
class InvocationContextInterface:
def __init__(self, services: InvocationServices, context_data: InvocationContextData) -> None:
def __init__(self, services: InvocationServices, data: InvocationContextData) -> None:
self._services = services
self._context_data = context_data
self._data = data
class BoardsInterface(InvocationContextInterface):
@@ -173,26 +166,26 @@ class ImagesInterface(InvocationContextInterface):
metadata_ = None
if metadata:
metadata_ = metadata
elif isinstance(self._context_data.invocation, WithMetadata):
metadata_ = self._context_data.invocation.metadata
elif isinstance(self._data.invocation, WithMetadata):
metadata_ = self._data.invocation.metadata
# If `board_id` is provided directly, use that. Else, use the board provided by `WithBoard`, falling back to None.
board_id_ = None
if board_id:
board_id_ = board_id
elif isinstance(self._context_data.invocation, WithBoard) and self._context_data.invocation.board:
board_id_ = self._context_data.invocation.board.board_id
elif isinstance(self._data.invocation, WithBoard) and self._data.invocation.board:
board_id_ = self._data.invocation.board.board_id
return self._services.images.create(
image=image,
is_intermediate=self._context_data.invocation.is_intermediate,
is_intermediate=self._data.invocation.is_intermediate,
image_category=image_category,
board_id=board_id_,
metadata=metadata_,
image_origin=ResourceOrigin.INTERNAL,
workflow=self._context_data.workflow,
session_id=self._context_data.session_id,
node_id=self._context_data.invocation.id,
workflow=self._data.queue_item.workflow,
session_id=self._data.queue_item.session_id,
node_id=self._data.invocation.id,
)
def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image:
@@ -254,7 +247,7 @@ class ConditioningInterface(InvocationContextInterface):
"""
Saves a conditioning data object, returning its name.
:param conditioning_context_data: The conditioning data to save.
:param conditioning_data: The conditioning data to save.
"""
name = self._services.conditioning.save(obj=conditioning_data)
@@ -292,7 +285,7 @@ class ModelsInterface(InvocationContextInterface):
# the event payloads.
return self._services.model_manager.load_model_by_key(
key=key, submodel_type=submodel_type, context_data=self._context_data
key=key, submodel_type=submodel_type, context_data=self._data
)
def load_by_attrs(
@@ -311,7 +304,7 @@ class ModelsInterface(InvocationContextInterface):
base_model=base_model,
model_type=model_type,
submodel=submodel,
context_data=self._context_data,
context_data=self._data,
)
def get_config(self, key: str) -> AnyModelConfig:
@@ -370,6 +363,16 @@ class ConfigInterface(InvocationContextInterface):
class UtilInterface(InvocationContextInterface):
def __init__(
self, services: InvocationServices, data: InvocationContextData, cancel_event: threading.Event
) -> None:
super().__init__(services, data)
self._cancel_event = cancel_event
def is_canceled(self) -> bool:
"""Checks if the current invocation has been canceled."""
return self._cancel_event.is_set()
def sd_step_callback(self, intermediate_state: PipelineIntermediateState, base_model: BaseModelType) -> None:
"""
The step callback emits a progress event with the current step, the total number of
@@ -381,17 +384,12 @@ class UtilInterface(InvocationContextInterface):
:param base_model: The base model for the current denoising step.
"""
# The step callback needs access to the events and the invocation queue services, but this
# represents a dangerous level of access.
#
# We wrap the step callback so that nodes do not have direct access to these services.
stable_diffusion_step_callback(
context_data=self._context_data,
context_data=self._data,
intermediate_state=intermediate_state,
base_model=base_model,
invocation_queue=self._services.queue,
events=self._services.events,
is_canceled=self.is_canceled,
)
@@ -410,50 +408,51 @@ class InvocationContext:
config: ConfigInterface,
util: UtilInterface,
boards: BoardsInterface,
context_data: InvocationContextData,
data: InvocationContextData,
services: InvocationServices,
) -> None:
self.images = images
"""Provides methods to save, get and update images and their metadata."""
"""Methods to save, get and update images and their metadata."""
self.tensors = tensors
"""Provides methods to save and get tensors, including image, noise, masks, and masked images."""
"""Methods to save and get tensors, including image, noise, masks, and masked images."""
self.conditioning = conditioning
"""Provides methods to save and get conditioning data."""
"""Methods to save and get conditioning data."""
self.models = models
"""Provides methods to check if a model exists, get a model, and get a model's info."""
"""Methods to check if a model exists, get a model, and get a model's info."""
self.logger = logger
"""Provides access to the app logger."""
"""The app logger."""
self.config = config
"""Provides access to the app's config."""
"""The app config."""
self.util = util
"""Provides utility methods."""
"""Utility methods, including a method to check if an invocation was canceled and step callbacks."""
self.boards = boards
"""Provides methods to interact with boards."""
self._data = context_data
"""Provides data about the current queue item and invocation. This is an internal API and may change without warning."""
"""Methods to interact with boards."""
self._data = data
"""An internal API providing access to data about the current queue item and invocation. You probably shouldn't use this. It may change without warning."""
self._services = services
"""Provides access to the full application services. This is an internal API and may change without warning."""
"""An internal API providing access to all application services. You probably shouldn't use this. It may change without warning."""
def build_invocation_context(
services: InvocationServices,
context_data: InvocationContextData,
data: InvocationContextData,
cancel_event: threading.Event,
) -> InvocationContext:
"""
Builds the invocation context for a specific invocation execution.
:param invocation_services: The invocation services to wrap.
:param invocation_context_data: The invocation context data.
:param services: The invocation services to wrap.
:param data: The invocation context data.
"""
logger = LoggerInterface(services=services, context_data=context_data)
images = ImagesInterface(services=services, context_data=context_data)
tensors = TensorsInterface(services=services, context_data=context_data)
models = ModelsInterface(services=services, context_data=context_data)
config = ConfigInterface(services=services, context_data=context_data)
util = UtilInterface(services=services, context_data=context_data)
conditioning = ConditioningInterface(services=services, context_data=context_data)
boards = BoardsInterface(services=services, context_data=context_data)
logger = LoggerInterface(services=services, data=data)
images = ImagesInterface(services=services, data=data)
tensors = TensorsInterface(services=services, data=data)
models = ModelsInterface(services=services, data=data)
config = ConfigInterface(services=services, data=data)
util = UtilInterface(services=services, data=data, cancel_event=cancel_event)
conditioning = ConditioningInterface(services=services, data=data)
boards = BoardsInterface(services=services, data=data)
ctx = InvocationContext(
images=images,
@@ -461,7 +460,7 @@ def build_invocation_context(
config=config,
tensors=tensors,
models=models,
context_data=context_data,
data=data,
util=util,
conditioning=conditioning,
services=services,

View File

@@ -1,9 +1,9 @@
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Callable
import torch
from PIL import Image
from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException, ProgressImage
from invokeai.app.services.session_processor.session_processor_common import CanceledException, ProgressImage
from invokeai.backend.model_manager.config import BaseModelType
from ...backend.stable_diffusion import PipelineIntermediateState
@@ -11,7 +11,6 @@ from ...backend.util.util import image_to_dataURL
if TYPE_CHECKING:
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invocation_queue.invocation_queue_base import InvocationQueueABC
from invokeai.app.services.shared.invocation_context import InvocationContextData
@@ -34,10 +33,10 @@ def stable_diffusion_step_callback(
context_data: "InvocationContextData",
intermediate_state: PipelineIntermediateState,
base_model: BaseModelType,
invocation_queue: "InvocationQueueABC",
events: "EventServiceBase",
is_canceled: Callable[[], bool],
) -> None:
if invocation_queue.is_canceled(context_data.session_id):
if is_canceled():
raise CanceledException
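The cancellation contract in miniature: the caller supplies a zero-argument is_canceled callable, and the callback aborts a long-running invocation by raising. A self-contained toy, with the exception class as a stand-in for the real one:

from threading import Event
from typing import Callable

class CanceledException(Exception):  # toy stand-in for the real exception
    pass

def step_callback(step: int, is_canceled: Callable[[], bool]) -> None:
    # checked once per denoising step; raising aborts the long-running invocation
    if is_canceled():
        raise CanceledException
    print(f"step {step} complete")

cancel_event = Event()
step_callback(0, is_canceled=cancel_event.is_set)  # proceeds
cancel_event.set()
try:
    step_callback(1, is_canceled=cancel_event.is_set)  # aborts mid-generation
except CanceledException:
    print("canceled")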
# Some schedulers report not only the noisy latents at the current timestep,
@@ -115,12 +114,12 @@ def stable_diffusion_step_callback(
dataURL = image_to_dataURL(image, image_format="JPEG")
events.emit_generator_progress(
queue_id=context_data.queue_id,
queue_item_id=context_data.queue_item_id,
queue_batch_id=context_data.batch_id,
graph_execution_state_id=context_data.session_id,
queue_id=context_data.queue_item.queue_id,
queue_item_id=context_data.queue_item.item_id,
queue_batch_id=context_data.queue_item.batch_id,
graph_execution_state_id=context_data.queue_item.session_id,
node_id=context_data.invocation.id,
source_node_id=context_data.source_node_id,
source_node_id=context_data.source_invocation_id,
progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
step=intermediate_state.step,
order=intermediate_state.order,

View File

@@ -1,8 +1,47 @@
import re
from typing import List, Tuple
import invokeai.backend.util.logging as logger
from invokeai.app.services.model_records import UnknownModelException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import BaseModelType, ModelType
from invokeai.backend.textual_inversion import TextualInversionModelRaw
def extract_ti_triggers_from_prompt(prompt: str) -> list[str]:
ti_triggers = []
def extract_ti_triggers_from_prompt(prompt: str) -> List[str]:
ti_triggers: List[str] = []
for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt):
ti_triggers.append(trigger)
ti_triggers.append(str(trigger))
return ti_triggers
def generate_ti_list(
prompt: str, base: BaseModelType, context: InvocationContext
) -> List[Tuple[str, TextualInversionModelRaw]]:
ti_list: List[Tuple[str, TextualInversionModelRaw]] = []
for trigger in extract_ti_triggers_from_prompt(prompt):
name_or_key = trigger[1:-1]
try:
loaded_model = context.models.load(key=name_or_key)
model = loaded_model.model
assert isinstance(model, TextualInversionModelRaw)
assert loaded_model.config.base == base
ti_list.append((name_or_key, model))
except UnknownModelException:
try:
loaded_model = context.models.load_by_attrs(
model_name=name_or_key, base_model=base, model_type=ModelType.TextualInversion
)
model = loaded_model.model
assert isinstance(model, TextualInversionModelRaw)
assert loaded_model.config.base == base
ti_list.append((name_or_key, model))
except UnknownModelException:
pass
except ValueError:
logger.warning(f'trigger: "{trigger}" more than one similarly-named textual inversion models')
except AssertionError:
logger.warning(f'trigger: "{trigger}" not a valid textual inversion model for this graph')
except Exception:
logger.warning(f'Failed to load TI model for trigger: "{trigger}"')
return ti_list
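The trigger regex is a pure function of the prompt and can be exercised standalone. A quick example, restating the function above verbatim:

import re
from typing import List

def extract_ti_triggers_from_prompt(prompt: str) -> List[str]:
    return [str(t) for t in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt)]

print(extract_ti_triggers_from_prompt("a photo of <easynegative> and <bad-hands-5>"))
# -> ['<easynegative>', '<bad-hands-5>']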

View File

@@ -8,7 +8,6 @@ from invokeai.app.services.config import InvokeAIAppConfig
def check_invokeai_root(config: InvokeAIAppConfig):
try:
assert config.model_conf_path.exists(), f"{config.model_conf_path} not found"
assert config.db_path.parent.exists(), f"{config.db_path.parent} not found"
assert config.models_path.exists(), f"{config.models_path} not found"
if not config.ignore_missing_core_models:

View File

@@ -1,14 +1,11 @@
"""Utility (backend) functions used by model_install.py"""
import re
from logging import Logger
from pathlib import Path
from typing import Any, Dict, List, Optional
import omegaconf
from huggingface_hub import HfFolder
from pydantic import BaseModel, Field
from pydantic.dataclasses import dataclass
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from tqdm import tqdm
@@ -18,12 +15,8 @@ from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
from invokeai.app.services.model_install import (
HFModelSource,
LocalModelSource,
ModelInstallService,
ModelInstallServiceBase,
ModelSource,
URLModelSource,
)
from invokeai.app.services.model_metadata import ModelMetadataStoreSQL
from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL
@@ -31,7 +24,6 @@ from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager import (
BaseModelType,
InvalidModelConfigException,
ModelRepoVariant,
ModelType,
)
from invokeai.backend.model_manager.metadata import UnknownMetadataException
@@ -226,37 +218,13 @@ class InstallHelper(object):
additional_models.append(reverse_source[requirement])
model_list.extend(additional_models)
def _make_install_source(self, model_info: UnifiedModelInfo) -> ModelSource:
assert model_info.source
model_path_id_or_url = model_info.source.strip("\"' ")
model_path = Path(model_path_id_or_url)
if model_path.exists(): # local file on disk
return LocalModelSource(path=model_path.absolute(), inplace=True)
# parsing huggingface repo ids
# we're going to do a little trick that allows for extended repo_ids of form "foo/bar:fp16"
variants = "|".join([x.lower() for x in ModelRepoVariant.__members__])
if match := re.match(f"^([^/]+/[^/]+?)(?::({variants}))?$", model_path_id_or_url):
repo_id = match.group(1)
repo_variant = ModelRepoVariant(match.group(2)) if match.group(2) else None
subfolder = Path(model_info.subfolder) if model_info.subfolder else None
return HFModelSource(
repo_id=repo_id,
access_token=HfFolder.get_token(),
subfolder=subfolder,
variant=repo_variant,
)
if re.match(r"^(http|https):", model_path_id_or_url):
return URLModelSource(url=AnyHttpUrl(model_path_id_or_url))
raise ValueError(f"Unsupported model source: {model_path_id_or_url}")
def add_or_delete(self, selections: InstallSelections) -> None:
"""Add or delete selected models."""
installer = self._installer
self._add_required_models(selections.install_models)
for model in selections.install_models:
source = self._make_install_source(model)
assert model.source
model_path_id_or_url = model.source.strip("\"' ")
config = (
{
"description": model.description,
@@ -267,12 +235,12 @@ class InstallHelper(object):
)
try:
installer.import_model(
source=source,
installer.heuristic_import(
source=model_path_id_or_url,
config=config,
)
except (UnknownMetadataException, InvalidModelConfigException, HTTPError, OSError) as e:
self._logger.warning(f"{source}: {e}")
self._logger.warning(f"{model.source}: {e}")
for model_to_remove in selections.remove_models:
parts = model_to_remove.split("/")
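
The removed _make_install_source contained the extended repo-id trick described in its comment ("foo/bar:fp16"). A self-contained sketch of that parsing, with a hypothetical VARIANTS list standing in for ModelRepoVariant.__members__:

import re

# Hypothetical stand-in for ModelRepoVariant.__members__
VARIANTS = ["fp16", "fp32", "onnx"]

def parse_extended_repo_id(source: str):
    # Accepts "owner/repo" or "owner/repo:fp16" and splits off the variant.
    pattern = f"^([^/]+/[^/]+?)(?::({'|'.join(VARIANTS)}))?$"
    if match := re.match(pattern, source):
        return match.group(1), match.group(2)
    return None

assert parse_extended_repo_id("foo/bar:fp16") == ("foo/bar", "fp16")
assert parse_extended_repo_id("foo/bar") == ("foo/bar", None)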

View File

@@ -939,7 +939,7 @@ def main() -> None:
# run this unconditionally in case new directories need to be added
initialize_rootdir(config.root_path, opt.yes_to_all)
# this will initialize the models.yaml file if not present
# this will initialize and populate the models tables if not present
install_helper = InstallHelper(config, logger)
models_to_download = default_user_selections(opt, install_helper)

View File

@@ -138,9 +138,16 @@ class ModelConfigBase(BaseModel):
source: Optional[str] = Field(description="model original source (path, URL or repo_id)", default=None)
last_modified: Optional[float] = Field(description="timestamp for modification time", default_factory=time.time)
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
schema["required"].extend(
["key", "base", "type", "format", "original_hash", "current_hash", "source", "last_modified"]
)
model_config = ConfigDict(
use_enum_values=False,
validate_assignment=True,
json_schema_extra=json_schema_extra,
)
def update(self, attributes: Dict[str, Any]) -> None:
@@ -227,37 +234,6 @@ class MainDiffusersConfig(_DiffusersConfig, _MainConfig):
type: Literal[ModelType.Main] = ModelType.Main
class ONNXSD1Config(_MainConfig):
"""Model config for ONNX format models based on sd-1."""
type: Literal[ModelType.ONNX] = ModelType.ONNX
format: Literal[ModelFormat.Onnx, ModelFormat.Olive]
base: Literal[BaseModelType.StableDiffusion1] = BaseModelType.StableDiffusion1
prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
upcast_attention: bool = False
class ONNXSD2Config(_MainConfig):
"""Model config for ONNX format models based on sd-2."""
type: Literal[ModelType.ONNX] = ModelType.ONNX
format: Literal[ModelFormat.Onnx, ModelFormat.Olive]
# No yaml config file for ONNX, so these are part of config
base: Literal[BaseModelType.StableDiffusion2] = BaseModelType.StableDiffusion2
prediction_type: SchedulerPredictionType = SchedulerPredictionType.VPrediction
upcast_attention: bool = True
class ONNXSDXLConfig(_MainConfig):
"""Model config for ONNX format models based on sdxl."""
type: Literal[ModelType.ONNX] = ModelType.ONNX
format: Literal[ModelFormat.Onnx, ModelFormat.Olive]
# No yaml config file for ONNX, so these are part of config
base: Literal[BaseModelType.StableDiffusionXL] = BaseModelType.StableDiffusionXL
prediction_type: SchedulerPredictionType = SchedulerPredictionType.VPrediction
class IPAdapterConfig(ModelConfigBase):
"""Model config for IP Adaptor format models."""
@@ -280,7 +256,6 @@ class T2IConfig(ModelConfigBase):
format: Literal[ModelFormat.Diffusers]
_ONNXConfig = Annotated[Union[ONNXSD1Config, ONNXSD2Config, ONNXSDXLConfig], Field(discriminator="base")]
_ControlNetConfig = Annotated[
Union[ControlNetDiffusersConfig, ControlNetCheckpointConfig],
Field(discriminator="format"),
@@ -290,7 +265,6 @@ _MainModelConfig = Annotated[Union[MainDiffusersConfig, MainCheckpointConfig], F
AnyModelConfig = Union[
_MainModelConfig,
_ONNXConfig,
_VaeConfig,
_ControlNetConfig,
# ModelConfigBase,
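
A minimal sketch of the json_schema_extra mechanism added above: in pydantic v2, a callable passed via ConfigDict can force optional-with-default fields into the emitted schema's required list. Field and class names here are illustrative, not from the source:

from typing import Any, Optional, Type
from pydantic import BaseModel, ConfigDict, Field

class Demo(BaseModel):
    @staticmethod
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
        # Fields with defaults are normally omitted from "required";
        # force them in so schema consumers always see the key.
        schema["required"] = schema.get("required", []) + ["source"]

    model_config = ConfigDict(json_schema_extra=json_schema_extra)
    source: Optional[str] = Field(default=None)

assert "source" in Demo.model_json_schema()["required"]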

View File

@@ -10,7 +10,7 @@ model will be cleared and (re)loaded from disk when next needed.
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from logging import Logger
from typing import Dict, Generic, Optional, Set, TypeVar
from typing import Dict, Generic, Optional, TypeVar
import torch
@@ -89,24 +89,8 @@ class ModelCacheBase(ABC, Generic[T]):
@property
@abstractmethod
def execution_devices(self) -> Set[torch.device]:
"""Return the set of available execution devices."""
pass
@abstractmethod
def acquire_execution_device(self, timeout: int = 0) -> torch.device:
"""
Pick the next available execution device.
If all devices are currently engaged (locked), then
block until timeout seconds have passed and raise a
TimeoutError if no devices are available.
"""
pass
@abstractmethod
def release_execution_device(self, device: torch.device) -> None:
"""Release a previously-acquired execution device."""
def execution_device(self) -> torch.device:
"""Return the exection device (e.g. "cuda" for VRAM)."""
pass
@property
@@ -127,7 +111,7 @@ class ModelCacheBase(ABC, Generic[T]):
pass
@abstractmethod
def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], device: torch.device) -> None:
def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], target_device: torch.device) -> None:
"""Move model into the indicated device."""
pass

View File

@@ -25,8 +25,7 @@ import sys
import time
from contextlib import suppress
from logging import Logger
from threading import BoundedSemaphore, Lock
from typing import Dict, List, Optional, Set
from typing import Dict, List, Optional
import torch
@@ -62,8 +61,8 @@ class ModelCache(ModelCacheBase[AnyModel]):
self,
max_cache_size: float = DEFAULT_MAX_CACHE_SIZE,
max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE,
execution_device: torch.device = torch.device("cuda"),
storage_device: torch.device = torch.device("cpu"),
execution_devices: Optional[Set[torch.device]] = None,
precision: torch.dtype = torch.float16,
sequential_offload: bool = False,
lazy_offloading: bool = True,
@@ -75,7 +74,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
Initialize the model RAM cache.
:param max_cache_size: Maximum size of the RAM cache [6.0 GB]
:param execution_devices: Set of torch device to load active model into [calculated]
:param execution_device: Torch device to load active model into [torch.device('cuda')]
:param storage_device: Torch device to save inactive model in [torch.device('cpu')]
:param precision: Precision for loaded models [torch.float16]
:param lazy_offloading: Keep model in VRAM until another model needs to be loaded
@@ -90,7 +89,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
self._precision: torch.dtype = precision
self._max_cache_size: float = max_cache_size
self._max_vram_cache_size: float = max_vram_cache_size
self._execution_devices: Set[torch.device] = execution_devices or self._get_execution_devices()
self._execution_device: torch.device = execution_device
self._storage_device: torch.device = storage_device
self._logger = logger or InvokeAILogger.get_logger(self.__class__.__name__)
self._log_memory_usage = log_memory_usage or self._logger.level == logging.DEBUG
@@ -100,10 +99,6 @@ class ModelCache(ModelCacheBase[AnyModel]):
self._cached_models: Dict[str, CacheRecord[AnyModel]] = {}
self._cache_stack: List[str] = []
self._lock = Lock()
self._free_execution_device = BoundedSemaphore(len(self._execution_devices))
self._busy_execution_devices: Set[torch.device] = set()
@property
def logger(self) -> Logger:
"""Return the logger used by the cache."""
@@ -120,24 +115,9 @@ class ModelCache(ModelCacheBase[AnyModel]):
return self._storage_device
@property
def execution_devices(self) -> Set[torch.device]:
"""Return the set of available execution devices."""
return self._execution_devices
def acquire_execution_device(self, timeout: int = 0) -> torch.device:
"""Acquire and return an execution device (e.g. "cuda" for VRAM)."""
with self._lock:
self._free_execution_device.acquire(timeout=timeout)
free_devices = self.execution_devices - self._busy_execution_devices
chosen_device = list(free_devices)[0]
self._busy_execution_devices.add(chosen_device)
return chosen_device
def release_execution_device(self, device: torch.device) -> None:
"""Mark this execution device as unused."""
with self._lock:
self._free_execution_device.release()
self._busy_execution_devices.remove(device)
def execution_device(self) -> torch.device:
"""Return the exection device (e.g. "cuda" for VRAM)."""
return self._execution_device
@property
def max_cache_size(self) -> float:
@@ -265,7 +245,13 @@ class ModelCache(ModelCacheBase[AnyModel]):
mps.empty_cache()
def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], target_device: torch.device) -> None:
"""Move model into the indicated device."""
"""Move model into the indicated device.
:param cache_entry: The CacheRecord for the model
:param target_device: The torch.device to move the model into
May raise a torch.cuda.OutOfMemoryError
"""
# These attributes are not in the base ModelMixin class but in various derived classes.
# Some models don't have these attributes, in which case they run in RAM/CPU.
self.logger.debug(f"Called to move {cache_entry.key} to {target_device}")
@@ -279,6 +265,9 @@ class ModelCache(ModelCacheBase[AnyModel]):
if torch.device(source_device).type == torch.device(target_device).type:
return
# may raise an exception here if insufficient GPU VRAM
self._check_free_vram(target_device, cache_entry.size)
start_model_to_time = time.time()
snapshot_before = self._capture_memory_snapshot()
cache_entry.model.to(target_device)
@@ -426,12 +415,12 @@ class ModelCache(ModelCacheBase[AnyModel]):
self.logger.debug(f"After making room: cached_models={len(self._cached_models)}")
@staticmethod
def _get_execution_devices() -> Set[torch.device]:
default_device = choose_torch_device()
if default_device != torch.device("cuda"):
return {default_device}
# we get here if the default device is cuda, and return each of the
# cuda devices.
return {torch.device(f"cuda:{x}") for x in range(0, torch.cuda.device_count())}
def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
if target_device.type != "cuda":
return
vram_device = ( # mem_get_info() needs an indexed device
target_device if target_device.index is not None else torch.device(str(target_device), index=0)
)
free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
if needed_size > free_mem:
raise torch.cuda.OutOfMemoryError
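
A standalone sketch of the same free-VRAM guard, runnable outside the cache. torch.cuda.mem_get_info returns a (free_bytes, total_bytes) tuple and, as the comment above notes, needs an indexed device:

import torch

def has_free_vram(target_device: torch.device, needed_size: int) -> bool:
    if target_device.type != "cuda":
        return True  # the check only applies to CUDA devices
    # mem_get_info() needs an indexed device, e.g. "cuda:0" rather than "cuda"
    device = target_device if target_device.index is not None else torch.device("cuda", index=0)
    free_mem, _total = torch.cuda.mem_get_info(device)
    return needed_size <= free_mem

if torch.cuda.is_available():
    print(has_free_vram(torch.device("cuda"), 2 * 1024**3))  # room for ~2 GiB?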

View File

@@ -2,16 +2,12 @@
Base class and implementation of a class that moves models in and out of VRAM.
"""
from typing import Optional
import torch
from invokeai.backend.model_manager import AnyModel
from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
MAX_GPU_WAIT = 600 # wait up to 10 minutes for a GPU to become free
class ModelLocker(ModelLockerBase):
"""Internal class that mediates movement in and out of GPU."""
@@ -25,7 +21,6 @@ class ModelLocker(ModelLockerBase):
"""
self._cache = cache
self._cache_entry = cache_entry
self._execution_device: Optional[torch.device] = None
@property
def model(self) -> AnyModel:
@@ -44,14 +39,15 @@ class ModelLocker(ModelLockerBase):
if self._cache.lazy_offloading:
self._cache.offload_unlocked_models(self._cache_entry.size)
# We wait for a gpu to be free - may raise a TimeoutError
self._execution_device = self._cache.acquire_execution_device(MAX_GPU_WAIT)
self._cache.move_model_to_device(self._cache_entry, self._execution_device)
self._cache.move_model_to_device(self._cache_entry, self._cache.execution_device)
self._cache_entry.loaded = True
self._cache.logger.debug(f"Locking {self._cache_entry.key} in {self._execution_device}")
self._cache.logger.debug(f"Locking {self._cache_entry.key} in {self._cache.execution_device}")
self._cache.print_cuda_stats()
except torch.cuda.OutOfMemoryError:
self._cache.logger.warning("Insufficient GPU memory to load model. Aborting")
self._cache_entry.unlock()
raise
except Exception:
self._cache_entry.unlock()
raise
@@ -63,8 +59,6 @@ class ModelLocker(ModelLockerBase):
return
self._cache_entry.unlock()
if self._execution_device:
self._cache.release_execution_device(self._execution_device)
if not self._cache.lazy_offloading:
self._cache.offload_unlocked_models(self._cache_entry.size)
self._cache.print_cuda_stats()
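
Under the single-execution-device design restored here, the lock/unlock flow reduces to roughly the following. A sketch only, with error handling and logging omitted:

# Sketch of the simplified flow; not the full ModelLocker class.
def lock(cache, cache_entry):
    cache_entry.lock()
    if cache.lazy_offloading:
        cache.offload_unlocked_models(cache_entry.size)
    cache.move_model_to_device(cache_entry, cache.execution_device)
    cache_entry.loaded = True

def unlock(cache, cache_entry):
    cache_entry.unlock()
    if not cache.lazy_offloading:
        cache.offload_unlocked_models(cache_entry.size)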

View File

@@ -1,7 +1,7 @@
"""
invokeai.backend.model_manager.merge exports:
merge_diffusion_models() -- combine multiple models by location and return a pipeline object
merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml
merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to the models tables
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
"""
@@ -101,7 +101,7 @@ class ModelMerger(object):
**kwargs: Any,
) -> AnyModelConfig:
"""
:param models: up to three models, designated by their InvokeAI models.yaml model name
:param models: up to three models, designated by their registered InvokeAI model name
:param merged_model_name: name for new model
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2

View File

@@ -160,7 +160,7 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase):
nsfw=model_json["nsfw"],
restrictions=LicenseRestrictions(
AllowNoCredit=model_json["allowNoCredit"],
AllowCommercialUse=CommercialUsage(model_json["allowCommercialUse"]),
AllowCommercialUse={CommercialUsage(x) for x in model_json["allowCommercialUse"]},
AllowDerivatives=model_json["allowDerivatives"],
AllowDifferentLicense=model_json["allowDifferentLicense"],
),

View File

@@ -54,8 +54,8 @@ class LicenseRestrictions(BaseModel):
AllowDifferentLicense: bool = Field(
description="if true, derivatives of this model be redistributed under a different license", default=False
)
AllowCommercialUse: Optional[CommercialUsage] = Field(
description="Type of commercial use allowed or 'No' if no commercial use is allowed.", default=None
AllowCommercialUse: Optional[Set[CommercialUsage] | CommercialUsage] = Field(
description="Type of commercial use allowed if no commercial use is allowed.", default=None
)
@@ -142,7 +142,10 @@ class CivitaiMetadata(ModelMetadataWithFiles):
if self.restrictions.AllowCommercialUse is None:
return False
else:
return self.restrictions.AllowCommercialUse != CommercialUsage("None")
# accommodate schema change
acu = self.restrictions.AllowCommercialUse
commercial_usage = acu if isinstance(acu, set) else {acu}
return CommercialUsage.No not in commercial_usage
@property
def allow_derivatives(self) -> bool:
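
The accommodation above normalizes a scalar-or-set field before testing membership. The same pattern in isolation; the No member and its "None" value come from the code shown, while the other enum members are illustrative assumptions:

from enum import Enum
from typing import Optional, Set, Union

class CommercialUsage(str, Enum):
    No = "None"
    Image = "Image"    # illustrative member
    Sell = "Sell"      # illustrative member

def allows_commercial_use(acu: Optional[Union[Set[CommercialUsage], CommercialUsage]]) -> bool:
    if acu is None:
        return False
    usage = acu if isinstance(acu, set) else {acu}  # accommodate schema change
    return CommercialUsage.No not in usage

assert allows_commercial_use({CommercialUsage.Image, CommercialUsage.Sell})
assert not allows_commercial_use(CommercialUsage.No)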

View File

@@ -188,7 +188,7 @@ class ModelProbe(object):
and fields["prediction_type"] == SchedulerPredictionType.VPrediction
)
model_info = ModelConfigFactory.make_config(fields)
model_info = ModelConfigFactory.make_config(fields, key=fields.get("key", None))
return model_info
@classmethod

View File

@@ -28,6 +28,7 @@ from typing import Callable, Optional, Set, Union
from pydantic import BaseModel, Field
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util.logging import InvokeAILogger
default_logger: Logger = InvokeAILogger.get_logger()
@@ -117,13 +118,10 @@ class ModelSearch(ModelSearchBase):
"""
models_found: Set[Path] = Field(default_factory=set)
scanned_dirs: Set[Path] = Field(default_factory=set)
pruned_paths: Set[Path] = Field(default_factory=set)
config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
def search_started(self) -> None:
self.models_found = set()
self.scanned_dirs = set()
self.pruned_paths = set()
if self.on_search_started:
self.on_search_started(self._directory)
@@ -139,53 +137,53 @@ class ModelSearch(ModelSearchBase):
def search(self, directory: Union[Path, str]) -> Set[Path]:
self._directory = Path(directory)
if not self._directory.is_absolute():
self._directory = self.config.models_path / self._directory
self.stats = SearchStats() # zero out
self.search_started() # This will initialize _models_found to empty
self._walk_directory(directory)
self._walk_directory(self._directory)
self.search_completed()
return self.models_found
def _walk_directory(self, path: Union[Path, str]) -> None:
for root, dirs, files in os.walk(path, followlinks=True):
# don't descend into directories that start with a "."
# to avoid the Mac .DS_STORE issue.
if str(Path(root).name).startswith("."):
self.pruned_paths.add(Path(root))
if any(Path(root).is_relative_to(x) for x in self.pruned_paths):
continue
def _walk_directory(self, path: Union[Path, str], max_depth: int = 20) -> None:
absolute_path = Path(path)
if (
len(absolute_path.parts) - len(self._directory.parts) > max_depth
or not absolute_path.exists()
or absolute_path.parent in self.models_found
):
return
entries = os.scandir(absolute_path.as_posix())
entries = [entry for entry in entries if not entry.name.startswith(".")]
dirs = [entry for entry in entries if entry.is_dir()]
file_names = [entry.name for entry in entries if entry.is_file()]
if any(
x in file_names
for x in [
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"image_encoder.txt",
]
):
try:
self.model_found(absolute_path)
return
except KeyboardInterrupt:
raise
except Exception as e:
self.logger.warning(str(e))
return
self.stats.items_scanned += len(dirs) + len(file_names)
for d in dirs:
path = Path(root) / d
if path.parent in self.scanned_dirs:
self.scanned_dirs.add(path)
continue
if any(
(path / x).exists()
for x in [
"config.json",
"model_index.json",
"learned_embeds.bin",
"pytorch_lora_weights.bin",
"image_encoder.txt",
]
):
self.scanned_dirs.add(path)
try:
self.model_found(path)
except KeyboardInterrupt:
raise
except Exception as e:
self.logger.warning(str(e))
for n in file_names:
if n.endswith((".ckpt", ".bin", ".pth", ".safetensors", ".pt")):
try:
self.model_found(absolute_path / n)
except KeyboardInterrupt:
raise
except Exception as e:
self.logger.warning(str(e))
for f in files:
path = Path(root) / f
if path.parent in self.scanned_dirs:
continue
if path.suffix in {".ckpt", ".bin", ".pth", ".safetensors", ".pt"}:
try:
self.model_found(path)
except KeyboardInterrupt:
raise
except Exception as e:
self.logger.warning(str(e))
for d in dirs:
self._walk_directory(absolute_path / d)
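
The shape of the new traversal, reduced to its essentials: a depth-limited recursive os.scandir walk that treats certain marker files as "this directory is a model". A sketch under those assumptions; the real method also tracks stats, resolves paths against the config, and reports via callbacks:

import os
from pathlib import Path
from typing import Set

MARKER_FILES = {"config.json", "model_index.json", "learned_embeds.bin"}
WEIGHT_SUFFIXES = (".ckpt", ".bin", ".pth", ".safetensors", ".pt")

def walk(path: Path, found: Set[Path], max_depth: int = 20, depth: int = 0) -> None:
    if depth > max_depth or not path.exists():
        return
    entries = [e for e in os.scandir(path) if not e.name.startswith(".")]
    dirs = [e for e in entries if e.is_dir()]
    file_names = [e.name for e in entries if e.is_file()]
    if MARKER_FILES & set(file_names):
        found.add(path)  # a marker file makes the whole directory one model
        return
    for name in file_names:
        if name.endswith(WEIGHT_SUFFIXES):
            found.add(path / name)  # loose checkpoint/safetensors file
    for d in dirs:
        walk(path / d.name, found, max_depth, depth + 1)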

View File

@@ -4,12 +4,12 @@ from __future__ import annotations
import pickle
from contextlib import contextmanager
from typing import Any, Dict, Iterator, List, Optional, Tuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers import OnnxRuntimeModel, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager import AnyModel
@@ -168,7 +168,7 @@ class ModelPatcher:
def apply_ti(
cls,
tokenizer: CLIPTokenizer,
text_encoder: CLIPTextModel,
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
ti_list: List[Tuple[str, TextualInversionModelRaw]],
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
init_tokens_count = None
@@ -265,7 +265,7 @@ class ModelPatcher:
@contextmanager
def apply_clip_skip(
cls,
text_encoder: CLIPTextModel,
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
clip_skip: int,
) -> None:
skipped_layers = []
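
The widened annotation reflects that SDXL's second text encoder is a CLIPTextModelWithProjection rather than a plain CLIPTextModel; both expose the token-embedding interface the TI patching relies on, so a union type suffices. A hedged illustration (helper name is hypothetical):

from typing import Union
from transformers import CLIPTextModel, CLIPTextModelWithProjection

TextEncoder = Union[CLIPTextModel, CLIPTextModelWithProjection]

def embedding_dim(text_encoder: TextEncoder) -> int:
    # Both classes expose the same token-embedding accessor,
    # which is all the TI patching code needs.
    return text_encoder.get_input_embeddings().weight.shape[1]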

View File

@@ -86,6 +86,7 @@ class AddsMaskGuidance:
mask_latents: torch.FloatTensor
scheduler: SchedulerMixin
noise: torch.Tensor
gradient_mask: bool
def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput:
output_class = step_output.__class__ # We'll create a new one with masked data.
@@ -121,7 +122,12 @@ class AddsMaskGuidance:
# TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already?
# mask_latents = self.scheduler.scale_model_input(mask_latents, t)
mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size)
masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype))
if self.gradient_mask:
threshold = (t.item()) / self.scheduler.config.num_train_timesteps
mask_bool = mask > threshold  # I don't know when mask got inverted, but it did
masked_input = torch.where(mask_bool, latents, mask_latents)
else:
masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype))
return masked_input
@@ -335,6 +341,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
t2i_adapter_data: Optional[list[T2IAdapterData]] = None,
mask: Optional[torch.Tensor] = None,
masked_latents: Optional[torch.Tensor] = None,
gradient_mask: Optional[bool] = False,
seed: Optional[int] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if init_timestep.shape[0] == 0:
@@ -375,7 +382,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
self._unet_forward, mask, masked_latents
)
else:
additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise))
additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise, gradient_mask))
try:
latents, attention_map_saver = self.generate_latents_from_embeddings(
@@ -392,7 +399,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
self.invokeai_diffuser.model_forward_callback = self._unet_forward
# restore unmasked part
if mask is not None:
if mask is not None and not gradient_mask:
latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype))
return latents, attention_map_saver
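
A toy illustration of the gradient-mask branch above: per-pixel mask values act as per-pixel timestep thresholds, so regions "unmask" gradually as t decreases. Names mirror the snippet, not the full pipeline:

import torch

num_train_timesteps = 1000
latents = torch.zeros(1, 4, 2, 2)
mask_latents = torch.ones(1, 4, 2, 2)
mask = torch.tensor([[0.2, 0.4], [0.6, 0.8]]).expand(1, 4, 2, 2)

for t in (900, 500, 100):
    threshold = t / num_train_timesteps
    mask_bool = mask > threshold
    masked_input = torch.where(mask_bool, latents, mask_latents)
    # At t=900 no region exceeds the threshold; by t=100 all do.
    print(t, mask_bool.float().mean().item())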

View File

@@ -120,7 +120,7 @@ def parse_args() -> Namespace:
"--model",
type=str,
default="sd-1/main/stable-diffusion-v1-5",
help="Name of the diffusers model to train against, as defined in configs/models.yaml.",
help="Name of the diffusers model to train against.",
)
model_group.add_argument(
"--revision",

View File

@@ -34,7 +34,7 @@ sd-1/main/Analog-Diffusion:
recommended: False
sd-1/main/Deliberate:
description: Versatile model that produces detailed images up to 768px (4.27 GB)
source: XpucT/Deliberate
source: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors?download=true
recommended: False
sd-1/main/Dungeons-and-Diffusion:
description: Dungeons & Dragons characters (2.13 GB)

View File

@@ -455,7 +455,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
selections = self.parentApp.install_selections
all_models = self.all_models
# Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove
# Defined models (in INITIAL_CONFIG.yaml or invokeai.db) to add/remove
ui_sections = [
self.starter_pipelines,
self.pipeline_models,

View File

@@ -435,7 +435,7 @@ def main():
run_cli(args)
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge")
logger.error("You need to have at least two diffusers models in order to merge")
else:
logger.error("Not enough room for the user interface. Try making this window larger.")
sys.exit(-1)

invokeai/frontend/training/textual_inversion.py Executable file → Normal file
View File

@@ -261,7 +261,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
def validate_field_values(self) -> bool:
bad_fields = []
if self.model.value is None:
bad_fields.append("Model Name must correspond to a known model in models.yaml")
bad_fields.append("Model Name must correspond to a known model in invokeai.db")
if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
if self.train_data_dir.value is None:
@@ -442,7 +442,7 @@ def main() -> None:
pass
except (widget.NotEnoughSpaceForWidget, Exception) as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("You need to have at least one diffusers models defined in models.yaml in order to train")
logger.error("You need to have at least one diffusers models defined in invokeai.db in order to train")
elif str(e).startswith("addwstr"):
logger.error("Not enough window space for the interface. Please make your window larger and try again.")
else:

View File

@@ -1,454 +0,0 @@
#!/usr/bin/env python
"""
This is the frontend to "textual_inversion_training.py".
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""
import os
import re
import shutil
import sys
import traceback
from argparse import Namespace
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import initialize_installer
from invokeai.backend.model_manager import ModelType
from invokeai.backend.training import do_textual_inversion_training, parse_args
TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
CONF_FILE = "preferences.conf"
config = None
class textualInversionForm(npyscreen.FormMultiPageAction):
resolutions = [512, 768, 1024]
lr_schedulers = [
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup",
]
precisions = ["no", "fp16", "bf16"]
learnable_properties = ["object", "style"]
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
self.saved_args = saved_args or {}
super().__init__(parentApp, name)
def afterEditing(self) -> None:
self.parentApp.setNextForm(None)
def create(self) -> None:
self.model_names, default = self.get_model_names()
default_initializer_token = ""
default_placeholder_token = ""
saved_args = self.saved_args
assert config is not None
try:
default = self.model_names.index(saved_args["model"])
except Exception:
pass
self.add_widget_intelligent(
npyscreen.FixedText,
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields, cursor arrows to make a selection, and space to toggle checkboxes.",
editable=False,
)
self.model = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Model Name:",
values=sorted(self.model_names),
value=default,
max_height=len(self.model_names) + 1,
scroll_exit=True,
)
self.placeholder_token = self.add_widget_intelligent(
npyscreen.TitleText,
name="Trigger Term:",
value="", # saved_args.get('placeholder_token',''), # to restore previous term
scroll_exit=True,
)
self.placeholder_token.when_value_edited = self.initializer_changed
self.nextrely -= 1
self.nextrelx += 30
self.prompt_token = self.add_widget_intelligent(
npyscreen.FixedText,
name="Trigger term for use in prompt",
value="",
editable=False,
scroll_exit=True,
)
self.nextrelx -= 30
self.initializer_token = self.add_widget_intelligent(
npyscreen.TitleText,
name="Initializer:",
value=saved_args.get("initializer_token", default_initializer_token),
scroll_exit=True,
)
self.resume_from_checkpoint = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Resume from last saved checkpoint",
value=False,
scroll_exit=True,
)
self.learnable_property = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Learnable property:",
values=self.learnable_properties,
value=self.learnable_properties.index(saved_args.get("learnable_property", "object")),
max_height=4,
scroll_exit=True,
)
self.train_data_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="Data Training Directory:",
select_dir=True,
must_exist=False,
value=str(
saved_args.get(
"train_data_dir",
config.root_dir / TRAINING_DATA / default_placeholder_token,
)
),
scroll_exit=True,
)
self.output_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="Output Destination Directory:",
select_dir=True,
must_exist=False,
value=str(
saved_args.get(
"output_dir",
config.root_dir / TRAINING_DIR / default_placeholder_token,
)
),
scroll_exit=True,
)
self.resolution = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Image resolution (pixels):",
values=self.resolutions,
value=self.resolutions.index(saved_args.get("resolution", 512)),
max_height=4,
scroll_exit=True,
)
self.center_crop = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Center crop images before resizing to resolution",
value=saved_args.get("center_crop", False),
scroll_exit=True,
)
self.mixed_precision = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Mixed Precision:",
values=self.precisions,
value=self.precisions.index(saved_args.get("mixed_precision", "fp16")),
max_height=4,
scroll_exit=True,
)
self.num_train_epochs = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Number of training epochs:",
out_of=1000,
step=50,
lowest=1,
value=saved_args.get("num_train_epochs", 100),
scroll_exit=True,
)
self.max_train_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Max Training Steps:",
out_of=10000,
step=500,
lowest=1,
value=saved_args.get("max_train_steps", 3000),
scroll_exit=True,
)
self.train_batch_size = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Batch Size (reduce if you run out of memory):",
out_of=50,
step=1,
lowest=1,
value=saved_args.get("train_batch_size", 8),
scroll_exit=True,
)
self.gradient_accumulation_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):",
out_of=10,
step=1,
lowest=1,
value=saved_args.get("gradient_accumulation_steps", 4),
scroll_exit=True,
)
self.lr_warmup_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Warmup Steps:",
out_of=100,
step=1,
lowest=0,
value=saved_args.get("lr_warmup_steps", 0),
scroll_exit=True,
)
self.learning_rate = self.add_widget_intelligent(
npyscreen.TitleText,
name="Learning Rate:",
value=str(
saved_args.get("learning_rate", "5.0e-04"),
),
scroll_exit=True,
)
self.scale_lr = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Scale learning rate by number GPUs, steps and batch size",
value=saved_args.get("scale_lr", True),
scroll_exit=True,
)
self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Use xformers acceleration",
value=saved_args.get("enable_xformers_memory_efficient_attention", False),
scroll_exit=True,
)
self.lr_scheduler = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Learning rate scheduler:",
values=self.lr_schedulers,
max_height=7,
value=self.lr_schedulers.index(saved_args.get("lr_scheduler", "constant")),
scroll_exit=True,
)
self.model.editing = True
def initializer_changed(self) -> None:
placeholder = self.placeholder_token.value
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()
def on_ok(self):
if self.validate_field_values():
self.parentApp.setNextForm(None)
self.editing = False
self.parentApp.ti_arguments = self.marshall_arguments()
npyscreen.notify("Launching textual inversion training. This will take a while...")
else:
self.editing = True
def ok_cancel(self):
sys.exit(0)
def validate_field_values(self) -> bool:
bad_fields = []
if self.model.value is None:
bad_fields.append("Model Name must correspond to a known model in models.yaml")
if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
if self.train_data_dir.value is None:
bad_fields.append("Data Training Directory cannot be empty")
if self.output_dir.value is None:
bad_fields.append("The Output Destination Directory cannot be empty")
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:"
for problem in bad_fields:
message += f"\n* {problem}"
npyscreen.notify_confirm(message)
return False
else:
return True
def get_model_names(self) -> Tuple[List[str], int]:
global config
assert config is not None
installer = initialize_installer(config)
store = installer.record_store
main_models = store.search_by_attr(model_type=ModelType.Main)
model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
default = 0
return (model_names, default)
def marshall_arguments(self) -> dict:
args = {}
# the choices
args.update(
model=self.model_names[self.model.value[0]],
resolution=self.resolutions[self.resolution.value[0]],
lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]],
mixed_precision=self.precisions[self.mixed_precision.value[0]],
learnable_property=self.learnable_properties[self.learnable_property.value[0]],
)
# all the strings and booleans
for attr in (
"initializer_token",
"placeholder_token",
"train_data_dir",
"output_dir",
"scale_lr",
"center_crop",
"enable_xformers_memory_efficient_attention",
):
args[attr] = getattr(self, attr).value
# all the integers
for attr in (
"train_batch_size",
"gradient_accumulation_steps",
"num_train_epochs",
"max_train_steps",
"lr_warmup_steps",
):
args[attr] = int(getattr(self, attr).value)
# the floats (just one)
args.update(learning_rate=float(self.learning_rate.value))
# a special case
if self.resume_from_checkpoint.value and Path(self.output_dir.value).exists():
args["resume_from_checkpoint"] = "latest"
return args
class MyApplication(npyscreen.NPSAppManaged):
def __init__(self, saved_args: Optional[Dict[str, str]] = None):
super().__init__()
self.ti_arguments = None
self.saved_args = saved_args
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
self.main = self.addForm(
"MAIN",
textualInversionForm,
name="Textual Inversion Settings",
saved_args=self.saved_args,
)
def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
"""
Copy learned_embeds.bin into the embeddings folder, and offer to
delete the full model and checkpoints.
"""
assert config is not None
source = Path(args["output_dir"], "learned_embeds.bin")
dest_dir_name = args["placeholder_token"].strip("<>")
destination = config.root_dir / "embeddings" / dest_dir_name
os.makedirs(destination, exist_ok=True)
logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
shutil.copy(source, destination)
if (input("Delete training logs and intermediate checkpoints? [y] ") or "y").startswith(("y", "Y")):
shutil.rmtree(Path(args["output_dir"]))
else:
logger.info(f'Keeping {args["output_dir"]}')
def save_args(args: dict) -> None:
"""
Save the current argument values to an omegaconf file
"""
assert config is not None
dest_dir = config.root_dir / TRAINING_DIR
os.makedirs(dest_dir, exist_ok=True)
conf_file = dest_dir / CONF_FILE
conf = OmegaConf.create(args)
OmegaConf.save(config=conf, f=conf_file)
def previous_args() -> dict:
"""
Get the previous arguments used.
"""
assert config is not None
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
try:
conf = OmegaConf.load(conf_file)
conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
except Exception:
conf = None
return conf
def do_front_end() -> None:
global config
saved_args = previous_args()
myapplication = MyApplication(saved_args=saved_args)
myapplication.run()
if my_args := myapplication.ti_arguments:
os.makedirs(my_args["output_dir"], exist_ok=True)
# Automatically add angle brackets around the trigger
if not re.match("^<.+>$", my_args["placeholder_token"]):
my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"
my_args["only_save_embeds"] = True
save_args(my_args)
try:
print(my_args)
do_textual_inversion_training(config, **my_args)
copy_to_embeddings_folder(my_args)
except Exception as e:
logger.error("An exception occurred during training. The exception was:")
logger.error(str(e))
logger.error("DETAILS:")
logger.error(traceback.format_exc())
def main() -> None:
global config
args: Namespace = parse_args()
config = InvokeAIAppConfig.get_config()
config.parse_args([])
# change root if needed
if args.root_dir:
config.root = args.root_dir
try:
if args.front_end:
do_front_end()
else:
do_textual_inversion_training(config, **vars(args))
except AssertionError as e:
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
pass
except (widget.NotEnoughSpaceForWidget, Exception) as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("You need to have at least one diffusers models defined in models.yaml in order to train")
elif str(e).startswith("addwstr"):
logger.error("Not enough window space for the interface. Please make your window larger and try again.")
else:
logger.error(e)
sys.exit(-1)
if __name__ == "__main__":
main()

View File

@@ -1,9 +0,0 @@
{
"entry": ["src/main.tsx"],
"extensions": [".ts", ".tsx"],
"ignorePatterns": ["**/node_modules/**", "dist/**", "public/**", "**/*.stories.tsx", "config/**"],
"ignoreUnresolved": [],
"ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
"respectGitignore": true,
"ignoreUnused": []
}

View File

@@ -0,0 +1,27 @@
import type { KnipConfig } from 'knip';
const config: KnipConfig = {
ignore: [
// This file is only used during debugging
'src/app/store/middleware/debugLoggerMiddleware.ts',
// Autogenerated types - shouldn't ever touch these
'src/services/api/schema.ts',
],
ignoreBinaries: ['only-allow'],
rules: {
files: 'warn',
dependencies: 'warn',
unlisted: 'warn',
binaries: 'warn',
unresolved: 'warn',
exports: 'warn',
types: 'warn',
nsExports: 'warn',
nsTypes: 'warn',
enumMembers: 'warn',
classMembers: 'warn',
duplicates: 'warn',
},
};
export default config;

View File

@@ -24,16 +24,16 @@
"build": "pnpm run lint && vite build",
"typegen": "node scripts/typegen.js",
"preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx",
"lint:knip": "knip",
"lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
"lint:eslint": "eslint --max-warnings=0 .",
"lint:prettier": "prettier --check .",
"lint:tsc": "tsc --noEmit",
"lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
"fix": "eslint --fix . && prettier --log-level warn --write .",
"lint": "concurrently -g -c red,green,yellow,blue,magenta pnpm:lint:*",
"fix": "knip --fix && eslint --fix . && prettier --log-level warn --write .",
"preinstall": "npx only-allow pnpm",
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build",
"unimported": "npx unimported",
"test": "vitest",
"test:no-watch": "vitest --no-watch"
},
@@ -57,54 +57,51 @@
"@dnd-kit/sortable": "^8.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@fontsource-variable/inter": "^5.0.16",
"@invoke-ai/ui-library": "^0.0.18",
"@mantine/form": "6.0.21",
"@nanostores/react": "^0.7.1",
"@reduxjs/toolkit": "2.0.1",
"@invoke-ai/ui-library": "^0.0.21",
"@nanostores/react": "^0.7.2",
"@reduxjs/toolkit": "2.2.1",
"@roarr/browser-log-writer": "^1.3.0",
"chakra-react-select": "^4.7.6",
"compare-versions": "^6.1.0",
"dateformat": "^5.0.3",
"framer-motion": "^10.18.0",
"i18next": "^23.7.16",
"i18next-http-backend": "^2.4.2",
"framer-motion": "^11.0.6",
"i18next": "^23.10.0",
"i18next-http-backend": "^2.5.0",
"idb-keyval": "^6.2.1",
"jsondiffpatch": "^0.6.0",
"konva": "^9.3.1",
"konva": "^9.3.3",
"lodash-es": "^4.17.21",
"nanostores": "^0.9.5",
"nanostores": "^0.10.0",
"new-github-issue-url": "^1.0.0",
"overlayscrollbars": "^2.4.6",
"overlayscrollbars-react": "^0.5.3",
"query-string": "^8.1.0",
"overlayscrollbars": "^2.5.0",
"overlayscrollbars-react": "^0.5.4",
"query-string": "^9.0.0",
"react": "^18.2.0",
"react-colorful": "^5.6.1",
"react-dom": "^18.2.0",
"react-dropzone": "^14.2.3",
"react-error-boundary": "^4.0.12",
"react-hook-form": "^7.49.3",
"react-hotkeys-hook": "4.4.4",
"react-i18next": "^14.0.0",
"react-hook-form": "^7.50.1",
"react-hotkeys-hook": "4.5.0",
"react-i18next": "^14.0.5",
"react-icons": "^5.0.1",
"react-konva": "^18.2.10",
"react-redux": "9.1.0",
"react-resizable-panels": "^1.0.9",
"react-resizable-panels": "^2.0.11",
"react-select": "5.8.0",
"react-textarea-autosize": "^8.5.3",
"react-use": "^17.4.3",
"react-virtuoso": "^4.6.2",
"reactflow": "^11.10.2",
"react-use": "^17.5.0",
"react-virtuoso": "^4.7.1",
"reactflow": "^11.10.4",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^5.1.0",
"roarr": "^7.21.0",
"serialize-error": "^11.0.3",
"socket.io-client": "^4.7.4",
"type-fest": "^4.9.0",
"use-debounce": "^10.0.0",
"use-image": "^1.1.1",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zod-validation-error": "^3.0.0"
"zod-validation-error": "^3.0.2"
},
"peerDependencies": {
"@chakra-ui/react": "^2.8.2",
@@ -113,59 +110,42 @@
"ts-toolbelt": "^9.6.0"
},
"devDependencies": {
"@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
"@invoke-ai/eslint-config-react": "^0.0.13",
"@invoke-ai/prettier-config-react": "^0.0.6",
"@storybook/addon-docs": "^7.6.10",
"@storybook/addon-essentials": "^7.6.10",
"@storybook/addon-interactions": "^7.6.10",
"@storybook/addon-links": "^7.6.10",
"@storybook/addon-storysource": "^7.6.10",
"@storybook/blocks": "^7.6.10",
"@storybook/manager-api": "^7.6.10",
"@storybook/react": "^7.6.10",
"@storybook/react-vite": "^7.6.10",
"@storybook/test": "^7.6.10",
"@storybook/theming": "^7.6.10",
"@invoke-ai/eslint-config-react": "^0.0.14",
"@invoke-ai/prettier-config-react": "^0.0.7",
"@storybook/addon-essentials": "^7.6.17",
"@storybook/addon-interactions": "^7.6.17",
"@storybook/addon-links": "^7.6.17",
"@storybook/addon-storysource": "^7.6.17",
"@storybook/manager-api": "^7.6.17",
"@storybook/react": "^7.6.17",
"@storybook/react-vite": "^7.6.17",
"@storybook/theming": "^7.6.17",
"@types/dateformat": "^5.0.2",
"@types/lodash-es": "^4.17.12",
"@types/node": "^20.11.5",
"@types/react": "^18.2.48",
"@types/react-dom": "^18.2.18",
"@types/uuid": "^9.0.7",
"@typescript-eslint/eslint-plugin": "^6.19.0",
"@typescript-eslint/parser": "^6.19.0",
"@vitejs/plugin-react-swc": "^3.5.0",
"@types/node": "^20.11.20",
"@types/react": "^18.2.59",
"@types/react-dom": "^18.2.19",
"@types/uuid": "^9.0.8",
"@vitejs/plugin-react-swc": "^3.6.0",
"concurrently": "^8.2.2",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
"dpdm": "^3.14.0",
"eslint": "^8.57.0",
"eslint-plugin-i18next": "^6.0.3",
"eslint-plugin-import": "^2.29.1",
"eslint-plugin-path": "^1.2.4",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"eslint-plugin-simple-import-sort": "^10.0.0",
"eslint-plugin-storybook": "^0.6.15",
"eslint-plugin-unused-imports": "^3.0.0",
"madge": "^6.1.0",
"knip": "^5.0.2",
"openapi-types": "^12.1.3",
"openapi-typescript": "^6.7.3",
"prettier": "^3.2.4",
"openapi-typescript": "^6.7.4",
"prettier": "^3.2.5",
"rollup-plugin-visualizer": "^5.12.0",
"storybook": "^7.6.10",
"storybook": "^7.6.17",
"ts-toolbelt": "^9.6.0",
"tsafe": "^1.6.6",
"typescript": "^5.3.3",
"vite": "^5.0.12",
"vite-plugin-css-injected-by-js": "^3.3.1",
"vite-plugin-dts": "^3.7.1",
"vite": "^5.1.4",
"vite-plugin-css-injected-by-js": "^3.4.0",
"vite-plugin-dts": "^3.7.3",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.3.1",
"vitest": "^1.2.2"
},
"pnpm": {
"patchedDependencies": {
"reselect@5.0.1": "patches/reselect@5.0.1.patch"
}
"vitest": "^1.3.1"
}
}

File diff suppressed because it is too large

View File

@@ -81,7 +81,7 @@
"outputs": "Ausgabe",
"data": "Daten",
"safetensors": "Safe-Tensors",
"outpaint": "Ausmalen",
"outpaint": "Outpaint (Außen ausmalen)",
"details": "Details",
"format": "Format",
"unknown": "Unbekannt",
@@ -110,17 +110,18 @@
"nextPage": "Nächste Seite",
"unknownError": "Unbekannter Fehler",
"unsaved": "Nicht gespeichert",
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:",
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Siehe hier:",
"localSystem": "Lokales System",
"orderBy": "Ordnen nach",
"saveAs": "Speicher als",
"saveAs": "Speichern als",
"updated": "Aktualisiert",
"copy": "Kopieren",
"aboutHeading": "Nutzen Sie Ihre kreative Energie"
"aboutHeading": "Nutzen Sie Ihre kreative Energie",
"toResolve": "Lösen"
},
"gallery": {
"generations": "Erzeugungen",
"showGenerations": "Zeige Erzeugnisse",
"showGenerations": "Zeige Ergebnisse",
"uploads": "Uploads",
"showUploads": "Zeige Uploads",
"galleryImageSize": "Bildgröße",
@@ -150,9 +151,9 @@
"problemDeletingImagesDesc": "Ein oder mehrere Bilder konnten nicht gelöscht werden",
"starImage": "Bild markieren",
"assets": "Ressourcen",
"unstarImage": "Markierung Entfernen",
"unstarImage": "Markierung entfernen",
"image": "Bild",
"deleteSelection": "Lösche markierte",
"deleteSelection": "Lösche Auswahl",
"dropToUpload": "$t(gallery.drop) zum hochladen",
"dropOrUpload": "$t(gallery.drop) oder hochladen",
"drop": "Ablegen",
@@ -590,10 +591,21 @@
"general": "Allgemein",
"hiresStrength": "High Res Stärke",
"hidePreview": "Verstecke Vorschau",
"showPreview": "Zeige Vorschau"
"showPreview": "Zeige Vorschau",
"aspect": "Seitenverhältnis",
"aspectRatio": "Seitenverhältnis",
"scheduler": "Planer",
"aspectRatioFree": "Frei",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (kann zu groß sein)",
"lockAspectRatio": "Seitenverhältnis sperren",
"swapDimensions": "Seitenverhältnis umkehren",
"setToOptimalSize": "Optimiere Größe für Modell",
"useSize": "Maße übernehmen",
"remixImage": "Remix des Bilds erstellen",
"imageActions": "Weitere Bildaktionen"
},
"settings": {
"displayInProgress": "Bilder in Bearbeitung anzeigen",
"displayInProgress": "Zwischenbilder anzeigen",
"saveSteps": "Speichern der Bilder alle n Schritte",
"confirmOnDelete": "Bestätigen beim Löschen",
"displayHelpIcons": "Hilfesymbole anzeigen",
@@ -606,7 +618,34 @@
"useSlidersForAll": "Schieberegler für alle Optionen verwenden",
"showAdvancedOptions": "Erweiterte Optionen anzeigen",
"alternateCanvasLayout": "Alternatives Leinwand-Layout",
"clearIntermediatesDesc1": "Das Löschen der Zwischenprodukte setzt Leinwand und ControlNet zurück."
"clearIntermediatesDesc1": "Das Löschen der Zwischenbilder setzt Leinwand und ControlNet zurück.",
"favoriteSchedulers": "Lieblings-Planer",
"favoriteSchedulersPlaceholder": "Keine Planer favorisiert",
"generation": "Erzeugung",
"enableInformationalPopovers": "Info-Popouts anzeigen",
"shouldLogToConsole": "Konsole loggen",
"showProgressInViewer": "Zwischenbilder im Viewer anzeigen",
"clearIntermediatesDesc3": "Ihre Bilder werden nicht gelöscht.",
"clearIntermediatesWithCount_one": "Lösche {{count}} Zwischenbilder",
"clearIntermediatesWithCount_other": "Lösche {{count}} Zwischenbilder",
"reloadingIn": "Neuladen in",
"enableNodesEditor": "Nodes Editor aktivieren",
"autoChangeDimensions": "Breite/Höhe auf Modellstandard setzen",
"experimental": "Experimentell",
"intermediatesCleared_one": "{{count}} Zwischenbilder gelöscht",
"intermediatesCleared_other": "{{count}} Zwischenbilder gelöscht",
"enableInvisibleWatermark": "Unsichtbares Wasserzeichen aktivieren",
"general": "Allgemein",
"consoleLogLevel": "Protokollierungsstufe",
"clearIntermediatesDisabled": "Warteschlange muss leer sein, um Zwischenbilder zu löschen",
"developer": "Entwickler",
"antialiasProgressImages": "Zwischenbilder mit Anti-Alias",
"beta": "Beta",
"ui": "Benutzeroberfläche",
"clearIntermediatesDesc2": "Zwischenbilder sind Nebenprodukte der Erstellung. Sie zu löschen macht Festplattenspeicher frei.",
"clearIntermediates": "Zwischenbilder löschen",
"intermediatesClearedFailed": "Problem beim Löschen der Zwischenbilder",
"enableNSFWChecker": "Auf unangemessene Inhalte prüfen"
},
"toast": {
"tempFoldersEmptied": "Temp-Ordner geleert",
@@ -651,7 +690,9 @@
"problemCopyingCanvas": "Problem beim Kopieren der Leinwand",
"problemCopyingCanvasDesc": "Kann Basis-Layer nicht exportieren",
"problemDownloadingCanvas": "Problem beim Herunterladen der Leinwand",
"setAsCanvasInitialImage": "Als Ausgangsbild gesetzt"
"setAsCanvasInitialImage": "Als Ausgangsbild gesetzt",
"addedToBoard": "Dem Board hinzugefügt",
"loadedWithWarnings": "Workflow mit Warnungen geladen"
},
"tooltip": {
"feature": {
@@ -733,23 +774,23 @@
"accessibility": {
"modelSelect": "Modell-Auswahl",
"uploadImage": "Bild hochladen",
"previousImage": "Voriges Bild",
"previousImage": "Vorheriges Bild",
"useThisParameter": "Benutze diesen Parameter",
"copyMetadataJson": "Kopiere Metadaten JSON",
"copyMetadataJson": "Kopiere JSON-Metadaten",
"zoomIn": "Vergrößern",
"rotateClockwise": "Im Uhrzeigersinn drehen",
"flipHorizontally": "Horizontal drehen",
"flipVertically": "Vertikal drehen",
"modifyConfig": "Optionen einstellen",
"toggleAutoscroll": "Auroscroll ein/ausschalten",
"toggleLogViewer": "Log Betrachter ein/ausschalten",
"toggleLogViewer": "Log-Betrachter ein/ausschalten",
"showOptionsPanel": "Seitenpanel anzeigen",
"reset": "Zurücksetzten",
"nextImage": "Nächstes Bild",
"zoomOut": "Verkleinern",
"rotateCounterClockwise": "Gegen den Uhrzeigersinn drehen",
"showGalleryPanel": "Galeriefenster anzeigen",
"exitViewer": "Betrachten beenden",
"showGalleryPanel": "Galerie-Panel anzeigen",
"exitViewer": "Betrachter beenden",
"menu": "Menü",
"loadMore": "Mehr laden",
"invokeProgressBar": "Invoke Fortschrittsanzeige",
@@ -759,7 +800,7 @@
"about": "Über"
},
"boards": {
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
"autoAddBoard": "Automatisches Hinzufügen zum Board",
"topMessage": "Dieser Ordner enthält Bilder die in den folgenden Funktionen verwendet werden:",
"move": "Bewegen",
"menuItemAutoAdd": "Auto-Hinzufügen zu diesem Ordner",
@@ -768,13 +809,13 @@
"noMatching": "Keine passenden Ordner",
"selectBoard": "Ordner aussuchen",
"cancel": "Abbrechen",
"addBoard": "Ordner hinzufügen",
"addBoard": "Board hinzufügen",
"uncategorized": "Ohne Kategorie",
"downloadBoard": "Ordner runterladen",
"changeBoard": "Ordner wechseln",
"loading": "Laden...",
"clearSearch": "Suche leeren",
"bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden.",
"bottomMessage": "Löschen des Boards und seiner Bilder setzt alle Funktionen zurück, die sie gerade verwenden.",
"deleteBoardOnly": "Nur Ordner löschen",
"deleteBoard": "Löschen Ordner",
"deleteBoardAndImages": "Löschen Ordner und Bilder",
@@ -820,7 +861,7 @@
"colorMap": "Farbe",
"lowThreshold": "Niedrige Schwelle",
"highThreshold": "Hohe Schwelle",
"toggleControlNet": "Schalten ControlNet um",
"toggleControlNet": "Dieses ControlNet ein- oder ausschalten",
"delete": "Löschen",
"controlAdapter_one": "Control Adapter",
"controlAdapter_other": "Control Adapter",
@@ -865,18 +906,23 @@
"maxFaces": "Maximale Anzahl Gesichter",
"resizeSimple": "Größe ändern (einfach)",
"large": "Groß",
"modelSize": "Modell Größe",
"modelSize": "Modellgröße",
"small": "Klein",
"base": "Basis",
"depthAnything": "Depth Anything / \"Tiefe irgendwas\"",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth-Anything-Technik",
"face": "Gesicht",
"body": "Körper",
"hands": "Hände",
"dwOpenpose": "DW Openpose",
"dwOpenposeDescription": "Posenschätzung mit DW Openpose"
},
"queue": {
"status": "Status",
"cancelTooltip": "Aktuellen Aufgabe abbrechen",
"queueEmpty": "Warteschlange leer",
"in_progress": "In Arbeit",
"queueFront": "An den Anfang der Warteschlange tun",
"queueFront": "Am Anfang der Warteschlange einreihen",
"completed": "Fertig",
"queueBack": "In die Warteschlange",
"clearFailed": "Probleme beim leeren der Warteschlange",
@@ -904,7 +950,7 @@
"batchValues": "Stapel Werte",
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
"queuedCount": "{{pending}} wartenden Elemente",
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
"clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
"completedIn": "Fertig in",
"cancelBatchSucceeded": "Stapel abgebrochen",
"cancelBatch": "Stapel stoppen",
@@ -913,20 +959,20 @@
"cancelBatchFailed": "Problem beim Abbruch vom Stapel",
"clearQueueAlertDialog2": "Warteschlange wirklich leeren?",
"pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
"pauseSucceeded": "Prozessor angehalten",
"pauseSucceeded": "Prozess angehalten",
"cancelFailed": "Problem beim Stornieren des Auftrags",
"pauseFailed": "Problem beim Anhalten des Prozessors",
"pauseFailed": "Problem beim Anhalten des Prozesses",
"front": "Vorne",
"pruneTooltip": "Bereinigen Sie {{item_count}} abgeschlossene Aufträge",
"resumeFailed": "Problem beim wieder aufnehmen von Prozessor",
"resumeFailed": "Problem beim Fortsetzen des Prozesses",
"pruneFailed": "Problem beim leeren der Warteschlange",
"pauseTooltip": "Pause von Prozessor",
"pauseTooltip": "Prozess anhalten",
"back": "Hinten",
"resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen",
"resumeSucceeded": "Prozess wird fortgesetzt",
"resumeTooltip": "Prozess wieder aufnehmen",
"time": "Zeit",
"batchQueuedDesc_one": "{{count}} Eintrag ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_one": "{{count}} Eintrag an {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge an {{direction}} der Wartschlange hinzugefügt",
"openQueue": "Warteschlange öffnen",
"batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung",
"batchFieldValues": "Stapelverarbeitungswerte",
@@ -961,11 +1007,12 @@
"workflow": "Workflow",
"scheduler": "Planer",
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden",
"recallParameters": "Parameter wiederherstellen"
"recallParameters": "Parameter wiederherstellen",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)"
},
"popovers": {
"noiseUseCPU": {
"heading": "Nutze Prozessor rauschen",
"heading": "Nutze CPU-Rauschen",
"paragraphs": [
"Entscheidet, ob auf der CPU oder GPU Rauschen erzeugt wird.",
"Mit aktiviertem CPU-Rauschen wird ein bestimmter Seedwert das gleiche Bild auf jeder Maschine erzeugen.",
@@ -975,8 +1022,7 @@
"paramModel": {
"heading": "Modell",
"paragraphs": [
"Modell für die Entrauschungsschritte.",
"Verschiedene Modelle werden in der Regel so trainiert, dass sie sich auf die Erzeugung bestimmter Ästhetik und/oder Inhalte spezialisiert."
"Modell für die Entrauschungsschritte."
]
},
"paramIterations": {
@@ -1084,12 +1130,23 @@
"Wie stark wird das ControlNet das generierte Bild beeinflussen wird."
],
"heading": "Einfluss"
},
"paramScheduler": {
"paragraphs": [
"\"Planer\" definiert, wie iterativ Rauschen zu einem Bild hinzugefügt wird, oder wie ein Sample bei der Ausgabe eines Modells aktualisiert wird."
],
"heading": "Planer"
},
"imageFit": {
"paragraphs": [
"Reduziert das Ausgangsbild auf die Breite und Höhe des Ausgangsbildes. Empfohlen zu aktivieren."
]
}
},
"ui": {
"lockRatio": "Verhältnis sperren",
"hideProgressImages": "Verstecke Prozess Bild",
"showProgressImages": "Zeige Prozess Bild",
"hideProgressImages": "Fortschrittsbilder verbergen",
"showProgressImages": "Fortschrittsbilder anzeigen",
"swapSizes": "Tausche Größen"
},
"invocationCache": {
@@ -1287,7 +1344,19 @@
"vaeFieldDescription": "VAE Submodell.",
"unknownInput": "Unbekannte Eingabe: {{name}}",
"unknownNodeType": "Unbekannter Knotentyp",
"float": "Kommazahlen"
"float": "Kommazahlen",
"latentsPolymorphic": "Latents Polymorph",
"integerPolymorphicDescription": "Eine Sammlung von ganzen Zahlen.",
"integerPolymorphic": "Ganze Zahl Polymorph",
"ipAdapterPolymorphic": "IP-Adapter Polymorph",
"floatPolymorphic": "Fließkommazahl Polymorph",
"enumDescription": "Aufzählungen sind Werte, die eine von mehreren Optionen sein können.",
"floatCollection": "Fließkommazahl Sammlung",
"enum": "Aufzählung",
"floatPolymorphicDescription": "Eine Sammlung von Fließkommazahlen",
"fullyContainNodes": "Vollständig ausgewählte Nodes auswählen",
"editMode": "Im Workflow-Editor bearbeiten",
"floatCollectionDescription": "Eine Sammlung von Fließkommazahlen"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",
@@ -1336,12 +1405,12 @@
},
"control": {
"title": "Kontrolle",
"controlAdaptersTab": "Kontroll Adapter",
"ipTab": "Bild Beschreibung"
"controlAdaptersTab": "Kontroll-Adapter",
"ipTab": "Bild-Prompts"
},
"compositing": {
"coherenceTab": "Kohärenzpass",
"infillTab": "Füllung",
"infillTab": "Füllung / Infill",
"title": "Compositing"
}
},
@@ -1379,5 +1448,15 @@
},
"app": {
"storeNotInitialized": "App-Store ist nicht initialisiert"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
"scheduler": "Planer",
"steps": "Schritte",
"useRefiner": "Refiner verwenden",
"selectAModel": "Modell auswählen"
},
"dynamicPrompts": {
"showDynamicPrompts": "Dynamische Prompts anzeigen"
}
}
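
A note on the `_one`/`_other` suffixes in the German strings above: they follow i18next's plural-key convention, and `{{count}}`/`{{direction}}` are interpolation slots. A minimal sketch of how such keys resolve, assuming i18next v21+ (the init options and values below are illustrative, not from the repo):

import i18n from 'i18next';

// Resource subset copied from the German locale diff above.
i18n
  .init({
    lng: 'de',
    resources: {
      de: {
        translation: {
          queue: {
            batchQueuedDesc_one: '{{count}} Eintrag an {{direction}} der Warteschlange hinzugefügt',
            batchQueuedDesc_other: '{{count}} Einträge an {{direction}} der Warteschlange hinzugefügt',
          },
        },
      },
    },
  })
  .then(() => {
    // i18next picks the _one/_other plural form from `count` automatically.
    console.log(i18n.t('queue.batchQueuedDesc', { count: 1, direction: 'Ende' }));
    // -> "1 Eintrag an Ende der Warteschlange hinzugefügt"
    console.log(i18n.t('queue.batchQueuedDesc', { count: 5, direction: 'Ende' }));
    // -> "5 Einträge an Ende der Warteschlange hinzugefügt"
  });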

View File

@@ -424,8 +424,11 @@
"uploads": "Uploads",
"deleteSelection": "Delete Selection",
"downloadSelection": "Download Selection",
"preparingDownload": "Preparing Download",
"preparingDownloadFailed": "Problem Preparing Download",
"bulkDownloadRequested": "Preparing Download",
"bulkDownloadRequestedDesc": "Your download request is being prepared. This may take a few moments.",
"bulkDownloadRequestFailed": "Problem Preparing Download",
"bulkDownloadStarting": "Download Starting",
"bulkDownloadFailed": "Download Failed",
"problemDeletingImages": "Problem Deleting Images",
"problemDeletingImagesDesc": "One or more images could not be deleted"
},
@@ -653,6 +656,7 @@
}
},
"metadata": {
"allPrompts": "All Prompts",
"cfgScale": "CFG scale",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"createdBy": "Created By",
@@ -661,6 +665,7 @@
"height": "Height",
"hiresFix": "High Resolution Optimization",
"imageDetails": "Image Details",
"imageDimensions": "Image Dimensions",
"initImage": "Initial image",
"metadata": "Metadata",
"model": "Model",
@@ -668,9 +673,12 @@
"noImageDetails": "No image details found",
"noMetaData": "No metadata found",
"noRecallParameters": "No parameters to recall found",
"parameterSet": "Parameter {{parameter}} set",
"parsingFailed": "Parsing Failed",
"perlin": "Perlin Noise",
"positivePrompt": "Positive Prompt",
"recallParameters": "Recall Parameters",
"recallParameter": "Recall {{label}}",
"scheduler": "Scheduler",
"seamless": "Seamless",
"seed": "Seed",
@@ -684,20 +692,24 @@
},
"modelManager": {
"active": "active",
"addAll": "Add All",
"addCheckpointModel": "Add Checkpoint / Safetensor Model",
"addDifference": "Add Difference",
"addDiffuserModel": "Add Diffusers",
"addManually": "Add Manually",
"addModel": "Add Model",
"addModels": "Add Models",
"addNew": "Add New",
"addNewModel": "Add New Model",
"addSelected": "Add Selected",
"advanced": "Advanced",
"advancedImportInfo": "The advanced tab allows for manual configuration of core model settings. Only use this tab if you are confident that you know the correct model type and configuration for the selected model.",
"allModels": "All Models",
"alpha": "Alpha",
"availableModels": "Available Models",
"baseModel": "Base Model",
"cached": "cached",
"cancel": "Cancel",
"cannotUseSpaces": "Cannot Use Spaces",
"checkpointFolder": "Checkpoint Folder",
"checkpointModels": "Checkpoints",
@@ -731,6 +743,7 @@
"descriptionValidationMsg": "Add a description for your model",
"deselectAll": "Deselect All",
"diffusersModels": "Diffusers",
"edit": "Edit",
"findModels": "Find Models",
"formMessageDiffusersModelLocation": "Diffusers Model Location",
"formMessageDiffusersModelLocationDesc": "Please enter at least one.",
@@ -739,7 +752,9 @@
"height": "Height",
"heightValidationMsg": "Default height of your model.",
"ignoreMismatch": "Ignore Mismatches Between Selected Models",
"imageEncoderModelId": "Image Encoder Model ID",
"importModels": "Import Models",
"importQueue": "Import Queue",
"inpainting": "v1 Inpainting",
"interpolationType": "Interpolation Type",
"inverseSigmoid": "Inverse Sigmoid",
@@ -768,8 +783,11 @@
"modelMergeHeaderHelp1": "You can merge up to three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
"modelMetadata": "Model Metadata",
"modelName": "Model Name",
"modelOne": "Model 1",
"modelsFound": "Models Found",
"modelSettings": "Model Settings",
"modelsMerged": "Models Merged",
"modelsMergeFailed": "Model Merge Failed",
"modelsSynced": "Models Synced",
@@ -789,16 +807,24 @@
"notLoaded": "not loaded",
"oliveModels": "Olives",
"onnxModels": "Onnx",
"path": "Path",
"pathToCustomConfig": "Path To Custom Config",
"pickModelType": "Pick Model Type",
"predictionType": "Prediction Type (for Stable Diffusion 2.x Models and occasional Stable Diffusion 1.x Models)",
"predictionType": "Prediction Type",
"prune": "Prune",
"pruneTooltip": "Prune finished imports from queue",
"quickAdd": "Quick Add",
"removeFromQueue": "Remove From Queue",
"repo_id": "Repo ID",
"repoIDValidationMsg": "Online repository of your model",
"repoVariant": "Repo Variant",
"safetensorModels": "SafeTensors",
"sameFolder": "Same folder",
"scan": "Scan",
"scanFolder": "Scan folder",
"scanAgain": "Scan Again",
"scanForModels": "Scan For Models",
"scanResults": "Scan Results",
"search": "Search",
"selectAll": "Select All",
"selectAndAdd": "Select and Add Models Listed Below",
@@ -809,9 +835,11 @@
"showExisting": "Show Existing",
"sigmoid": "Sigmoid",
"simpleModelDesc": "Provide a path to a local Diffusers model, local checkpoint / safetensors model a HuggingFace Repo ID, or a checkpoint/diffusers model URL.",
"source": "Source",
"statusConverting": "Converting",
"syncModels": "Sync Models",
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you add models to the InvokeAI root folder or autoimport directory after the application has booted.",
"upcastAttention": "Upcast Attention",
"updateModel": "Update Model",
"useCustomConfig": "Use Custom Config",
"v1": "v1",
@@ -826,7 +854,8 @@
"variant": "Variant",
"weightedSum": "Weighted Sum",
"width": "Width",
"widthValidationMsg": "Default width of your model."
"widthValidationMsg": "Default width of your model.",
"ztsnrTraining": "ZTSNR Training"
},
"models": {
"addLora": "Add LoRA",
@@ -1120,8 +1149,8 @@
"codeformerFidelity": "Fidelity",
"coherenceMode": "Mode",
"coherencePassHeader": "Coherence Pass",
"coherenceSteps": "Steps",
"coherenceStrength": "Strength",
"coherenceEdgeSize": "Edge Size",
"coherenceMinDenoise": "Min Denoise",
"compositingSettingsHeader": "Compositing Settings",
"controlNetControlMode": "Control Mode",
"copyImage": "Copy Image",
@@ -1165,8 +1194,8 @@
"unableToInvoke": "Unable to Invoke"
},
"maskAdjustmentsHeader": "Mask Adjustments",
"maskBlur": "Blur",
"maskBlurMethod": "Blur Method",
"maskBlur": "Mask Blur",
"maskBlurMethod": "Mask Blur Method",
"maskEdge": "Mask Edge",
"negativePromptPlaceholder": "Negative Prompt",
"noiseSettings": "Noise",
@@ -1348,6 +1377,8 @@
"modelAdded": "Model Added: {{modelName}}",
"modelAddedSimple": "Model Added",
"modelAddFailed": "Model Add Failed",
"modelImportCanceled": "Model Import Canceled",
"modelImportRemoved": "Model Import Removed",
"nodesBrokenConnections": "Cannot load. Some connections are broken.",
"nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
"nodesLoaded": "Nodes Loaded",
@@ -1356,8 +1387,8 @@
"nodesNotValidJSON": "Not a valid JSON",
"nodesSaved": "Nodes Saved",
"nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types",
"parameterNotSet": "Parameter not set",
"parameterSet": "Parameter set",
"parameterNotSet": "{{parameter}} not set",
"parameterSet": "{{parameter}} set",
"parametersFailed": "Problem loading parameters",
"parametersFailedDesc": "Unable to load init image.",
"parametersNotSet": "Parameters Not Set",
@@ -1381,6 +1412,7 @@
"promptNotSet": "Prompt Not Set",
"promptNotSetDesc": "Could not find prompt for this image.",
"promptSet": "Prompt Set",
"prunedQueue": "Pruned Queue",
"resetInitialImage": "Reset Initial Image",
"seedNotSet": "Seed Not Set",
"seedNotSetDesc": "Could not find seed for this image.",
@@ -1424,9 +1456,8 @@
"clipSkip": {
"heading": "CLIP Skip",
"paragraphs": [
"Choose how many layers of the CLIP model to skip.",
"Some models work better with certain CLIP Skip settings.",
"A higher value typically results in a less detailed image."
"How many layers of the CLIP model to skip.",
"Certain models are better suited to be used with CLIP Skip."
]
},
"paramNegativeConditioning": {
@@ -1446,11 +1477,12 @@
"paramScheduler": {
"heading": "Scheduler",
"paragraphs": [
"Scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
"Scheduler used during the generation process.",
"Each scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output."
]
},
"compositingBlur": {
"heading": "Blur",
"compositingMaskBlur": {
"heading": "Mask Blur",
"paragraphs": ["The blur radius of the mask."]
},
"compositingBlurMethod": {
@@ -1463,47 +1495,63 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": ["Method used to create a coherent image with the newly generated masked area."]
},
"compositingCoherenceSteps": {
"heading": "Steps",
"paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
"paragraphs": ["Number of steps in the Coherence Pass.", "Similar to Generation Steps."]
},
"compositingStrength": {
"heading": "Strength",
"paragraphs": ["Amount of noise added for the Coherence Pass.", "Similar to Denoising Strength."]
},
"compositingCoherenceEdgeSize": {
"heading": "Edge Size",
"paragraphs": ["The edge size of the coherence pass."]
},
"compositingCoherenceMinDenoise": {
"heading": "Minimum Denoise",
"paragraphs": [
"Denoising strength for the Coherence Pass.",
"Same as the Image to Image Denoising Strength parameter."
"Minimum denoise strength for the Coherence mode",
"The minimum denoise strength for the coherence region when inpainting or outpainting"
]
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"Which steps of the denoising process will have the ControlNet applied.",
"ControlNets applied at the beginning of the process guide composition, and ControlNets applied at the end guide details."
]
},
"controlNetControlMode": {
"heading": "Control Mode",
"paragraphs": ["Lends more weight to either the prompt or ControlNet."]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": ["How the ControlNet image will be fit to the image output size."]
},
"controlNet": {
"heading": "ControlNet",
"paragraphs": [
"ControlNets provide guidance to the generation process, helping create images with controlled composition, structure, or style, depending on the model selected."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"The part of the of the denoising process that will have the Control Adapter applied.",
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
]
},
"controlNetControlMode": {
"heading": "Control Mode",
"paragraphs": ["Lend more weight to either the prompt or ControlNet."]
},
"controlNetProcessor": {
"heading": "Processor",
"paragraphs": [
"Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": ["How strongly the ControlNet will impact the generated image."]
"paragraphs": [
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
]
},
"dynamicPrompts": {
"heading": "Dynamic Prompts",
@@ -1526,13 +1574,23 @@
"Per Image will use a unique seed for each image. This provides more variation."
]
},
"imageFit": {
"heading": "Fit Initial Image to Output Size",
"paragraphs": [
"Resizes the initial image to the width and height of the output image. Recommended to enable."
]
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": ["Method of infilling during the Outpainting or Inpainting process."]
},
"lora": {
"heading": "LoRA Weight",
"paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
"heading": "LoRA",
"paragraphs": ["Lightweight models that are used in conjunction with base models."]
},
"loraWeight": {
"heading": "Weight",
"paragraphs": ["Weight of the LoRA. Higher weight will lead to larger impacts on the final image."]
},
"noiseUseCPU": {
"heading": "Use CPU Noise",
@@ -1542,14 +1600,25 @@
"There is no performance impact to enabling CPU Noise."
]
},
"paramAspect": {
"heading": "Aspect",
"paragraphs": [
"Aspect ratio of the generated image. Changing the ratio will update the Width and Height accordingly.",
"“Optimize” will set the Width and Height to optimal dimensions for the chosen model."
]
},
"paramCFGScale": {
"heading": "CFG Scale",
"paragraphs": ["Controls how much your prompt influences the generation process."]
"paragraphs": [
"Controls how much the prompt influences the generation process.",
"High CFG Scale values can result in over-saturation and distorted generation results. "
]
},
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplier",
"paragraphs": [
"Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr). Suggested value 0.7."
"Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr).",
"Suggested value of 0.7 for these models."
]
},
"paramDenoisingStrength": {
@@ -1559,6 +1628,16 @@
"0 will result in an identical image, while 1 will result in a completely new image."
]
},
"paramHeight": {
"heading": "Height",
"paragraphs": ["Height of the generated image. Must be a multiple of 8."]
},
"paramHrf": {
"heading": "Enable High Resolution Fix",
"paragraphs": [
"Generate high quality images at a larger resolution than optimal for the model. Generally used to prevent duplication in the generated image."
]
},
"paramIterations": {
"heading": "Iterations",
"paragraphs": [
@@ -1569,8 +1648,7 @@
"paramModel": {
"heading": "Model",
"paragraphs": [
"Model used for the denoising steps.",
"Different models are typically trained to specialize in producing particular aesthetic results and content."
"Model used for generation. Different models are trained to specialize in producing different aesthetic results and content."
]
},
"paramRatio": {
@@ -1584,7 +1662,7 @@
"heading": "Seed",
"paragraphs": [
"Controls the starting noise used for generation.",
"Disable “Random Seed” to produce identical results with the same generation settings."
"Disable the “Random” option to produce identical results with the same generation settings."
]
},
"paramSteps": {
@@ -1594,6 +1672,10 @@
"Higher step counts will typically create better images but will require more generation time."
]
},
"paramUpscaleMethod": {
"heading": "Upscale Method",
"paragraphs": ["Method used to upscale the image for High Resolution Fix."]
},
"paramVAE": {
"heading": "VAE",
"paragraphs": ["Model used for translating AI output into the final image."]
@@ -1601,14 +1683,82 @@
"paramVAEPrecision": {
"heading": "VAE Precision",
"paragraphs": [
"The precision used during VAE encoding and decoding. FP16/half precision is more efficient, at the expense of minor image variations."
"The precision used during VAE encoding and decoding.",
"Fp16/Half precision is more efficient, at the expense of minor image variations."
]
},
"paramWidth": {
"heading": "Width",
"paragraphs": ["Width of the generated image. Must be a multiple of 8."]
},
"patchmatchDownScaleSize": {
"heading": "Downscale",
"paragraphs": [
"How much downscaling occurs before infilling.",
"Higher downscaling will improve performance and reduce quality."
]
},
"refinerModel": {
"heading": "Refiner Model",
"paragraphs": [
"Model used during the refiner portion of the generation process.",
"Similar to the Generation Model."
]
},
"refinerPositiveAestheticScore": {
"heading": "Positive Aesthetic Score",
"paragraphs": [
"Weight generations to be more similar to images with a high aesthetic score, based on the training data."
]
},
"refinerNegativeAestheticScore": {
"heading": "Negative Aesthetic Score",
"paragraphs": [
"Weight generations to be more similar to images with a low aesthetic score, based on the training data."
]
},
"refinerScheduler": {
"heading": "Scheduler",
"paragraphs": [
"Scheduler used during the refiner portion of the generation process.",
"Similar to the Generation Scheduler."
]
},
"refinerStart": {
"heading": "Refiner Start",
"paragraphs": [
"Where in the generation process the refiner will start to be used.",
"0 means the refiner will be used for the entire generation process, 0.8 means the refiner will be used for the last 20% of the generation process."
]
},
"refinerSteps": {
"heading": "Steps",
"paragraphs": [
"Number of steps that will be performed during the refiner portion of the generation process.",
"Similar to the Generation Steps."
]
},
"refinerCfgScale": {
"heading": "CFG Scale",
"paragraphs": [
"Controls how much the prompt influences the generation process.",
"Similar to the Generation CFG Scale."
]
},
"scaleBeforeProcessing": {
"heading": "Scale Before Processing",
"paragraphs": [
"Scales the selected area to the size best suited for the model before the image generation process."
"“Auto” scales the selected area to the size best suited for the model before the image generation process.",
"“Manual” allows you to choose the width and height the selected area will be scaled to before the image generation process."
]
},
"seamlessTilingXAxis": {
"heading": "Seamless Tiling X Axis",
"paragraphs": ["Seamlessly tile an image along the horizontal axis."]
},
"seamlessTilingYAxis": {
"heading": "Seamless Tiling Y Axis",
"paragraphs": ["Seamlessly tile an image along the vertical axis."]
}
},
"ui": {
@@ -1642,6 +1792,9 @@
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",
"clearHistory": "Clear History",
"clearMask": "Clear Mask (Shift+C)",
"coherenceModeGaussianBlur": "Gaussian Blur",
"coherenceModeBoxBlur": "Box Blur",
"coherenceModeStaged": "Staged",
"colorPicker": "Color Picker",
"copyToClipboard": "Copy to Clipboard",
"cursorPosition": "Cursor Position",

View File

@@ -47,7 +47,7 @@
"statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello",
"loading": "Caricamento in corso",
"loadingInvokeAI": "Caricamento Invoke AI",
"loadingInvokeAI": "Caricamento di Invoke AI",
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
"accept": "Accetta",
@@ -61,7 +61,7 @@
"imagePrompt": "Prompt Immagine",
"darkMode": "Modalità scura",
"batch": "Gestione Lotto",
"modelManager": "Gestore modello",
"modelManager": "Gestore Modelli",
"communityLabel": "Comunità",
"nodeEditor": "Editor dei nodi",
"statusProcessing": "Elaborazione in corso",
@@ -81,7 +81,7 @@
"error": "Errore",
"installed": "Installato",
"template": "Schema",
"outputs": "Uscite",
"outputs": "Risultati",
"data": "Dati",
"somethingWentWrong": "Qualcosa è andato storto",
"copyError": "$t(gallery.copy) Errore",
@@ -93,7 +93,7 @@
"created": "Creato",
"prevPage": "Pagina precedente",
"delete": "Elimina",
"orderBy": "Ordinato per",
"orderBy": "Ordina per",
"nextPage": "Pagina successiva",
"saveAs": "Salva come",
"unsaved": "Non salvato",
@@ -109,7 +109,12 @@
"green": "Verde",
"blue": "Blu",
"alpha": "Alfa",
"copy": "Copia"
"copy": "Copia",
"on": "Attivato",
"checkpoint": "Checkpoint",
"safetensors": "Safetensors",
"ai": "ia",
"file": "File"
},
"gallery": {
"generations": "Generazioni",
@@ -934,7 +939,7 @@
"executionStateCompleted": "Completato",
"boardFieldDescription": "Una bacheca della galleria",
"addNodeToolTip": "Aggiungi nodo (Shift+A, Space)",
"sDXLRefinerModelField": "Modello Refiner",
"sDXLRefinerModelField": "Modello Affinatore",
"problemReadingMetadata": "Problema durante la lettura dei metadati dall'immagine",
"colorCodeEdgesHelp": "Bordi con codice colore in base ai campi collegati",
"animatedEdges": "Bordi animati",
@@ -1138,7 +1143,11 @@
"unsupportedAnyOfLength": "unione di troppi elementi ({{count}})",
"clearWorkflowDesc": "Cancellare questo flusso di lavoro e avviarne uno nuovo?",
"clearWorkflow": "Cancella il flusso di lavoro",
"clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate."
"clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate.",
"viewMode": "Utilizzare nella vista lineare",
"reorderLinearView": "Riordina la vista lineare",
"editMode": "Modifica nell'editor del flusso di lavoro",
"resetToDefaultValue": "Ripristina il valore predefinito"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1241,7 +1250,16 @@
"large": "Grande",
"small": "Piccolo",
"depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
"modelSize": "Dimensioni del modello"
"modelSize": "Dimensioni del modello",
"dwOpenposeDescription": "Stima della posa umana utilizzando DW Openpose",
"face": "Viso",
"body": "Corpo",
"hands": "Mani",
"lineartAnime": "Linea Anime",
"base": "Base",
"lineart": "Linea",
"controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
"mediapipeFace": "Mediapipe Volto"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -1321,7 +1339,7 @@
"noModelsAvailable": "Nessun modello disponibile",
"selectModel": "Seleziona un modello",
"selectLoRA": "Seleziona un LoRA",
"noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato",
"noRefinerModelsInstalled": "Nessun modello affinatore SDXL installato",
"noLoRAsInstalled": "Nessun LoRA installato",
"esrganModel": "Modello ESRGAN",
"addLora": "Aggiungi LoRA",
@@ -1371,7 +1389,8 @@
"popovers": {
"paramScheduler": {
"paragraphs": [
"Il campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
"Il campionatore utilizzato durante il processo di generazione.",
"Ciascun campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello."
],
"heading": "Campionatore"
},
@@ -1384,8 +1403,8 @@
"compositingCoherenceSteps": {
"heading": "Passi",
"paragraphs": [
"Numero di passi di riduzione del rumore utilizzati nel Passaggio di Coerenza.",
"Uguale al parametro principale Passi."
"Numero di passi utilizzati nel Passaggio di Coerenza.",
"Simile ai passi di generazione."
]
},
"compositingBlur": {
@@ -1397,14 +1416,13 @@
"compositingCoherenceMode": {
"heading": "Modalità",
"paragraphs": [
"La modalità del Passaggio di Coerenza."
"Metodo utilizzato per creare un'immagine coerente con l'area mascherata appena generata."
]
},
"clipSkip": {
"paragraphs": [
"Scegli quanti livelli del modello CLIP saltare.",
"Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip.",
"Un valore più alto in genere produce un'immagine meno dettagliata."
"Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip."
]
},
"compositingCoherencePass": {
@@ -1416,8 +1434,8 @@
"compositingStrength": {
"heading": "Forza",
"paragraphs": [
"Intensità di riduzione del rumore per il passaggio di coerenza.",
"Uguale al parametro intensità di riduzione del rumore da immagine a immagine."
"Quantità di rumore aggiunta per il Passaggio di Coerenza.",
"Simile alla forza di riduzione del rumore."
]
},
"paramNegativeConditioning": {
@@ -1443,8 +1461,8 @@
"controlNetBeginEnd": {
"heading": "Percentuale passi Inizio / Fine",
"paragraphs": [
"A quali passi del processo di rimozione del rumore verrà applicato ControlNet.",
"I ControlNet applicati all'inizio del processo guidano la composizione, mentre i ControlNet applicati alla fine guidano i dettagli."
"La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
"In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli."
]
},
"noiseUseCPU": {
@@ -1457,7 +1475,8 @@
},
"scaleBeforeProcessing": {
"paragraphs": [
"Ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine."
"\"Auto\" ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine.",
"\"Manuale\" consente di scegliere la larghezza e l'altezza a cui verrà ridimensionata l'area selezionata prima del processo di generazione dell'immagine."
],
"heading": "Scala prima dell'elaborazione"
},
@@ -1492,20 +1511,21 @@
"paramVAEPrecision": {
"heading": "Precisione VAE",
"paragraphs": [
"La precisione utilizzata durante la codifica e decodifica VAE. FP16/mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
"La precisione utilizzata durante la codifica e decodifica VAE.",
"Fp16/Mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine."
]
},
"paramSeed": {
"paragraphs": [
"Controlla il rumore iniziale utilizzato per la generazione.",
"Disabilita seme \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
"Disabilita l'opzione \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione."
],
"heading": "Seme"
},
"controlNetResizeMode": {
"heading": "Modalità ridimensionamento",
"paragraphs": [
"Come l'immagine ControlNet verrà adattata alle dimensioni di output dell'immagine."
"Metodo per adattare le dimensioni dell'immagine in ingresso dell'adattatore di controllo alle dimensioni della generazione di output."
]
},
"dynamicPromptsSeedBehaviour": {
@@ -1520,8 +1540,7 @@
"paramModel": {
"heading": "Modello",
"paragraphs": [
"Modello utilizzato per i passaggi di riduzione del rumore.",
"Diversi modelli sono generalmente addestrati per specializzarsi nella produzione di particolari risultati e contenuti estetici."
"Modello utilizzato per la generazione. Diversi modelli vengono addestrati per specializzarsi nella produzione di risultati e contenuti estetici diversi."
]
},
"paramDenoisingStrength": {
@@ -1539,25 +1558,26 @@
},
"infillMethod": {
"paragraphs": [
"Metodo per riempire l'area selezionata."
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
],
"heading": "Metodo di riempimento"
},
"controlNetWeight": {
"heading": "Peso",
"paragraphs": [
"Quanto forte sarà l'impatto di ControlNet sull'immagine generata."
"Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale."
]
},
"paramCFGScale": {
"heading": "Scala CFG",
"paragraphs": [
"Controlla quanto il tuo prompt influenza il processo di generazione."
"Controlla quanto il prompt influenza il processo di generazione.",
"Valori elevati della scala CFG possono provocare una saturazione eccessiva e distorsioni nei risultati della generazione. "
]
},
"controlNetControlMode": {
"paragraphs": [
"Attribuisce più peso al prompt o a ControlNet."
"Attribuisce più peso al prompt oppure a ControlNet."
],
"heading": "Modalità di controllo"
},
@@ -1569,9 +1589,9 @@
]
},
"lora": {
"heading": "Peso LoRA",
"heading": "LoRA",
"paragraphs": [
"Un peso LoRA più elevato porterà a impatti maggiori sull'immagine finale."
"Modelli leggeri utilizzati insieme ai modelli base."
]
},
"controlNet": {
@@ -1583,7 +1603,123 @@
"paramCFGRescaleMultiplier": {
"heading": "Moltiplicatore di riscala CFG",
"paragraphs": [
"Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr). Valore suggerito 0.7."
"Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr).",
"Valore suggerito di 0.7 per questi modelli."
]
},
"controlNetProcessor": {
"heading": "Processore",
"paragraphs": [
"Metodo di elaborazione dell'immagine di input per guidare il processo di generazione. Processori diversi forniranno effetti o stili diversi nelle immagini generate."
]
},
"imageFit": {
"heading": "Adatta l'immagine iniziale alle dimensioni di output",
"paragraphs": [
"Ridimensiona l'immagine iniziale in base alla larghezza e all'altezza dell'immagine di output. Si consiglia di abilitarlo."
]
},
"loraWeight": {
"heading": "Peso",
"paragraphs": [
"Peso del LoRA. Un peso maggiore comporterà un impatto maggiore sull'immagine finale."
]
},
"paramAspect": {
"heading": "Aspetto",
"paragraphs": [
"Proporzioni dell'immagine generata. La modifica del rapporto aggiornerà di conseguenza la larghezza e l'altezza.",
"\"Ottimizza\" imposterà la larghezza e l'altezza alle dimensioni ottimali per il modello scelto."
]
},
"paramHeight": {
"heading": "Altezza",
"paragraphs": [
"Altezza dell'immagine generata. Deve essere un multiplo di 8."
]
},
"paramHrf": {
"heading": "Abilita correzione alta risoluzione",
"paragraphs": [
"Genera immagini di alta qualità con una risoluzione maggiore di quella ottimale per il modello. Generalmente utilizzato per impedire la duplicazione nell'immagine generata."
]
},
"paramUpscaleMethod": {
"heading": "Metodo di ampliamento",
"paragraphs": [
"Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
]
},
"patchmatchDownScaleSize": {
"heading": "Ridimensiona",
"paragraphs": [
"Quanto ridimensionamento avviene prima del riempimento.",
"Un ridimensionamento più elevato migliorerà le prestazioni e ridurrà la qualità."
]
},
"paramWidth": {
"paragraphs": [
"Larghezza dell'immagine generata. Deve essere un multiplo di 8."
],
"heading": "Larghezza"
},
"refinerModel": {
"heading": "Modello Affinatore",
"paragraphs": [
"Modello utilizzato durante la parte di affinamento del processo di generazione.",
"Simile al modello di generazione."
]
},
"refinerNegativeAestheticScore": {
"paragraphs": [
"Valuta le generazioni in modo che siano più simili alle immagini con un punteggio estetico basso, in base ai dati di addestramento."
],
"heading": "Punteggio estetico negativo"
},
"refinerScheduler": {
"paragraphs": [
"Campionatore utilizzato durante la parte di affinamento del processo di generazione.",
"Simile al campionatore di generazione."
],
"heading": "Campionatore"
},
"refinerStart": {
"heading": "Inizio affinamento",
"paragraphs": [
"A che punto nel processo di generazione inizierà ad essere utilizzato l'affinatore.",
"0 significa che l'affinatore verrà utilizzato per l'intero processo di generazione, 0.8 significa che l'affinatore verrà utilizzato per l'ultimo 20% del processo di generazione."
]
},
"refinerSteps": {
"heading": "Passi",
"paragraphs": [
"Numero di passi che verranno eseguiti durante la parte di affinamento del processo di generazione.",
"Simile ai passi di generazione."
]
},
"refinerCfgScale": {
"heading": "Scala CFG",
"paragraphs": [
"Controlla quanto il prompt influenza il processo di generazione.",
"Simile alla scala CFG di generazione."
]
},
"seamlessTilingXAxis": {
"heading": "Asse X di piastrellatura senza cuciture",
"paragraphs": [
"Affianca senza soluzione di continuità un'immagine lungo l'asse orizzontale."
]
},
"seamlessTilingYAxis": {
"heading": "Asse Y di piastrellatura senza cuciture",
"paragraphs": [
"Affianca senza soluzione di continuità un'immagine lungo l'asse verticale."
]
},
"refinerPositiveAestheticScore": {
"heading": "Punteggio estetico positivo",
"paragraphs": [
"Valuta le generazioni in modo che siano più simili alle immagini con un punteggio estetico elevato, in base ai dati di addestramento."
]
}
},
@@ -1632,7 +1768,8 @@
"steps": "Passi",
"scheduler": "Campionatore",
"recallParameters": "Richiama i parametri",
"noRecallParameters": "Nessun parametro da richiamare trovato"
"noRecallParameters": "Nessun parametro da richiamare trovato",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)"
},
"hrf": {
"enableHrf": "Abilita Correzione Alta Risoluzione",

View File

@@ -1217,16 +1217,14 @@
"clipSkip": {
"paragraphs": [
"Kies hoeveel CLIP-modellagen je wilt overslaan.",
"Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen.",
"Een hogere waarde geeft meestal een minder gedetailleerde afbeelding."
"Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen."
],
"heading": "Overslaan CLIP"
},
"paramModel": {
"heading": "Model",
"paragraphs": [
"Model gebruikt voor de ontruisingsstappen.",
"Verschillende modellen zijn meestal getraind om zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal."
"Model gebruikt voor de ontruisingsstappen."
]
},
"compositingCoherencePass": {

View File

@@ -108,7 +108,16 @@
"preferencesLabel": "Предпочтения",
"or": "или",
"advancedOptions": "Расширенные настройки",
"free": "Свободно"
"free": "Свободно",
"aboutHeading": "Владей своей творческой силой",
"red": "Красный",
"green": "Зеленый",
"blue": "Синий",
"alpha": "Альфа",
"toResolve": "Чтоб решить",
"copy": "Копировать",
"localSystem": "Локальная система",
"aboutDesc": "Используя Invoke для работы? Проверьте это:"
},
"gallery": {
"generations": "Генерации",
@@ -152,17 +161,17 @@
},
"hotkeys": {
"keyboardShortcuts": "Горячие клавиши",
"appHotkeys": "Горячие клавиши приложения",
"generalHotkeys": "Общие горячие клавиши",
"galleryHotkeys": орячие клавиши галереи",
"unifiedCanvasHotkeys": "Горячие клавиши Единого холста",
"appHotkeys": "Приложение",
"generalHotkeys": "Общее",
"galleryHotkeys": "Галлерея",
"unifiedCanvasHotkeys": "Единый холст",
"invoke": {
"title": "Invoke",
"desc": "Сгенерировать изображение"
},
"cancel": {
"title": "Отменить",
"desc": "Отменить генерацию изображения"
"desc": "Отменить текущий элемент"
},
"focusPrompt": {
"title": "Переключиться на ввод запроса",
@@ -352,7 +361,7 @@
"desc": "Открывает меню добавления узла",
"title": "Добавление узлов"
},
"nodesHotkeys": "Горячие клавиши узлов",
"nodesHotkeys": "Узлы",
"cancelAndClear": {
"desc": "Отмена текущего элемента очереди и очистка всех ожидающих элементов",
"title": "Отменить и очистить"
@@ -367,7 +376,11 @@
"desc": "Открытие и закрытие панели опций и галереи",
"title": "Переключить опции и галерею"
},
"clearSearch": "Очистить поиск"
"clearSearch": "Очистить поиск",
"remixImage": {
"desc": "Используйте все параметры, кроме сида из текущего изображения",
"title": "Ремикс изображения"
}
},
"modelManager": {
"modelManager": "Менеджер моделей",
@@ -512,7 +525,8 @@
"modelType": "Тип модели",
"customConfigFileLocation": "Расположение пользовательского файла конфигурации",
"vaePrecision": "Точность VAE",
"noModelSelected": "Модель не выбрана"
"noModelSelected": "Модель не выбрана",
"configFile": "Файл конфигурации"
},
"parameters": {
"images": "Изображения",
@@ -583,8 +597,8 @@
"copyImage": "Скопировать изображение",
"showPreview": "Показать предпросмотр",
"noiseSettings": "Шум",
"seamlessXAxis": "Горизонтальная",
"seamlessYAxis": "Вертикальная",
"seamlessXAxis": "Бесшовность по оси X",
"seamlessYAxis": "Бесшовность по оси Y",
"scheduler": "Планировщик",
"boundingBoxWidth": "Ширина ограничивающей рамки",
"boundingBoxHeight": "Высота ограничивающей рамки",
@@ -612,7 +626,7 @@
"noControlImageForControlAdapter": "Адаптер контроля #{{number}} не имеет изображения",
"noModelForControlAdapter": "Не выбрана модель адаптера контроля #{{number}}.",
"unableToInvoke": "Невозможно вызвать",
"incompatibleBaseModelForControlAdapter": "Модель контрольного адаптера №{{number}} недействительна для основной модели.",
"incompatibleBaseModelForControlAdapter": "Адаптер контроля №{{number}} несовместим с основной моделью.",
"systemDisconnected": "Система отключена",
"missingNodeTemplate": "Отсутствует шаблон узла",
"readyToInvoke": "Готово к вызову",
@@ -653,7 +667,10 @@
"setToOptimalSize": "Установить оптимальный для модели размер",
"setToOptimalSizeTooSmall": "$t(parameters.setToOptimalSize) (может быть слишком маленьким)",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (может быть слишком большим)",
"lockAspectRatio": "Заблокировать соотношение"
"lockAspectRatio": "Заблокировать соотношение",
"boxBlur": "Размытие прямоугольника",
"gaussianBlur": "Размытие по Гауссу",
"remixImage": "Ремикс изображения"
},
"settings": {
"models": "Модели",
@@ -787,7 +804,10 @@
"canvasSavedGallery": "Холст сохранен в галерею",
"imageUploadFailed": "Не удалось загрузить изображение",
"modelAdded": "Добавлена модель: {{modelName}}",
"problemImportingMask": "Проблема с импортом маски"
"problemImportingMask": "Проблема с импортом маски",
"problemDownloadingImage": "Не удается скачать изображение",
"uploadInitialImage": "Загрузить начальное изображение",
"resetInitialImage": "Сбросить начальное изображение"
},
"tooltip": {
"feature": {
@@ -892,7 +912,8 @@
"mode": "Режим",
"loadMore": "Загрузить больше",
"resetUI": "$t(accessibility.reset) интерфейс",
"createIssue": "Сообщить о проблеме"
"createIssue": "Сообщить о проблеме",
"about": "Об этом"
},
"ui": {
"showProgressImages": "Показывать промежуточный итог",
@@ -1117,7 +1138,18 @@
"unableToParseEdge": "Невозможно разобрать край",
"unknownInput": "Неизвестный вход: {{name}}",
"oNNXModelFieldDescription": "Поле модели ONNX.",
"imageCollection": "Коллекция изображений"
"imageCollection": "Коллекция изображений",
"newWorkflow": "Новый рабочий процесс",
"newWorkflowDesc": "Создать новый рабочий процесс?",
"clearWorkflow": "Очистить рабочий процесс",
"newWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные изменения.",
"latentsCollection": "Коллекция латентов",
"clearWorkflowDesc": "Очистить этот рабочий процесс и создать новый?",
"clearWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные измерения.",
"reorderLinearView": "Изменить порядок линейного просмотра",
"viewMode": "Использовать в линейном представлении",
"editMode": "Открыть в редакторе узлов",
"resetToDefaultValue": "Сбросить к стандартному значкнию"
},
"controlnet": {
"amult": "a_mult",
@@ -1198,7 +1230,18 @@
"enableIPAdapter": "Включить IP Adapter",
"maxFaces": "Макс Лица",
"mlsdDescription": "Минималистичный детектор отрезков линии",
"resizeSimple": "Изменить размер (простой)"
"resizeSimple": "Изменить размер (простой)",
"megaControl": "Mega контроль",
"base": "Базовый",
"depthAnything": "Глубина всего",
"depthAnythingDescription": "Создание карты глубины с использованием метода Depth Anything",
"face": "Лицо",
"dwOpenposeDescription": "Оценка позы человека с помощью DW Openpose",
"large": "Большой",
"modelSize": "Размер модели",
"small": "Маленький",
"body": "Тело",
"hands": "Руки"
},
"boards": {
"autoAddBoard": "Авто добавление Доски",
@@ -1281,7 +1324,7 @@
"compositingCoherenceSteps": {
"heading": "Шаги",
"paragraphs": [
null,
"Количество шагов снижения шума, используемых при прохождении когерентности.",
"То же, что и основной параметр «Шаги»."
]
},
@@ -1319,7 +1362,10 @@
]
},
"compositingCoherenceMode": {
"heading": "Режим"
"heading": "Режим",
"paragraphs": [
"Режим прохождения когерентности."
]
},
"paramSeed": {
"paragraphs": [
@@ -1353,16 +1399,14 @@
"clipSkip": {
"paragraphs": [
"Выберите, сколько слоев модели CLIP нужно пропустить.",
"Некоторые модели работают лучше с определенными настройками пропуска CLIP.",
"Более высокое значение обычно приводит к менее детализированному изображению."
"Некоторые модели работают лучше с определенными настройками пропуска CLIP."
],
"heading": "CLIP пропуск"
},
"paramModel": {
"heading": "Модель",
"paragraphs": [
"Модель, используемая для шагов шумоподавления.",
"Различные модели обычно обучаются, чтобы специализироваться на достижении определенных эстетических результатов и содержания."
"Модель, используемая для шагов шумоподавления."
]
},
"compositingCoherencePass": {
@@ -1601,7 +1645,7 @@
"openWorkflow": "Открытый рабочий процесс",
"clearWorkflowSearchFilter": "Очистить фильтр поиска рабочих процессов",
"workflowLibrary": "Библиотека",
"downloadWorkflow": "Скачать рабочий процесс",
"downloadWorkflow": "Сохранить в файл",
"noRecentWorkflows": "Нет недавних рабочих процессов",
"workflowSaved": "Рабочий процесс сохранен",
"workflowIsOpen": "Рабочий процесс открыт",
@@ -1614,9 +1658,12 @@
"deleteWorkflow": "Удалить рабочий процесс",
"workflows": "Рабочие процессы",
"noDescription": "Без описания",
"uploadWorkflow": "Загрузить рабочий процесс",
"uploadWorkflow": "Загрузить из файла",
"userWorkflows": "Мои рабочие процессы",
"newWorkflowCreated": "Создан новый рабочий процесс"
"newWorkflowCreated": "Создан новый рабочий процесс",
"saveWorkflowToProject": "Сохранить рабочий процесс в проект",
"workflowCleared": "Рабочий процесс очищен",
"noWorkflows": "Нет рабочих процессов"
},
"embedding": {
"noEmbeddingsLoaded": "встраивания не загружены",

View File

@@ -1444,16 +1444,14 @@
"clipSkip": {
"paragraphs": [
"选择要跳过 CLIP 模型多少层。",
"部分模型跳过特定数值的层时效果会更好。",
"较高的数值通常会导致图像细节更少。"
"部分模型跳过特定数值的层时效果会更好。"
],
"heading": "CLIP 跳过层"
},
"paramModel": {
"heading": "模型",
"paragraphs": [
"用于去噪过程的模型。",
"不同的模型一般会通过接受训练来专门产生特定的美学内容和结果。"
"用于去噪过程的模型。"
]
},
"paramIterations": {

View File

@@ -1,34 +0,0 @@
export const COLORS = {
reset: '\x1b[0m',
bright: '\x1b[1m',
dim: '\x1b[2m',
underscore: '\x1b[4m',
blink: '\x1b[5m',
reverse: '\x1b[7m',
hidden: '\x1b[8m',
fg: {
black: '\x1b[30m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m',
white: '\x1b[37m',
gray: '\x1b[90m',
crimson: '\x1b[38m',
},
bg: {
black: '\x1b[40m',
red: '\x1b[41m',
green: '\x1b[42m',
yellow: '\x1b[43m',
blue: '\x1b[44m',
magenta: '\x1b[45m',
cyan: '\x1b[46m',
white: '\x1b[47m',
gray: '\x1b[100m',
crimson: '\x1b[48m',
},
};

View File

@@ -19,7 +19,7 @@ declare global {
}
export const $socketOptions = map<Partial<ManagerOptions & SocketOptions>>({});
export const $isSocketInitialized = atom<boolean>(false);
const $isSocketInitialized = atom<boolean>(false);
/**
* Initializes the socket.io connection and sets up event listeners.
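
The change above drops the `export` from `$isSocketInitialized`, making the init flag private to its module. A sketch of the idempotent-init guard such a flag typically serves; `initSocket` and `connect` are hypothetical names, only the nanostores calls are real:

import { atom } from 'nanostores';

// Module-private flag: consumers can no longer reach in and flip it.
const $isSocketInitialized = atom<boolean>(false);

// Hypothetical wrapper showing the pattern: only the first caller connects.
export const initSocket = (connect: () => void): void => {
  if ($isSocketInitialized.get()) {
    return; // already initialized; later calls are no-ops
  }
  connect();
  $isSocketInitialized.set(true);
};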

View File

@@ -12,7 +12,6 @@ ROARR.serializeMessage = serializeMessage;
ROARR.write = createLogWriter();
export const BASE_CONTEXT = {};
export const log = Roarr.child(BASE_CONTEXT);
export const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));

View File

@@ -1,10 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import type { InvokeTabName } from 'features/ui/store/tabMap';
import type { BatchConfig } from 'services/api/types';
export const enqueueRequested = createAction<{
tabName: InvokeTabName;
prepend: boolean;
}>('app/enqueueRequested');
export const batchEnqueued = createAction<BatchConfig>('app/batchEnqueued');
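
For readers unfamiliar with `createAction`, a self-contained sketch of the pattern above; the payload shape mirrors `enqueueRequested`, with the tab names narrowed by hand since `InvokeTabName` lives elsewhere in the repo:

import { createAction } from '@reduxjs/toolkit';

// Hand-narrowed stand-in for the repo's InvokeTabName union.
type TabName = 'txt2img' | 'img2img' | 'unifiedCanvas' | 'nodes';

const enqueueRequested = createAction<{ tabName: TabName; prepend: boolean }>('app/enqueueRequested');

const action = enqueueRequested({ tabName: 'txt2img', prepend: true });
console.log(action.type); // "app/enqueueRequested"

// createAction attaches a type guard, which is how listeners narrow actions:
if (enqueueRequested.match(action)) {
  console.log(action.payload.prepend); // true
}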

View File

@@ -1 +1,2 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
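
The new `EMPTY_ARRAY` constant is presumably a stable fallback reference for selectors: returning a fresh `[]` on every call defeats shallow-equality checks and triggers spurious re-renders. A sketch with hypothetical state and selector names:

const EMPTY_ARRAY: readonly never[] = [];

type State = { gallery?: { imageNames: string[] } };

// Falls back to the shared constant instead of allocating a new array.
const selectImageNames = (state: State) => state.gallery?.imageNames ?? EMPTY_ARRAY;

const a = selectImageNames({});
const b = selectImageNames({});
console.log(a === b); // true: one shared reference, so memoized consumers stay quiet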

View File

@@ -13,19 +13,9 @@ export const createMemoizedSelector = createSelectorCreator({
argsMemoize: lruMemoize,
});
/**
* A memoized selector creator that uses an LRU cache and the default shallow equality check.
*/
export const createLruSelector = createSelectorCreator({
memoize: lruMemoize,
argsMemoize: lruMemoize,
});
export const createLruDraftSafeSelector = createDraftSafeSelectorCreator({
memoize: lruMemoize,
argsMemoize: lruMemoize,
});
export const getSelectorsOptions: GetSelectorsOptions = {
createSelector: createLruDraftSafeSelector,
createSelector: createDraftSafeSelectorCreator({
memoize: lruMemoize,
argsMemoize: lruMemoize,
}),
};
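
The surviving options object mirrors the deleted `createLruSelector`: both the result function and the argument list are memoized with `lruMemoize` (Reselect 5). A sketch of where that pays off, using a hypothetical parameterized selector:

import { createSelectorCreator, lruMemoize } from 'reselect';

const createLruSelector = createSelectorCreator({
  memoize: lruMemoize,
  argsMemoize: lruMemoize,
});

type State = { items: Record<string, { name: string }> };

// Parameterized lookups alternate their arguments, which is exactly the
// access pattern an LRU cache tolerates better than single-slot memoization.
const selectItemName = createLruSelector(
  [(state: State) => state.items, (_state: State, id: string) => id],
  (items, id) => items[id]?.name,
);

const state: State = { items: { a: { name: 'foo' } } };
console.log(selectItemName(state, 'a')); // "foo"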

View File

@@ -2,15 +2,15 @@ import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
import { $projectId } from 'app/store/nanostores/projectId';
import type { UseStore } from 'idb-keyval';
import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
import { action, atom } from 'nanostores';
import { atom } from 'nanostores';
import type { Driver } from 'redux-remember';
// Create a custom idb-keyval store (just needed to customize the name)
export const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
export const clearIdbKeyValStore = action($idbKeyValStore, 'clear', (store) => {
clear(store.get());
});
export const clearIdbKeyValStore = () => {
clear($idbKeyValStore.get());
};
// Create redux-remember driver, wrapping idb-keyval
export const idbKeyValDriver: Driver = {
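
The view truncates `idbKeyValDriver`; for orientation, a minimal sketch of how a redux-remember driver can forward to idb-keyval. The `getItem`/`setItem` bodies are assumptions, not the repo's code:

import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
import type { UseStore } from 'idb-keyval';
import { atom } from 'nanostores';
import type { Driver } from 'redux-remember';

const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));

// Plain function now, replacing the nanostores `action` wrapper the diff removes.
export const clearIdbKeyValStore = () => {
  clear($idbKeyValStore.get());
};

// redux-remember only needs getItem/setItem; both forward to idb-keyval
// against the custom store held in the atom (assumed bodies).
export const idbKeyValDriver: Driver = {
  getItem: (key) => get(key, $idbKeyValStore.get()),
  setItem: (key, value) => set(key, value, $idbKeyValStore.get()).then(() => value),
};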

View File

@@ -3,7 +3,7 @@ import { parseify } from 'common/util/serialize';
import { PersistError, RehydrateError } from 'redux-remember';
import { serializeError } from 'serialize-error';
export type StorageErrorArgs = {
type StorageErrorArgs = {
key: string;
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct
value?: any;

View File

@@ -1,80 +1,65 @@
import type { ListenerEffect, TypedAddListener, TypedStartListening, UnknownAction } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import type { TypedStartListening } from '@reduxjs/toolkit';
import { createListenerMiddleware } from '@reduxjs/toolkit';
import { addCommitStagingAreaImageListener } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addFirstListImagesListener } from 'app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived';
import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued';
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addCanvasCopiedToClipboardListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard';
import { addCanvasDownloadedAsImageListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage';
import { addCanvasImageToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet';
import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery';
import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet';
import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged';
import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery';
import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess';
import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed';
import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
import { addRequestedSingleImageDeletionListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDeleted';
import { addImageDroppedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped';
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred';
import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred';
import { addImageToDeleteSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected';
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
import { addInitialImageSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/initialImageSelected';
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
import { addDynamicPromptsListener } from 'app/store/middleware/listenerMiddleware/listeners/promptChanged';
import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected';
import { addSocketDisconnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected';
import { addGeneratorProgressEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress';
import { addGraphExecutionStateCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete';
import { addInvocationCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete';
import { addInvocationErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError';
import { addInvocationRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError';
import { addInvocationStartedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted';
import { addModelInstallEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall';
import { addModelLoadEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad';
import { addSocketQueueItemStatusChangedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged';
import { addSessionRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError';
import { addSocketSubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed';
import { addSocketUnsubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed';
import { addStagingAreaImageSavedListener } from 'app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved';
import { addUpdateAllNodesRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested';
import { addUpscaleRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/upscaleRequested';
import { addWorkflowLoadRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested';
import type { AppDispatch, RootState } from 'app/store/store';
import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener';
import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';
import { addAnyEnqueuedListener } from './listeners/anyEnqueued';
import { addAppConfigReceivedListener } from './listeners/appConfigReceived';
import { addAppStartedListener } from './listeners/appStarted';
import { addBatchEnqueuedListener } from './listeners/batchEnqueued';
import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from './listeners/boardIdSelected';
import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard';
import { addCanvasDownloadedAsImageListener } from './listeners/canvasDownloadedAsImage';
import { addCanvasImageToControlNetListener } from './listeners/canvasImageToControlNet';
import { addCanvasMaskSavedToGalleryListener } from './listeners/canvasMaskSavedToGallery';
import { addCanvasMaskToControlNetListener } from './listeners/canvasMaskToControlNet';
import { addCanvasMergedListener } from './listeners/canvasMerged';
import { addCanvasSavedToGalleryListener } from './listeners/canvasSavedToGallery';
import { addControlNetAutoProcessListener } from './listeners/controlNetAutoProcess';
import { addControlNetImageProcessedListener } from './listeners/controlNetImageProcessed';
import { addEnqueueRequestedCanvasListener } from './listeners/enqueueRequestedCanvas';
import { addEnqueueRequestedLinear } from './listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from './listeners/enqueueRequestedNodes';
import { addGetOpenAPISchemaListener } from './listeners/getOpenAPISchema';
import {
addImageAddedToBoardFulfilledListener,
addImageAddedToBoardRejectedListener,
} from './listeners/imageAddedToBoard';
import {
addImageDeletedFulfilledListener,
addImageDeletedPendingListener,
addImageDeletedRejectedListener,
addRequestedMultipleImageDeletionListener,
addRequestedSingleImageDeletionListener,
} from './listeners/imageDeleted';
import { addImageDroppedListener } from './listeners/imageDropped';
import {
addImageRemovedFromBoardFulfilledListener,
addImageRemovedFromBoardRejectedListener,
} from './listeners/imageRemovedFromBoard';
import { addImagesStarredListener } from './listeners/imagesStarred';
import { addImagesUnstarredListener } from './listeners/imagesUnstarred';
import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected';
import { addImageUploadedFulfilledListener, addImageUploadedRejectedListener } from './listeners/imageUploaded';
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
import { addModelSelectedListener } from './listeners/modelSelected';
import { addModelsLoadedListener } from './listeners/modelsLoaded';
import { addDynamicPromptsListener } from './listeners/promptChanged';
import { addSocketConnectedEventListener as addSocketConnectedListener } from './listeners/socketio/socketConnected';
import { addSocketDisconnectedEventListener as addSocketDisconnectedListener } from './listeners/socketio/socketDisconnected';
import { addGeneratorProgressEventListener as addGeneratorProgressListener } from './listeners/socketio/socketGeneratorProgress';
import { addGraphExecutionStateCompleteEventListener as addGraphExecutionStateCompleteListener } from './listeners/socketio/socketGraphExecutionStateComplete';
import { addInvocationCompleteEventListener as addInvocationCompleteListener } from './listeners/socketio/socketInvocationComplete';
import { addInvocationErrorEventListener as addInvocationErrorListener } from './listeners/socketio/socketInvocationError';
import { addInvocationRetrievalErrorEventListener } from './listeners/socketio/socketInvocationRetrievalError';
import { addInvocationStartedEventListener as addInvocationStartedListener } from './listeners/socketio/socketInvocationStarted';
import { addModelLoadEventListener } from './listeners/socketio/socketModelLoad';
import { addSocketQueueItemStatusChangedEventListener } from './listeners/socketio/socketQueueItemStatusChanged';
import { addSessionRetrievalErrorEventListener } from './listeners/socketio/socketSessionRetrievalError';
import { addSocketSubscribedEventListener as addSocketSubscribedListener } from './listeners/socketio/socketSubscribed';
import { addSocketUnsubscribedEventListener as addSocketUnsubscribedListener } from './listeners/socketio/socketUnsubscribed';
import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSaved';
import { addUpdateAllNodesRequestedListener } from './listeners/updateAllNodesRequested';
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
import { addWorkflowLoadRequestedListener } from './listeners/workflowLoadRequested';
export const listenerMiddleware = createListenerMiddleware();
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
-export const startAppListening = listenerMiddleware.startListening as AppStartListening;
export const addAppListener = addListener as TypedAddListener<RootState, AppDispatch>;
export type AppListenerEffect = ListenerEffect<UnknownAction, RootState, AppDispatch>;
+const startAppListening = listenerMiddleware.startListening as AppStartListening;
/**
 * The RTK listener middleware is a lightweight alternative to sagas/observables.
@@ -83,93 +68,88 @@ export type AppListenerEffect = ListenerEffect<UnknownAction, RootState, AppDisp
 */
// Image uploaded
-addImageUploadedFulfilledListener();
-addImageUploadedRejectedListener();
+addImageUploadedFulfilledListener(startAppListening);
// Image selected
-addInitialImageSelectedListener();
+addInitialImageSelectedListener(startAppListening);
// Image deleted
-addRequestedSingleImageDeletionListener();
-addRequestedMultipleImageDeletionListener();
-addImageDeletedPendingListener();
-addImageDeletedFulfilledListener();
-addImageDeletedRejectedListener();
-addDeleteBoardAndImagesFulfilledListener();
-addImageToDeleteSelectedListener();
+addRequestedSingleImageDeletionListener(startAppListening);
+addDeleteBoardAndImagesFulfilledListener(startAppListening);
+addImageToDeleteSelectedListener(startAppListening);
// Image starred
-addImagesStarredListener();
-addImagesUnstarredListener();
+addImagesStarredListener(startAppListening);
+addImagesUnstarredListener(startAppListening);
// Gallery
-addGalleryImageClickedListener();
+addGalleryImageClickedListener(startAppListening);
// User Invoked
-addEnqueueRequestedCanvasListener();
-addEnqueueRequestedNodes();
-addEnqueueRequestedLinear();
-addAnyEnqueuedListener();
-addBatchEnqueuedListener();
+addEnqueueRequestedCanvasListener(startAppListening);
+addEnqueueRequestedNodes(startAppListening);
+addEnqueueRequestedLinear(startAppListening);
+addAnyEnqueuedListener(startAppListening);
+addBatchEnqueuedListener(startAppListening);
// Canvas actions
-addCanvasSavedToGalleryListener();
-addCanvasMaskSavedToGalleryListener();
-addCanvasImageToControlNetListener();
-addCanvasMaskToControlNetListener();
-addCanvasDownloadedAsImageListener();
-addCanvasCopiedToClipboardListener();
-addCanvasMergedListener();
-addStagingAreaImageSavedListener();
-addCommitStagingAreaImageListener();
+addCanvasSavedToGalleryListener(startAppListening);
+addCanvasMaskSavedToGalleryListener(startAppListening);
+addCanvasImageToControlNetListener(startAppListening);
+addCanvasMaskToControlNetListener(startAppListening);
+addCanvasDownloadedAsImageListener(startAppListening);
+addCanvasCopiedToClipboardListener(startAppListening);
+addCanvasMergedListener(startAppListening);
+addStagingAreaImageSavedListener(startAppListening);
+addCommitStagingAreaImageListener(startAppListening);
// Socket.IO
-addGeneratorProgressListener();
-addGraphExecutionStateCompleteListener();
-addInvocationCompleteListener();
-addInvocationErrorListener();
-addInvocationStartedListener();
-addSocketConnectedListener();
-addSocketDisconnectedListener();
-addSocketSubscribedListener();
-addSocketUnsubscribedListener();
-addModelLoadEventListener();
-addSessionRetrievalErrorEventListener();
-addInvocationRetrievalErrorEventListener();
-addSocketQueueItemStatusChangedEventListener();
+addGeneratorProgressEventListener(startAppListening);
+addGraphExecutionStateCompleteEventListener(startAppListening);
+addInvocationCompleteEventListener(startAppListening);
+addInvocationErrorEventListener(startAppListening);
+addInvocationStartedEventListener(startAppListening);
+addSocketConnectedEventListener(startAppListening);
+addSocketDisconnectedEventListener(startAppListening);
+addSocketSubscribedEventListener(startAppListening);
+addSocketUnsubscribedEventListener(startAppListening);
+addModelLoadEventListener(startAppListening);
+addModelInstallEventListener(startAppListening);
+addSessionRetrievalErrorEventListener(startAppListening);
+addInvocationRetrievalErrorEventListener(startAppListening);
+addSocketQueueItemStatusChangedEventListener(startAppListening);
+addBulkDownloadListeners(startAppListening);
// ControlNet
-addControlNetImageProcessedListener();
-addControlNetAutoProcessListener();
+addControlNetImageProcessedListener(startAppListening);
+addControlNetAutoProcessListener(startAppListening);
// Boards
-addImageAddedToBoardFulfilledListener();
-addImageAddedToBoardRejectedListener();
-addImageRemovedFromBoardFulfilledListener();
-addImageRemovedFromBoardRejectedListener();
-addBoardIdSelectedListener();
+addImageAddedToBoardFulfilledListener(startAppListening);
+addImageRemovedFromBoardFulfilledListener(startAppListening);
+addBoardIdSelectedListener(startAppListening);
// Node schemas
-addGetOpenAPISchemaListener();
+addGetOpenAPISchemaListener(startAppListening);
// Workflows
-addWorkflowLoadRequestedListener();
-addUpdateAllNodesRequestedListener();
+addWorkflowLoadRequestedListener(startAppListening);
+addUpdateAllNodesRequestedListener(startAppListening);
// DND
-addImageDroppedListener();
+addImageDroppedListener(startAppListening);
// Models
-addModelSelectedListener();
+addModelSelectedListener(startAppListening);
// app startup
-addAppStartedListener();
-addModelsLoadedListener();
-addAppConfigReceivedListener();
-addFirstListImagesListener();
+addAppStartedListener(startAppListening);
+addModelsLoadedListener(startAppListening);
+addAppConfigReceivedListener(startAppListening);
+addFirstListImagesListener(startAppListening);
// Ad-hoc upscale workflow
-addUpscaleRequestedListener();
+addUpscaleRequestedListener(startAppListening);
// Dynamic prompts
-addDynamicPromptsListener();
+addDynamicPromptsListener(startAppListening);


@@ -1,15 +1,14 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasBatchIdsReset, commitStagingAreaImage, discardStagedImages } from 'features/canvas/store/canvasSlice';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
-import { startAppListening } from '..';
const matcher = isAnyOf(commitStagingAreaImage, discardStagedImages);
-export const addCommitStagingAreaImageListener = () => {
+export const addCommitStagingAreaImageListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher,
    effect: async (_, { dispatch, getState }) => {
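For context on the `matcher` line: `isAnyOf` folds several action creators into one predicate, so a single effect handles any of them. A self-contained sketch with stand-in actions (the real matcher pairs `commitStagingAreaImage` with `discardStagedImages`):

```ts
import { createAction, isAnyOf } from '@reduxjs/toolkit';

// Stand-in actions for illustration only.
const commitImage = createAction('canvas/commitImage');
const discardImages = createAction('canvas/discardImages');

const matcher = isAnyOf(commitImage, discardImages);

matcher(commitImage());            // true
matcher(discardImages());          // true
matcher({ type: 'canvas/other' }); // false
```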


@@ -1,15 +1,11 @@
import { createAction } from '@reduxjs/toolkit';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageCache } from 'services/api/types';
import { getListImagesUrl, imagesSelectors } from 'services/api/util';
-import { startAppListening } from '..';
export const appStarted = createAction('app/appStarted');
-export const addFirstListImagesListener = () => {
+export const addFirstListImagesListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: imagesApi.endpoints.listImages.matchFulfilled,
    effect: async (action, { dispatch, unsubscribe, cancelActiveListeners }) => {
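The `unsubscribe`/`cancelActiveListeners` pair in this effect signature is the usual RTK recipe for a run-once listener: cancel any other in-flight instances of the effect, then deregister the listener. A sketch of that recipe with an illustrative action:

```ts
import { createAction, createListenerMiddleware } from '@reduxjs/toolkit';

const listenerMiddleware = createListenerMiddleware();
const firstFetchFulfilled = createAction('images/firstFetchFulfilled'); // illustrative

listenerMiddleware.startListening({
  actionCreator: firstFetchFulfilled,
  effect: async (action, { unsubscribe, cancelActiveListeners }) => {
    // Cancel other in-flight copies of this effect, then remove the listener
    // so only the very first matching action is handled.
    cancelActiveListeners();
    unsubscribe();
    // ...select the initial image here, as the real listener does...
  },
});
```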


@@ -1,8 +1,7 @@
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue';
-import { startAppListening } from '..';
-export const addAnyEnqueuedListener = () => {
+export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
    effect: async (_, { dispatch, getState }) => {


@@ -1,10 +1,9 @@
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
-import { startAppListening } from '..';
-export const addAppConfigReceivedListener = () => {
+export const addAppConfigReceivedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
    effect: async (action, { getState, dispatch }) => {


@@ -1,10 +1,9 @@
import { createAction } from '@reduxjs/toolkit';
-import { startAppListening } from '..';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
export const appStarted = createAction('app/appStarted');
-export const addAppStartedListener = () => {
+export const addAppStartedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: appStarted,
    effect: async (action, { unsubscribe, cancelActiveListeners }) => {


@@ -1,19 +1,13 @@
-import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
+import { toast } from 'common/util/toast';
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
import { t } from 'i18next';
import { truncate, upperFirst } from 'lodash-es';
import { queueApi } from 'services/api/endpoints/queue';
-import { startAppListening } from '..';
-const { toast } = createStandaloneToast({
-  theme: theme,
-  defaultOptions: TOAST_OPTIONS.defaultOptions,
-});
-export const addBatchEnqueuedListener = () => {
+export const addBatchEnqueuedListener = (startAppListening: AppStartListening) => {
  // success
  startAppListening({
    matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
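This hunk also swaps the per-file standalone toast for the shared `toast` from `common/util/toast`. That module is not shown in this diff; presumably it makes the same `createStandaloneToast` call once, along the lines of:

```ts
// Presumed shape of common/util/toast (an assumption; the module is not shown
// in this diff). It centralizes the standalone toast the deleted lines created.
import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';

export const { toast } = createStandaloneToast({
  theme,
  defaultOptions: TOAST_OPTIONS.defaultOptions,
});
```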


@@ -1,3 +1,4 @@
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice';
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
@@ -5,9 +6,7 @@ import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { imagesApi } from 'services/api/endpoints/images';
-import { startAppListening } from '..';
-export const addDeleteBoardAndImagesFulfilledListener = () => {
+export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: imagesApi.endpoints.deleteBoardAndImages.matchFulfilled,
    effect: async (action, { dispatch, getState }) => {


@@ -1,12 +1,11 @@
import { isAnyOf } from '@reduxjs/toolkit';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import { imagesSelectors } from 'services/api/util';
-import { startAppListening } from '..';
-export const addBoardIdSelectedListener = () => {
+export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: isAnyOf(boardIdSelected, galleryViewChanged),
    effect: async (action, { getState, dispatch, condition, cancelActiveListeners }) => {


@@ -0,0 +1,114 @@
import type { UseToastOptions } from '@invoke-ai/ui-library';
import { ExternalLink } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { toast } from 'common/util/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
import {
  socketBulkDownloadCompleted,
  socketBulkDownloadFailed,
  socketBulkDownloadStarted,
} from 'services/events/actions';

const log = logger('images');

export const addBulkDownloadListeners = (startAppListening: AppStartListening) => {
  startAppListening({
    matcher: imagesApi.endpoints.bulkDownloadImages.matchFulfilled,
    effect: async (action) => {
      log.debug(action.payload, 'Bulk download requested');

      // If we have an item name, we are processing the bulk download locally and should use it as the toast id to
      // prevent multiple toasts for the same item.
      toast({
        id: action.payload.bulk_download_item_name ?? undefined,
        title: t('gallery.bulkDownloadRequested'),
        status: 'success',
        // Show the response message if it exists, otherwise show the default message
        description: action.payload.response || t('gallery.bulkDownloadRequestedDesc'),
        duration: null,
        isClosable: true,
      });
    },
  });

  startAppListening({
    matcher: imagesApi.endpoints.bulkDownloadImages.matchRejected,
    effect: async () => {
      log.debug('Bulk download request failed');

      // There isn't any toast to update if we get this event.
      toast({
        title: t('gallery.bulkDownloadRequestFailed'),
        status: 'error',
        isClosable: true,
      });
    },
  });

  startAppListening({
    actionCreator: socketBulkDownloadStarted,
    effect: async (action) => {
      // This should always happen immediately after the bulk download request, so we don't need to show a toast here.
      log.debug(action.payload.data, 'Bulk download preparation started');
    },
  });

  startAppListening({
    actionCreator: socketBulkDownloadCompleted,
    effect: async (action) => {
      log.debug(action.payload.data, 'Bulk download preparation completed');

      const { bulk_download_item_name } = action.payload.data;

      // TODO(psyche): This URL may break in some environments (e.g. Nvidia workbench) but we need to test it first
      const url = `/api/v1/images/download/${bulk_download_item_name}`;

      const toastOptions: UseToastOptions = {
        id: bulk_download_item_name,
        title: t('gallery.bulkDownloadReady', 'Download ready'),
        status: 'success',
        description: (
          <ExternalLink
            label={t('gallery.clickToDownload', 'Click here to download')}
            href={url}
            download={bulk_download_item_name}
          />
        ),
        duration: null,
        isClosable: true,
      };

      if (toast.isActive(bulk_download_item_name)) {
        toast.update(bulk_download_item_name, toastOptions);
      } else {
        toast(toastOptions);
      }
    },
  });

  startAppListening({
    actionCreator: socketBulkDownloadFailed,
    effect: async (action) => {
      log.debug(action.payload.data, 'Bulk download preparation failed');

      const { bulk_download_item_name } = action.payload.data;

      const toastOptions: UseToastOptions = {
        id: bulk_download_item_name,
        title: t('gallery.bulkDownloadFailed'),
        status: 'error',
        description: action.payload.data.error,
        duration: null,
        isClosable: true,
      };

      if (toast.isActive(bulk_download_item_name)) {
        toast.update(bulk_download_item_name, toastOptions);
      } else {
        toast(toastOptions);
      }
    },
  });
};
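The completed and failed handlers end with an identical update-or-create step keyed on `bulk_download_item_name`, so repeated socket events for the same item collapse into a single toast. If more events ever join them, that step could be factored out; a sketch with a hypothetical `upsertToast` helper (not part of this diff):

```ts
import type { UseToastOptions } from '@invoke-ai/ui-library';
import { toast } from 'common/util/toast';

// Hypothetical helper: update the toast if one with this id is on screen,
// otherwise open a new one.
const upsertToast = (id: string, options: UseToastOptions) => {
  if (toast.isActive(id)) {
    toast.update(id, options);
  } else {
    toast({ id, ...options });
  }
};
```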


@@ -1,13 +1,12 @@
import { $logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasCopiedToClipboard } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { addToast } from 'features/system/store/systemSlice';
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
import { t } from 'i18next';
-import { startAppListening } from '..';
-export const addCanvasCopiedToClipboardListener = () => {
+export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: canvasCopiedToClipboard,
    effect: async (action, { dispatch, getState }) => {


@@ -1,13 +1,12 @@
import { $logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasDownloadedAsImage } from 'features/canvas/store/actions';
import { downloadBlob } from 'features/canvas/util/downloadBlob';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
-import { startAppListening } from '..';
-export const addCanvasDownloadedAsImageListener = () => {
+export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: canvasDownloadedAsImage,
    effect: async (action, { dispatch, getState }) => {


@@ -1,4 +1,5 @@
import { logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasImageToControlAdapter } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
@@ -6,9 +7,7 @@ import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
-import { startAppListening } from '..';
-export const addCanvasImageToControlNetListener = () => {
+export const addCanvasImageToControlNetListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: canvasImageToControlAdapter,
    effect: async (action, { dispatch, getState }) => {


@@ -1,13 +1,12 @@
import { logger } from 'app/logging/logger';
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { canvasMaskSavedToGallery } from 'features/canvas/store/actions';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
-import { startAppListening } from '..';
-export const addCanvasMaskSavedToGalleryListener = () => {
+export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: canvasMaskSavedToGallery,
    effect: async (action, { dispatch, getState }) => {

Some files were not shown because too many files have changed in this diff.