Compare commits


198 Commits

Author SHA1 Message Date
psychedelicious
948ecf9333 chore: bump version to v5.5.0 2024-12-20 16:17:23 +11:00
psychedelicious
1038f7bcab Update invokeai_version.py 2024-12-20 10:17:09 +11:00
Riccardo Giovanetti
c7d9e2d62a translationBot(ui): update translation (Italian)
Currently translated at 99.3% (1635 of 1645 strings)

translationBot(ui): update translation (Italian)

Currently translated at 99.3% (1634 of 1645 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-12-20 10:07:15 +11:00
Riku
11c3a2e15d translationBot(ui): update translation (German)
Currently translated at 70.8% (1165 of 1645 strings)

Co-authored-by: Riku <riku.block@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-12-20 10:07:15 +11:00
psychedelicious
9e3ca383ec fix(ui): add missing model config to AnyModelConfig union type 2024-12-20 09:45:04 +11:00
Riku
bda83c2634 chore(ui): update typegen schema 2024-12-20 09:45:04 +11:00
Riku
525cb38c71 fix(app): fixed InputField default values 2024-12-20 09:30:56 +11:00
psychedelicious
a9a6720bad feat(app): change queue item execution log from debug to info
This provides useful context for subsequent logs during queue item execution.
2024-12-20 09:19:04 +11:00
psychedelicious
858bf9cf8c feat(api): less verbose uvicorn logs
Uvicorn's logging is rather verbose. This change adds a `log_level_network` config setting to independently control uvicorn's log outputs. The setting defaults to warning.

The change hides the helpful startup message that says the host and port we are running on.

For example: `Uvicorn running on http://0.0.0.0:9090 (Press CTRL+C to quit)`

The ASGI lifespan handler is updated to log an equivalent message on startup, regardless of log level settings.

Besides being helpful, the launcher relies on a message like this to launch the app. So, previously, if the user set their log level to anything above info (e.g. warning or error), the launcher would fail to open the app. This change prevents that edge case.
2024-12-20 09:19:04 +11:00
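For context, the "always log the startup message" trick used by the new lifespan handler (visible in the api_app.py diff near the bottom of this page) can be sketched roughly as follows. The logger name and message text here are illustrative; building a record by hand and passing it to `logger.handle()` bypasses the logger's own level check (handlers may still apply their own levels):

```python
import logging

logger = logging.getLogger("InvokeAI")       # illustrative name
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.WARNING)             # e.g. the user raised the log level

# logger.info(...) would be suppressed here, but a hand-built record passed to
# logger.handle() skips the logger's level check, so the message always appears.
msg = "Invoke running on http://0.0.0.0:9090 (Press CTRL+C to quit)"
record = logger.makeRecord(
    name=logger.name,
    level=logging.INFO,
    fn="",
    lno=0,
    msg=msg,
    args=(),
    exc_info=None,
)
logger.handle(record)
```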
David Hauptman
74a29c3735 re-format to fix ruff error 2024-12-19 22:33:17 +11:00
David Hauptman
6fc6be3aa0 Fix error message when adding a local path with quotes around the string 2024-12-19 22:33:17 +11:00
Mary Hipp
174ea021a6 lint 2024-12-18 12:48:15 -05:00
Mary Hipp
50b804e087 remove space 2024-12-18 12:48:15 -05:00
Mary Hipp
23270d7dfe update copy again 2024-12-18 12:48:15 -05:00
Mary Hipp
39e6f6d53f update whats new copy for control LoRAs 2024-12-18 12:48:15 -05:00
Mary Hipp
c154d833b9 raise error if control lora used with schnell 2024-12-18 10:19:28 -05:00
Mary Hipp
899a00af62 fix double filter on slow networks 2024-12-18 08:40:50 -05:00
Hosted Weblate
7c9ecdb362 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2024-12-18 18:05:42 +11:00
Riccardo Giovanetti
4a5255611b translationBot(ui): update translation (Italian)
Currently translated at 99.3% (1634 of 1644 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-12-18 18:05:42 +11:00
Thomas Bolteau
b5b39db304 translationBot(ui): update translation (French)
Currently translated at 97.0% (1595 of 1643 strings)

Co-authored-by: Thomas Bolteau <thomas.bolteau50@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/fr/
Translation: InvokeAI/Web UI
2024-12-18 18:05:42 +11:00
Linos
2cb5743cc5 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1644 of 1644 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (1643 of 1643 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (1643 of 1643 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2024-12-18 18:05:42 +11:00
Riku
64ee8d491e translationBot(ui): update translation (German)
Currently translated at 70.3% (1156 of 1643 strings)

Co-authored-by: Riku <riku.block@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-12-18 18:05:42 +11:00
psychedelicious
d70d48de45 chore(ui): update whats new 2024-12-18 17:52:39 +11:00
psychedelicious
3f8636330f chore: bump version to v5.4.4rc1 2024-12-18 17:52:39 +11:00
Mary Hipp
0c2f96daf1 add probe for ControlLoRA x diffusers 2024-12-17 14:01:41 -05:00
Brandon Rising
c9b2cce627 Add diffusers config object for control loras 2024-12-17 14:01:41 -05:00
Mary Hipp
401fb392b8 add FLUX control loras to starter models 2024-12-17 09:29:21 -05:00
Ryan Dick
594511cf4a Add FLUX Control LoRA weight param (#7452)
## Summary

Add the ability to control the weight of a FLUX Control LoRA.

## Example

Original image:
<div style="display: flex; gap: 10px;">
<img
src="https://github.com/user-attachments/assets/4a2d9f4a-b58b-4df6-af90-67b018763a38"
alt="Image 1" width="300"/>
</div>

Prompt: `a scarecrow playing tennis`
Weights: 0.4, 0.6, 0.8, 1.0
<div style="display: flex; gap: 10px;">
<img
src="https://github.com/user-attachments/assets/62b83fd6-46ce-460a-8d51-9c2cda9b05c9"
alt="Image 1" width="300"/>
<img
src="https://github.com/user-attachments/assets/75442207-1538-46bc-9d6b-08ac5c235c93"
alt="Image 2" width="300"/>
</div>
<div style="display: flex; gap: 10px;">
<img
src="https://github.com/user-attachments/assets/4a9dc9ea-9757-4965-837e-197fc9243007"
alt="Image 1" width="300"/>
<img
src="https://github.com/user-attachments/assets/846f6918-ca82-4482-8c19-19172752fa8c"
alt="Image 2" width="300"/>
</div>

## QA Instructions

- [x] weight control changes strength of control image
- [x] Test that results match across both quantized and non-quantized.

## Merge Plan

**_Do not merge this PR yet._**

1. Merge #7450 
2. Merge #7446 
3. Change target branch to main
4. Merge this branch.

## Checklist

- [ ] _The PR has a short but descriptive title, suitable for a
changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2024-12-17 08:46:31 -05:00
psychedelicious
d764aa4a2a fix(ui): ensure only the expected properties are used when converting between control layer adapter settings 2024-12-17 13:36:11 +00:00
psychedelicious
ea34726329 chore(ui): lint 2024-12-17 13:36:11 +00:00
Ryan Dick
9b615e0de7 Fix bugs when switching control layer type. This logic still feels very hacky. 2024-12-17 13:36:11 +00:00
Ryan Dick
a463e97269 Bump FluxControlLoRALoaderInvocation version. 2024-12-17 13:36:10 +00:00
Ryan Dick
b272d46056 Enable ability to control the weight of FLUX Control LoRAs. 2024-12-17 13:36:10 +00:00
Ryan Dick
4d5f74c05b LoRA refactor to enable FLUX control LoRAs w/ quantized transformers (#7446)
## Summary

This PR refactors the LoRA handling code to enable the use of FLUX
control LoRAs on top of quantized transformers.

Changes:
- Renamed a bunch of the model patching utilities to reflect that they
are not LoRA-specific
- Improved the unit test coverage.
- Refactored the handling of 'sidecar' patch layers to make them work
with more layer patch types. (This was necessary to get FLUX control
LoRAs working on top of quantized models.)
- Removed `ONNXModelPatcher`. It is out-of-date and hasn't been used in
a while.


## QA Instructions

I completed the following tests.

**These should be repeated after changing the target branch to main.**

**Due to the large surface area of this PR, reviewers should do
regression tests on a range of LoRA formats. There is a risk of
regression on a specific format that was missed during the
refactoring.**

- [x] FLUX Control LoRA + full FLUX transformer
- [x] FLUX Control LoRA + BnB NF4 quantized transformer
- [x] FLUX Control LoRA + GGUF quantized transformer
- [x] FLUX Control LoRA + non-control LoRA + full FLUX transformer
- [x] FLUX Control LoRA + non-control LoRA + BnB quantized transformer
- [x] FLUX Control LoRA + non-control LoRA + GGUF quantized transformer
- Test the following cases for regression:
    - [x] Misc SD1/SDXL LoRA variants (LoRA, LoKr, IA3)
    - [x] FLUX, non-quantized, variety of LoRA formats
    - [x] FLUX, quantized, variety of LoRA formats

## Merge Plan

**_Don't merge this PR yet._**

Merge plan:
1. First merge brandon/flux-tools-loras into main
2. Change the target branch of this PR to main
3. Review / test / merge this PR

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [x] _Tests added / updated (if applicable)_
- [x] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2024-12-17 08:30:50 -05:00
Ryan Dick
dd09509dbd Rename ModelPatcher -> LayerPatcher to avoid conflicts with another ModelPatcher definition. 2024-12-17 13:20:19 +00:00
Ryan Dick
7fad4c9491 Rename LoRAModelRaw to ModelPatchRaw. 2024-12-17 13:20:19 +00:00
Ryan Dick
b820862eab Rename ModelPatcher methods to reflect that they are general model patching methods and are not LoRA-specific. 2024-12-17 13:20:19 +00:00
Ryan Dick
c604a0956e Rename LoRAPatcher -> ModelPatcher. 2024-12-17 13:20:19 +00:00
Ryan Dick
9369b39a12 Add GGMLTensor op. 2024-12-17 13:20:19 +00:00
Ryan Dick
80f64abd1e Use a FluxControlLoRALayer when loading FLUX control LoRAs. 2024-12-17 13:20:19 +00:00
Ryan Dick
37e3089457 Push LoRA layer reshaping down into the patch layers and add a new FluxControlLoRALayer type. 2024-12-17 13:20:19 +00:00
Ryan Dick
fe09f2d27a Move handling of LoRA scale and patch weight down into the layer patch classes. 2024-12-17 13:20:19 +00:00
Ryan Dick
e7e3f7e144 Ensure that patches are on the correct device when used in sidecar wrappers. 2024-12-17 13:20:19 +00:00
Ryan Dick
606d58d7db Add sidecar wrapper for FLUX RMSNorm layers to support SetParameterLayers used by FLUX structural control LoRAs. 2024-12-17 13:20:19 +00:00
Ryan Dick
c76a448846 Delete old sidecar_layers/ dir. 2024-12-17 13:20:19 +00:00
Ryan Dick
46133b5656 Switch LoRAPatcher to use the new sidecar_wrappers/ rather than sidecar_layers/. 2024-12-17 13:20:19 +00:00
Ryan Dick
ac28370fd2 Break up functions in LoRAPatcher in preparation for more refactoring. 2024-12-17 13:20:19 +00:00
Ryan Dick
1e0552c813 Add optimized implementations for the LinearSidecarWrapper when using LoRALayer or ConcatenatedLoRALayer patch types (since these are the most common). 2024-12-17 13:20:19 +00:00
Ryan Dick
e2451ef5ca Add unit tests for LinearSidecarWrapper (and fix a bug). 2024-12-17 13:20:19 +00:00
Ryan Dick
443d838fd0 Add initial basic implementation of sidecar wrappers. 2024-12-17 13:20:19 +00:00
Ryan Dick
3a8a5442ea Add basic unit tests for SetParameterLayer. 2024-12-17 13:20:19 +00:00
Ryan Dick
808e3770d3 Remove AnyLoRALayer type definition in favor of using BaseLayerPatch base class. 2024-12-17 13:20:19 +00:00
Ryan Dick
2b441d6a2d Add BaseLayerPatch ABC to clarify the intended patch interface. 2024-12-17 13:20:19 +00:00
Ryan Dick
58de93a89e Delete empty file. 2024-12-17 13:20:19 +00:00
Ryan Dick
1eede4315e Delete ONNXModelPatcher. It is outdated and hasn't been used for a long time. 2024-12-17 13:20:19 +00:00
Ryan Dick
8ea697d733 Mark LoRALayerBase.rank(...) as a private method. 2024-12-17 13:20:19 +00:00
Ryan Dick
693d42661c Add basic unit tests for LoRALayer. 2024-12-17 13:20:19 +00:00
Ryan Dick
41664f88db Rename backend/patches/conversions/ to backend/patches/lora_conversions/ 2024-12-17 13:20:19 +00:00
Ryan Dick
42f8d6aa11 Rename backend/lora/ to backend/patches 2024-12-17 13:20:19 +00:00
psychedelicious
5f41a69665 feat(ui): prevent invoking when >1 control lora enabled 2024-12-17 07:28:45 -05:00
Ryan Dick
7da90a9b6b Ensure that model probe does not crash with integer state dict keys. 2024-12-17 07:28:45 -05:00
Ryan Dick
440185cc40 Simplify FLUX control LoRA probing. 2024-12-17 07:28:45 -05:00
Ryan Dick
26edc71268 ruff format 2024-12-17 07:28:45 -05:00
Ryan Dick
a4bed7aee3 Minor tidy of FLUX control LoRA implementation. (mostly documentation) 2024-12-17 07:28:45 -05:00
Ryan Dick
5fcd76a712 Fix frontend FLUX graph construction for FLUX control LoRAs. 2024-12-17 07:28:45 -05:00
Mary Hipp
516ffa641c add logic to change type to control_lora properly 2024-12-17 07:28:45 -05:00
Ryan Dick
d84adfd39f Clean up FLUX control LoRA pre-processing logic. 2024-12-17 07:28:45 -05:00
Ryan Dick
ac82f73dbe Make FluxControlLoRALoaderOutput.control_lora non-optional. 2024-12-17 07:28:45 -05:00
Brandon Rising
70811d0bd0 Remove unexpected artifacts in output images 2024-12-17 07:28:45 -05:00
Mary Hipp
e0344a302c feat(ui): update FLUX graph building to include control layers with control loras 2024-12-17 07:28:45 -05:00
Mary Hipp
92b0d89b70 (ui): replace logic for controlnet/t2i to include control_loras and display default settings in model manager 2024-12-17 07:28:45 -05:00
Mary Hipp
da213e4638 feat(ui): add control loras to control adapter model options, add default settings for preprocessor in probe 2024-12-17 07:28:45 -05:00
Brandon Rising
246b59f148 Run pnpm fix, regenerate schema 2024-12-17 07:28:45 -05:00
Brandon Rising
046d19446c Rename Structural Lora to Control Lora 2024-12-17 07:28:45 -05:00
Ryan Dick
040551d4fb Fixes to get FLUX Control LoRA working. 2024-12-17 07:28:45 -05:00
Brandon Rising
f53da60b84 Lots of updates centered around using the lora patcher rather than changing the modules in the transformer model 2024-12-17 07:28:45 -05:00
Brandon Rising
5a035dd19f Support bnb quantized nf4 flux models, use controlnet vae, only support 1 structural lora per transformer. Various other refactors and bugfixes 2024-12-17 07:28:45 -05:00
Brandon Rising
f3b253987f Initial setup for flux tools control loras 2024-12-17 07:28:45 -05:00
psychedelicious
25ff7918e8 chore(ui): knip 2024-12-16 18:57:43 -08:00
psychedelicious
09fc60acb0 feat(ui): show toasts when filter, transform, select or crop fails 2024-12-16 18:57:43 -08:00
psychedelicious
6f55f2c723 refactor(ui): simpler handling for graph building in enqueuerequested listener 2024-12-16 18:57:43 -08:00
psychedelicious
03b815c884 feat(ui): improved error handling for generation mode calculation
Wrap logic that might throw in a result and log the error before re-throwing.
2024-12-16 18:57:43 -08:00
psychedelicious
9cecdd17eb feat(ui): improved error handling when getting composite canvas images
Wrap logic that might throw in a result and log the error before re-throwing.
2024-12-16 18:57:43 -08:00
psychedelicious
6b0f7ab57c feat(ui): improved error handling during rasterization
- Ensure the currently-rasterizing adapter is reset to `null` on success or failure of a rasterization operation. In case of failure, this prevents the UI from getting stuck with a disabled Invoke button and tooltip message "Canvas is busy (rasterizing)".
- Log the error if there is one.
2024-12-16 18:57:43 -08:00
psychedelicious
c805e38da2 fix(ui): remove duplicate log on socket connect 2024-12-16 18:57:43 -08:00
psychedelicious
2c1de0f07d fix(ui): missing translation string 2024-12-12 22:44:43 -08:00
psychedelicious
261d5ab488 docs: add redirect for patchmatch docs
The patchmatch lib links directly to our docs: https://invoke-ai.github.io/InvokeAI/installation/060_INSTALL_PATCHMATCH/

That URL doesn't exist any more. Added a redirect to the new URL.
2024-12-12 22:41:05 -08:00
Mary Hipp
ca571cd7a9 swap global and regional 2024-12-12 15:53:18 -05:00
Eugene Brodsky
4c94d41fa9 (chore) ruff format 2024-12-04 17:02:08 +00:00
Eugene Brodsky
4036244ee9 (app) clarify log message when migrating old .cache 2024-12-04 17:02:08 +00:00
Eugene Brodsky
d06232d9ba (config) ensure legacy model configs and node template are writable by the user even if the source files are read-only 2024-12-04 17:02:08 +00:00
Eugene Brodsky
bacbdfb8fc (docker) add comments in docker-entrypoint.sh and ensure variables are not null in bash expansion 2024-12-04 17:02:08 +00:00
Eugene Brodsky
59f42f4682 (pkg) reduce max supported python version as we have not yet tested 3.12 well enough 2024-12-04 17:02:08 +00:00
Eugene Brodsky
a636ac2899 (docker) use 'uv' to manage python installation and the invoke dependencies, since Ubuntu 24.04 comes with Python 3.12 which we do not yet support 2024-12-04 17:02:08 +00:00
Richard Lyons
bd478360d9 Upgrade docker build to ubuntu 24 2024-12-04 17:02:08 +00:00
Richard Lyons
ac0db07649 Fix docker deployment 2024-12-04 17:02:08 +00:00
psychedelicious
b7132ce9e7 fix(ui): capitalization for vietnamese language 2024-12-03 14:52:28 -08:00
psychedelicious
90f30e7748 chore: bump version to v5.4.3 2024-12-03 14:50:09 -08:00
Riccardo Giovanetti
6b86a66bc7 translationBot(ui): update translation (Italian)
Currently translated at 99.3% (1633 of 1643 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-12-03 13:16:12 -08:00
Linos
aa97e626e9 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1643 of 1643 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 99.8% (1641 of 1643 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2024-12-03 13:13:26 -08:00
Ryan Dick
c90736093f Revert FLUX performance improvement that fails on MacOS (#7423)
## Summary

https://github.com/invoke-ai/InvokeAI/issues/7422

As reported in the above ticket, a recent FLUX performance improvement
caused a regression on MacOS. This PR reverts the offending part of the
change.

## Related Issues / Discussions

- Closes #7422 
- Original perf improvement:
https://github.com/invoke-ai/InvokeAI/pull/7399

## QA Instructions

I don't have a Mac capable of running this test, so I'm trusting the report
in #7422 that this fixes the problem.

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [x] _Tests added / updated (if applicable)_
- [x] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2024-12-03 10:58:00 -05:00
Ryan Dick
0bff4ace1b Revert performance improvement, because it caused flux inference to fail on Mac: https://github.com/invoke-ai/InvokeAI/issues/7422 2024-12-03 15:18:58 +00:00
psychedelicious
5eb382074e tweak(ui): slightly clearer logic for skipping regional guidance 2024-12-02 23:46:21 -05:00
psychedelicious
46aa930526 fix(ui): skip disabled ref images 2024-12-02 23:46:21 -05:00
psychedelicious
3305bad0c2 fix(app): queue item id check before setting cancel flag should use != instead of is not
The `is` operator compares references, not values. Thanks to a wonderfully unintuitive quirk of Python, `is` works on integers from `-5` to `256`, inclusive.

Whenever integers in this range are used for a value, Python internally returns a reference to a stable object in memory. When integers outside this range are used as a value, Python creates a new object in memory for that integer.

See `PyLong_FromLong` documentation here: https://docs.python.org/3/c-api/long.html

Tying this back to our session processor, we were using `is` to compare the queue item ids for equality. Our queue item ids start at 0, and each queue item created increments this by one. So this comparison works only for the first 256 queue items on the machine.

Starting with the 257th queue item, the comparison starts returning `False`, and cancelation gets weird.

Easy fix - use `!=` instead of `is not`.
2024-12-02 23:22:58 -05:00
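A tiny, self-contained illustration of the quirk described in this commit message (the second value in each pair is computed at runtime via `sum()` so the compiler cannot fold it into a shared constant):

```python
# CPython caches int objects in the range [-5, 256], so `is` happens to agree
# with `==` for them; anything larger is a fresh object and `is` breaks down.
a = 256
b = sum([255, 1])           # computed at runtime, still the cached 256 object
print(a is b, a == b)       # True True

c = 257
d = sum([256, 1])           # computed at runtime, a brand-new int object
print(c is d, c == d)       # False True

# This is why identity checks on queue item ids only "work" while the ids stay
# inside the cached range; `!=` compares values and is always correct.
```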
psychedelicious
13703d8f55 chore: bump version to v5.4.3rc2 2024-12-02 15:02:30 -08:00
psychedelicious
60d838d0a5 chore(ui): update whats new copy 2024-12-02 15:02:30 -08:00
Riccardo Giovanetti
2a157a44bf translationBot(ui): update translation (Italian)
Currently translated at 99.3% (1633 of 1643 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-12-02 14:52:05 -08:00
James Reynolds
d61b5833c2 Fix documentation broken links and remove whitespace at end of lines 2024-12-02 14:49:53 -08:00
Jonathan
c094838c6a Update model_util.py 2024-12-02 14:35:02 -08:00
Hosted Weblate
2d334c8dd8 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2024-12-02 14:05:51 -08:00
Mary Hipp
a6be26e174 fix(worker): only apply processor cancel logic if cancel event is for current queue item 2024-12-02 14:03:05 -08:00
psychedelicious
f8c7adddd0 feat(ui): add vietnamese to language picker
Closes #7384
2024-12-02 08:12:14 -05:00
psychedelicious
17da1d92e9 fix(ui): remove "adding to" text on Invoke tooltip on Workflows/Upscaling tabs
The "adding to" text indicates if images are going to the gallery or staging area. This info is relevant only to the canvas tab, but was displayed on Upscaling and Workflows tabs. Removed it from those tabs.
2024-12-02 08:08:16 -05:00
psychedelicious
1cc57a4854 chore(ui): lint 2024-12-02 07:59:12 -05:00
psychedelicious
3993fae331 fix(ui): unable to invoke w/ empty inpaint mask or raster layer
Removed the empty state checks for these layer types - it's always OK to invoke when they are empty.
2024-12-02 07:59:12 -05:00
psychedelicious
1446526d55 tidy(ui): translation keys for canvas layer warnings 2024-12-02 07:59:12 -05:00
psychedelicious
62c024e725 feat(ui): add gallery image ctx menu items to create ref image from image
Appears these actions disappeared at some point. Restoring them.
2024-12-02 07:52:58 -05:00
psychedelicious
1e92bb4e94 fix(ui): ref image defaults to prev ref image's image selection
A redux selector is used to get the "default" IP Adapter. The selector uses the model list query result to select an IP Adapter model to be preset by default.

The selector is memoized, so if we mutate the returned default IP Adapter state, it mutates the result of the selector for all consumers.

For example, the `image` property of the default IP Adapter selector result is `null`. When we set the `image` property of the selector result while creating an IP Adapter, this does not trigger the selector to recompute its result. We end up setting the image for the selector result directly, and all other consumers now have that same image set.

Solution - we need to clone the selector result everywhere it is used. This was missed in a few spots, causing the issue.
2024-12-02 07:48:39 -05:00
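The same hazard can be sketched outside Redux with any memoized function that hands back a shared object. This is a hypothetical Python analogy, not InvokeAI code; the function name and fields are made up for illustration:

```python
import copy
from functools import lru_cache


@lru_cache(maxsize=1)
def default_ip_adapter() -> dict:
    # Memoized: every caller receives the *same* dict object.
    return {"model": "some-ip-adapter", "image": None}


a = default_ip_adapter()
a["image"] = "cat.png"            # mutates the shared, cached result...

b = default_ip_adapter()
print(b["image"])                 # 'cat.png' - every consumer now sees it

# The fix mirrors the one described above: clone the memoized result before
# customizing it, so the shared default is never mutated.
c = copy.deepcopy(default_ip_adapter())
c["image"] = "dog.png"            # safe: only the clone is modified
```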
psychedelicious
db6398fdf6 feat(ui): less confusing empty state for rg ref images
It was easy to misunderstand the empty state for a regional guidance reference image. There was no label, so it seemed like it was the whole region that was empty.

This small change adds the "Reference Image" heading to the empty state, so it's clear that the empty state messaging refers to this reference image, not the whole regional guidance layer.
2024-12-02 07:46:10 -05:00
Riccardo Giovanetti
ebd73a2ac2 translationBot(ui): update translation (Italian)
Currently translated at 98.7% (1622 of 1643 strings)

Co-authored-by: Riccardo Giovanetti <riccardo.giovanetti@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/it/
Translation: InvokeAI/Web UI
2024-12-02 02:13:51 -08:00
Hosted Weblate
8ee95cab00 translationBot(ui): update translation files
Updated by "Cleanup translation files" hook in Weblate.

Co-authored-by: Hosted Weblate <hosted@weblate.org>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/
Translation: InvokeAI/Web UI
2024-12-02 02:13:51 -08:00
Linos
d1184201a8 translationBot(ui): update translation (Vietnamese)
Currently translated at 100.0% (1643 of 1643 strings)

translationBot(ui): update translation (Vietnamese)

Currently translated at 100.0% (1638 of 1638 strings)

Co-authored-by: Linos <linos.coding@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/vi/
Translation: InvokeAI/Web UI
2024-12-02 02:13:51 -08:00
Nik Nikovsky
5887891654 translationBot(ui): update translation (Polish)
Currently translated at 4.9% (81 of 1638 strings)

Co-authored-by: Nik Nikovsky <zejdzztegomaila@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/pl/
Translation: InvokeAI/Web UI
2024-12-02 02:13:51 -08:00
Riku
765ca4e004 translationBot(ui): update translation (German)
Currently translated at 69.7% (1142 of 1638 strings)

Co-authored-by: Riku <riku.block@gmail.com>
Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/de/
Translation: InvokeAI/Web UI
2024-12-02 02:13:51 -08:00
Riku
159b00a490 fix(app): adjust session queue api type 2024-12-01 20:06:05 -08:00
Riku
3fbf6f2d2a chore(ui): update typegen schema 2024-12-01 19:56:09 -08:00
Riku
931fca7cd1 fix(ui): call cancel instead of clear queue 2024-12-01 19:53:12 -08:00
Riku
db84a3a5d4 refactor(ui): move clear queue hook to separate file 2024-12-01 19:42:25 -08:00
psychedelicious
ca8313e805 feat(ui): add new layer from image menu items for staging area
The layers are disabled when created so as to not interfere with the canvas state.
2024-12-01 19:37:49 -08:00
psychedelicious
df849035ee feat(ui): allow setting isEnabled, isLocked and name in createNewCanvasEntityFromImage util 2024-12-01 19:37:49 -08:00
psychedelicious
8d97fe69ca feat(ui): use imageDTOToFile in staging area save to gallery button 2024-12-01 19:37:49 -08:00
psychedelicious
9044e53a9b feat(ui): add imageDTOToFile util 2024-12-01 19:37:49 -08:00
Jonathan
6012b0f912 Update flux_text_encoder.py
Updated version number for FLUX Text Encoding.
2024-11-30 08:29:21 -05:00
Jonathan
bb0ed5dc8a Update flux_denoise.py
Updated node version for FLUX Denoise.
2024-11-30 08:29:21 -05:00
Ryan Dick
021552fd81 Avoid unnecessary dtype conversions with rope encodings. 2024-11-29 12:32:50 -05:00
Ryan Dick
be73dbba92 Use view() instead of rearrange() for better performance. 2024-11-29 12:32:50 -05:00
Ryan Dick
db9c0cad7c Replace custom RMSNorm implementation with torch.nn.functional.rms_norm(...) for improved speed. 2024-11-29 12:32:50 -05:00
Ryan Dick
54b7f9a063 FLUX Regional Prompting (#7388)
## Summary

This PR adds support for regional prompting with FLUX.

### Example 1
Global prompt: `An architecture rendering of the reception area of a
corporate office with modern decor.`
<img width="1386" alt="image"
src="https://github.com/user-attachments/assets/c8169bdb-49a9-44bc-bd9e-58d98e09094b">

![image](https://github.com/user-attachments/assets/4a426be9-9d7a-4527-b27c-2d2514ee73fe)

## QA Instructions

- [x] Test that there is no slowdown in the base case with a single
global prompt.
- [x] Test image fully covered by regional masks.
- [x] Test image covered by region masks with small gaps.
- [x] Test region masks with large unmasked ‘background’ regions
- [x] Test region masks with significant overlap
- [x] Test multiple global prompts.
- [x] Test no global prompt.
- [x] Test regional negative prompts (It runs... but results are not
great. Needs more tuning to be useful.)
- Test compatibility with:
    - [x] ControlNet
    - [x] LoRA
    - [x] IP-Adapter

## Remaining TODO

- [x] Disable the following UI features for FLUX prompt regions:
negative prompts, reference images, auto-negative.

## Checklist

- [x] _The PR has a short but descriptive title, suitable for a
changelog_
- [x] _Tests added / updated (if applicable)_
- [x] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
2024-11-29 08:56:42 -05:00
psychedelicious
7d488a5352 feat(ui): add delete button to regional ref image empty state 2024-11-29 15:51:24 +10:00
psychedelicious
4d7667f63d fix(ui): add missing translations 2024-11-29 15:43:49 +10:00
psychedelicious
08704ee8ec feat(ui): use canvas layer validators in control/ip adapter graph builders 2024-11-29 15:32:48 +10:00
psychedelicious
5910892c33 Merge remote-tracking branch 'origin/main' into ryan/flux-regional-prompting 2024-11-29 15:19:39 +10:00
psychedelicious
46a09d9e90 feat(ui): format warnings tooltip 2024-11-29 13:32:51 +10:00
psychedelicious
df0c7d73f3 feat(ui): use regional guidance validation utils in graph builders 2024-11-29 13:26:09 +10:00
psychedelicious
3905c97e32 feat(ui): return translation keys from validation utils instead of translated strings 2024-11-29 13:25:09 +10:00
psychedelicious
0be796a808 feat(ui): use layer validation utils in invoke readiness utils 2024-11-29 13:14:26 +10:00
psychedelicious
7dd33b0f39 feat(ui): add indicator to canvas layer headers, displaying validation warnings
If there are any issues with the layer, the icon is displayed. If the layer is disabled, the icon is greyed out but still visible.
2024-11-29 13:13:47 +10:00
psychedelicious
484aaf1595 feat(ui): add canvas layer validation utils
These helpers consolidate layer validation checks. For example, checking that the layer has content drawn, is compatible with the selected main model, has valid reference images, etc.
2024-11-29 13:12:32 +10:00
psychedelicious
c276b60af9 tidy(ui): use object for addRegions graph builder util arg 2024-11-29 08:49:41 +10:00
Ryan Dick
5d8dd6e26e Fix FLUX regional negative prompts. 2024-11-28 18:49:29 +00:00
Emmanuel Ferdman
5bca68d873 docs: update code of conduct reference
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
2024-11-27 17:38:33 -08:00
Ryan Dick
64364e7911 Short-circuit if there are no region masks in FLUX and don't apply attention masking. 2024-11-27 22:40:10 +00:00
Ryan Dick
6565cea039 Comment unused _prepare_unrestricted_attn_mask(...) for future reference. 2024-11-27 22:16:44 +00:00
Ryan Dick
3ebd8d6c07 Delete outdated TODO comment. 2024-11-27 22:13:25 +00:00
Ryan Dick
e970185161 Tweak flux regional prompting attention scheme based on latest experimentation. 2024-11-27 22:13:07 +00:00
Ryan Dick
fa5653cdf7 Remove unused 'denoise' param to addRegions(). 2024-11-27 17:08:42 +00:00
Ryan Dick
9a7b000995 Update frontend to support regional prompts with FLUX in the canvas. 2024-11-27 17:04:43 +00:00
Ryan Dick
3a27242838 Bump transformers. The main motivation for this bump is to ingest a fix for DepthAnything postprocessing artifacts. 2024-11-27 07:46:16 -08:00
Ryan Dick
8cfb032051 Add utility ImagePanelLayoutInvocation for working with In-Context LoRA workflows. 2024-11-26 20:58:31 -08:00
Ryan Dick
06a9d4e2b2 Use a Textarea component for the FluxTextEncoderInvocation prompt field. 2024-11-26 20:58:31 -08:00
Brandon Rising
ed46acee79 fix: Fail scan on InvalidMagicError in picklescan, update default for read_checkpoint_meta to scan unless explicitly told not to 2024-11-26 16:17:12 -05:00
Ryan Dick
b54463d294 Allow regional prompting background regions to attend to themselves and to the entire txt embedding. 2024-11-26 17:57:31 +00:00
Ryan Dick
faee79dc95 Distinguish between restricted and unrestricted attn masks in FLUX regional prompting. 2024-11-26 16:55:52 +00:00
Mary Hipp
965cd76e33 lint fix 2024-11-26 11:25:53 -05:00
Mary Hipp
e5e8cbf34c shorten reference image mode descriptions; 2024-11-26 11:25:53 -05:00
Mary Hipp
3412a52594 (ui): updates various informational tooltips, adds descriptons to IP adapter method options 2024-11-26 11:25:53 -05:00
Ryan Dick
e01f66b026 Apply regional attention masks in the single stream blocks in addition to the double stream blocks. 2024-11-25 22:40:08 +00:00
Ryan Dick
53abdde242 Update Flux RegionalPromptingExtension to prepare both a mask with restricted image self-attention and a mask with unrestricted image self attention. 2024-11-25 22:04:23 +00:00
Ryan Dick
94c088300f Be smarter about selecting the global CLIP embedding for FLUX regional prompting. 2024-11-25 20:15:04 +00:00
Ryan Dick
3741a6f5e0 Fix device handling for regional masks and apply the attention mask in the FLUX double stream block. 2024-11-25 16:02:03 +00:00
Kent Keirsey
059336258f Create SECURITY.md 2024-11-25 04:10:03 -08:00
Ryan Dick
2c23b8414c Use a single global CLIP embedding for FLUX regional guidance. 2024-11-22 23:01:43 +00:00
Mary Hipp
271cc52c80 fix(ui): use token for download if its in store 2024-11-22 12:08:05 -05:00
Ryan Dick
20356c0746 Fixup the logic for preparing FLUX regional prompt attention masks. 2024-11-21 22:46:25 +00:00
psychedelicious
e44458609f chore: bump version to v5.4.3rc1 2024-11-21 10:32:43 -08:00
psychedelicious
69d86a7696 feat(ui): address feedback 2024-11-21 09:54:35 -08:00
Hippalectryon
56db1a9292 Use proxyrect and setEntityPosition to sync transformer position 2024-11-21 09:54:35 -08:00
Hippalectryon
cf50e5eeee Make sure the canvas is focused 2024-11-21 09:54:35 -08:00
Hippalectryon
c9c07968d2 lint 2024-11-21 09:54:35 -08:00
Hippalectryon
97d0757176 use $isInteractable instead of $isDisabled 2024-11-21 09:54:35 -08:00
Hippalectryon
0f51b677a9 refactor 2024-11-21 09:54:35 -08:00
Hippalectryon
56ca94c3a9 Don't move if the layer is disabled
Lint
2024-11-21 09:54:35 -08:00
Hippalectryon
28d169f859 Allow moving layers using the keyboard 2024-11-21 09:54:35 -08:00
psychedelicious
92f71d99ee tweak(ui): use X icon for rg ref image delete button 2024-11-21 08:50:39 -08:00
psychedelicious
0764c02b1d tweak(ui): code style 2024-11-21 08:50:39 -08:00
psychedelicious
081c7569fe feat(ui): add global ref image empty state 2024-11-21 08:50:39 -08:00
psychedelicious
20f6532ee8 feat(ui): add empty state for regional guidance ref image 2024-11-21 08:50:39 -08:00
Mary Hipp
b9e8910478 feat(ui): add actions for video modal clicks 2024-11-21 11:15:55 -05:00
Mary Hipp
ded8391e3c use nanostore for schema parsed instead 2024-11-20 20:13:31 -05:00
Mary Hipp
e9dd2c396a limit to one hook 2024-11-20 20:13:31 -05:00
Mary Hipp
0d86de0cb5 fix(ui): make sure schema has loaded before trying to load any workflows 2024-11-20 20:13:31 -05:00
Ryan Dick
bad1149504 WIP - add rough logic for preparing the FLUX regional prompting attention mask. 2024-11-20 22:29:36 +00:00
Ryan Dick
fda7aaa7ca Pass RegionalPromptingExtension down to the CustomDoubleStreamBlockProcessor in FLUX. 2024-11-20 19:48:04 +00:00
Ryan Dick
85c616fa34 WIP - Pass prompt masks to FLUX model during denoising. 2024-11-20 18:51:43 +00:00
psychedelicious
549f4e9794 feat(ui): set default infill method to lama 2024-11-20 11:19:17 -05:00
psychedelicious
ef8ededd2f fix(ui): disable width and height output on image batch output
There's a technical challenge with outputting these values directly. `ImageField` does not store them, so the batch's `ImageField` collection does not have width and height for each image.

In order to set up the batch and pass along width and height for each image, we'd need to make a network request for each image when the user clicks Invoke. It would often be cached, but this will eventually create a scaling issue and poor user experience.

As a very simple workaround, users can pass the batch image output into an `Image Primitive` node to access the width and height.

This change is implemented by adding some simple special handling when parsing the output fields for the `image_batch` node.

I'll keep this situation in mind when extending the batching system to other field types.
2024-11-20 11:16:54 -05:00
Mary Hipp
1948ffe106 make sure Soft Edge Detection has preprocessor applied 2024-11-20 08:46:02 -05:00
200 changed files with 6022 additions and 1669 deletions

14
SECURITY.md Normal file
View File

@@ -0,0 +1,14 @@
# Security Policy
## Supported Versions
Only the latest version of Invoke will receive security updates.
We do not currently maintain multiple versions of the application with updates.
## Reporting a Vulnerability
To report a vulnerability, contact the Invoke team directly at security@invoke.ai
At this time, we do not maintain a formal bug bounty program.
You can also share identified security issues with our team on huntr.com

View File

@@ -2,29 +2,42 @@
## Builder stage
FROM library/ubuntu:23.04 AS builder
FROM library/ubuntu:24.04 AS builder
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
python3-venv \
python3-pip \
build-essential
build-essential \
git
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/
ENV VIRTUAL_ENV=/opt/venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV INVOKEAI_SRC=/opt/invokeai
ENV PYTHON_VERSION=3.11
ENV UV_COMPILE_BYTECODE=1
ENV UV_LINK_MODE=copy
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM
WORKDIR ${INVOKEAI_SRC}
# Switch to the `ubuntu` user to work around dependency issues with uv-installed python
RUN mkdir -p ${VIRTUAL_ENV} && \
mkdir -p ${INVOKEAI_SRC} && \
chmod -R a+w /opt
USER ubuntu
# Install python and create the venv
RUN uv python install ${PYTHON_VERSION} && \
uv venv --relocatable --prompt "invoke" --python ${PYTHON_VERSION} ${VIRTUAL_ENV}
WORKDIR ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./
@@ -32,25 +45,18 @@ COPY pyproject.toml ./
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is default
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
fi &&\
fi && \
uv pip install --python ${PYTHON_VERSION} $extra_index_url_arg -e "."
# xformers + triton fails to install on arm64
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
pip install $extra_index_url_arg -e ".[xformers]"; \
else \
pip install $extra_index_url_arg -e "."; \
fi
# #### Build the Web UI ------------------------------------
#### Build the Web UI ------------------------------------
FROM node:20-slim AS web-builder
ENV PNPM_HOME="/pnpm"
@@ -66,7 +72,7 @@ RUN npx vite build
#### Runtime stage ---------------------------------------
FROM library/ubuntu:23.04 AS runtime
FROM library/ubuntu:24.04 AS runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
@@ -83,17 +89,16 @@ RUN apt update && apt install -y --no-install-recommends \
gosu \
magic-wormhole \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
libgl1 \
libglx-mesa0 \
build-essential \
libopencv-dev \
libstdc++-10-dev &&\
apt-get clean && apt-get autoclean
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV VIRTUAL_ENV=/opt/venv
ENV PYTHON_VERSION=3.11
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0
ENV INVOKEAI_PORT=9090
@@ -101,6 +106,14 @@ ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
ENV CONTAINER_GID=${CONTAINER_GID:-1000}
# Install `uv` for package management
# and install python for the ubuntu user (expected to exist on ubuntu >=24.x)
# this is too tiny to optimize with multi-stage builds, but maybe we'll come back to it
COPY --from=ghcr.io/astral-sh/uv:0.5.5 /uv /uvx /bin/
USER ubuntu
RUN uv python install ${PYTHON_VERSION}
USER root
# --link requires buldkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
@@ -115,7 +128,7 @@ WORKDIR ${INVOKEAI_SRC}
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python3 -c "from patchmatch import patch_match"
RUN python -c "from patchmatch import patch_match"
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

View File

@@ -16,6 +16,9 @@ set -e -o pipefail
USER_ID=${CONTAINER_UID:-1000}
USER=ubuntu
# if the user does not exist, create it. It is expected to be present on ubuntu >=24.x
_=$(id ${USER} 2>&1) || useradd -u ${USER_ID} ${USER}
# ensure the UID is correct
usermod -u ${USER_ID} ${USER} 1>/dev/null
### Set the $PUBLIC_KEY env var to enable SSH access.
@@ -36,6 +39,8 @@ fi
mkdir -p "${INVOKEAI_ROOT}"
chown --recursive ${USER} "${INVOKEAI_ROOT}" || true
cd "${INVOKEAI_ROOT}"
export HF_HOME=${HF_HOME:-$INVOKEAI_ROOT/.cache/huggingface}
export MPLCONFIGDIR=${MPLCONFIGDIR:-$INVOKEAI_ROOT/.matplotlib}
# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"

View File

@@ -50,7 +50,7 @@ Applications are built on top of the invoke framework. They should construct `in
### Web UI
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/invokeai/frontend` and the backend code is found in `/invokeai/app/api_app.py` and `/invokeai/app/api/`. The code is further organized as such:
| Component | Description |
| --- | --- |
@@ -62,7 +62,7 @@ The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.t
### CLI
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/invokeai/frontend/cli`.
## Invoke
@@ -70,7 +70,7 @@ The Invoke framework provides the interface to the underlying AI systems and is
### Invoker
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
The invoker (`/invokeai/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
@@ -82,12 +82,12 @@ The session graph does not support looping. This is left as an application probl
### Invocations
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/invokeai/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
### Services
Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/invokeai/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
## AI Core
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
The AI Core is represented by the rest of the code base (i.e. the code outside of `/invokeai/app/`).

View File

@@ -287,8 +287,8 @@ new Invocation ready to be used.
Once you've created a Node, the next step is to share it with the community! The
best way to do this is to submit a Pull Request to add the Node to the
[Community Nodes](nodes/communityNodes) list. If you're not sure how to do that,
take a look at our [contributing nodes overview](contributingNodes).
[Community Nodes](../nodes/communityNodes.md) list. If you're not sure how to do that,
take a look at our [contributing nodes overview](../nodes/contributingNodes.md).
## Advanced

View File

@@ -9,20 +9,20 @@ model. These are the:
configuration information. Among other things, the record service
tracks the type of the model, its provenance, and where it can be
found on disk.
* _ModelInstallServiceBase_ A service for installing models to
disk. It uses `DownloadQueueServiceBase` to download models and
their metadata, and `ModelRecordServiceBase` to store that
information. It is also responsible for managing the InvokeAI
`models` directory and its contents.
* _DownloadQueueServiceBase_
A multithreaded downloader responsible
for downloading models from a remote source to disk. The download
queue has special methods for downloading repo_id folders from
Hugging Face, as well as discriminating among model versions in
Civitai, but can be used for arbitrary content.
* _ModelLoadServiceBase_
Responsible for loading a model from disk
into RAM and VRAM and getting it ready for inference.
@@ -207,9 +207,9 @@ for use in the InvokeAI web server. Its signature is:
```
def open(
cls,
config: InvokeAIAppConfig,
conn: Optional[sqlite3.Connection] = None,
cls,
config: InvokeAIAppConfig,
conn: Optional[sqlite3.Connection] = None,
lock: Optional[threading.Lock] = None
) -> Union[ModelRecordServiceSQL, ModelRecordServiceFile]:
```
@@ -363,7 +363,7 @@ functionality:
* Registering a model config record for a model already located on the
local filesystem, without moving it or changing its path.
* Installing a model already located on the local filesystem, by
moving it into the InvokeAI root directory under the
`models` folder (or wherever config parameter `models_dir`
@@ -371,21 +371,21 @@ functionality:
* Probing of models to determine their type, base type and other key
information.
* Interface with the InvokeAI event bus to provide status updates on
the download, installation and registration process.
* Downloading a model from an arbitrary URL and installing it in
`models_dir`.
* Special handling for HuggingFace repo_ids to recursively download
the contents of the repository, paying attention to alternative
variants such as fp16.
* Saving tags and other metadata about the model into the invokeai database
when fetching from a repo that provides that type of information,
(currently only HuggingFace).
### Initializing the installer
A default installer is created at InvokeAI api startup time and stored
@@ -461,7 +461,7 @@ revision.
`config` is an optional dict of values that will override the
autoprobed values for model type, base, scheduler prediction type, and
so forth. See [Model configuration and
probing](#Model-configuration-and-probing) for details.
probing](#model-configuration-and-probing) for details.
`access_token` is an optional access token for accessing resources
that need authentication.
@@ -494,7 +494,7 @@ source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac
for source in [source1, source2, source3, source4, source5, source6, source7]:
install_job = installer.install_model(source)
source2job = installer.wait_for_installs(timeout=120)
for source in sources:
job = source2job[source]
@@ -504,7 +504,7 @@ for source in sources:
print(f"{source} installed as {model_key}")
elif job.errored:
print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
```
As shown here, the `import_model()` method accepts a variety of

View File

@@ -1,6 +1,6 @@
# InvokeAI Backend Tests
We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
We use `pytest` to run the backend python tests. (See [pyproject.toml](https://github.com/invoke-ai/InvokeAI/blob/main/pyproject.toml) for the default `pytest` options.)
## Fast vs. Slow
All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).
@@ -33,7 +33,7 @@ pytest tests -m ""
## Test Organization
All backend tests are in the [`tests/`](/tests/) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
All backend tests are in the [`tests/`](https://github.com/invoke-ai/InvokeAI/tree/main/tests) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true.

View File

@@ -2,7 +2,7 @@
## **What do I need to know to help?**
If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.
## **Get Started**
@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
Once you're setup, for more information, you can review the documentation specific to your area of interest:
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
* #### [Frontend Documentation](../frontend/index.md)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
@@ -20,15 +20,15 @@ Once you're setup, for more information, you can review the documentation specif
If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md), [translation](translation.md) or helping support other users and triage issues as they're reported in GitHub.
There are two paths to making a development contribution:
There are two paths to making a development contribution:
1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you'd like to help with, reach out to the contributor assigned to the item to see how you can help.
1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you'd like to help with, reach out to the contributor assigned to the item to see how you can help.
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**
*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors' time and effort and want to ensure that no one's time is being misspent.*
## Best Practices:
## Best Practices:
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
* Comments! Commenting your code helps reviewers easily understand your contribution
* Use Python and Typescripts typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
@@ -38,7 +38,7 @@ There are two paths to making a development contribution:
If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.
For frontend related work, **@psychedelicious** is the best person to reach out to.
For frontend related work, **@psychedelicious** is the best person to reach out to.
For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@psychedelicious**.

View File

@@ -22,15 +22,15 @@ Before starting these steps, ensure you have your local environment [configured
2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
3. Clone the repository to your local machine using:
```bash
git clone https://github.com/your-GitHub-username/InvokeAI.git
```
```bash
git clone https://github.com/your-GitHub-username/InvokeAI.git
```
If you're unfamiliar with using Git through the command line, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.
4. Create a new branch for your fix using:
```bash
git checkout -b branch-name-here
```
```bash
git checkout -b branch-name-here
```
5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:

View File

@@ -27,9 +27,9 @@ If you just want to use Invoke, you should use the [installer][installer link].
5. Activate the venv (you'll need to do this every time you want to run the app):
```sh
source .venv/bin/activate
```
```sh
source .venv/bin/activate
```
6. Install the repo as an [editable install][editable install link]:
@@ -37,7 +37,7 @@ If you just want to use Invoke, you should use the [installer][installer link].
pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
```
Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
Refer to the [manual installation][manual install link] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
7. Install the frontend dev toolchain:

View File

@@ -34,11 +34,11 @@ Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy)
## Contributors
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort.
## Code of Conduct
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
By making a contribution to this project, you certify that:

View File

@@ -110,7 +110,7 @@ async def cancel_by_batch_ids(
@session_queue_router.put(
"/{queue_id}/cancel_by_destination",
operation_id="cancel_by_destination",
responses={200: {"model": CancelByBatchIDsResult}},
responses={200: {"model": CancelByDestinationResult}},
)
async def cancel_by_destination(
queue_id: str = Path(description="The queue id to perform this operation on"),

View File

@@ -59,11 +59,32 @@ logger.info(f"Using torch device: {torch_device_name}")
loop = asyncio.new_event_loop()
# We may change the port if the default is in use; this global variable stores the chosen port so that we can log
# the correct port when the server starts in the lifespan handler.
port = app_config.port
@asynccontextmanager
async def lifespan(app: FastAPI):
# Add startup event to load dependencies
ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, loop=loop, logger=logger)
# Log the server address when it starts - in case the network log level is too high to show uvicorn's own startup log
proto = "https" if app_config.ssl_certfile else "http"
msg = f"Invoke running on {proto}://{app_config.host}:{port} (Press CTRL+C to quit)"
# Logging this way ignores the logger's log level and _always_ logs the message
record = logger.makeRecord(
name=logger.name,
level=logging.INFO,
fn="",
lno=0,
msg=msg,
args=(),
exc_info=None,
)
logger.handle(record)
yield
# Shut down threads
ApiDependencies.shutdown()
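The `makeRecord`/`handle` pattern in the lifespan handler above bypasses the logger's own level check (handler-level filtering still applies), which is why the startup address is printed even when the configured log level would normally suppress info messages. A standalone sketch of the same trick, independent of InvokeAI's logger setup:

```python
import logging

logging.basicConfig(level=logging.INFO)  # root logger gets a stream handler
logger = logging.getLogger("demo")
logger.setLevel(logging.ERROR)  # ordinary .info() calls on this logger are filtered out

logger.info("dropped: blocked by the logger's ERROR level")

# Build the record by hand and pass it straight to the handlers, skipping the level check.
record = logger.makeRecord(
    name=logger.name,
    level=logging.INFO,
    fn="",
    lno=0,
    msg="emitted: handle() ignores the logger's level",
    args=(),
    exc_info=None,
)
logger.handle(record)
```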
@@ -206,6 +227,7 @@ def invoke_api() -> None:
else:
jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)
global port
port = find_port(app_config.port)
if port != app_config.port:
logger.warn(f"Port {app_config.port} in use, using port {port}")
@@ -217,18 +239,17 @@ def invoke_api() -> None:
host=app_config.host,
port=port,
loop="asyncio",
log_level=app_config.log_level,
log_level=app_config.log_level_network,
ssl_certfile=app_config.ssl_certfile,
ssl_keyfile=app_config.ssl_keyfile,
)
server = uvicorn.Server(config)
# replace uvicorn's loggers with InvokeAI's for consistent appearance
for logname in ["uvicorn.access", "uvicorn"]:
log = InvokeAILogger.get_logger(logname)
log.handlers.clear()
for ch in logger.handlers:
log.addHandler(ch)
uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
uvicorn_logger.handlers.clear()
for hdlr in logger.handlers:
uvicorn_logger.addHandler(hdlr)
loop.run_until_complete(server.serve())

View File

@@ -15,6 +15,11 @@ custom_nodes_readme_path = str(custom_nodes_path / "README.md")
shutil.copy(Path(__file__).parent / "custom_nodes/init.py", custom_nodes_init_path)
shutil.copy(Path(__file__).parent / "custom_nodes/README.md", custom_nodes_readme_path)
# set the same permissions as the destination directory, in case our source is read-only,
# so that the files are user-writable
for p in custom_nodes_path.glob("**/*"):
p.chmod(custom_nodes_path.stat().st_mode)
# Import custom nodes, see https://docs.python.org/3/library/importlib.html#importing-programmatically
spec = spec_from_file_location("custom_nodes", custom_nodes_init_path)
if spec is None or spec.loader is None:

View File

@@ -19,9 +19,9 @@ from invokeai.app.invocations.model import CLIPField
from invokeai.app.invocations.primitives import ConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.ti_utils import generate_ti_list
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
ConditioningFieldData,
@@ -66,10 +66,10 @@ class CompelInvocation(BaseInvocation):
tokenizer_info = context.models.load(self.clip.tokenizer)
text_encoder_info = context.models.load(self.clip.text_encoder)
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in self.clip.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info
return
@@ -82,7 +82,7 @@ class CompelInvocation(BaseInvocation):
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
model=text_encoder,
patches=_lora_loader(),
prefix="lora_te_",
@@ -162,11 +162,11 @@ class SDXLPromptInvocationBase:
c_pooled = None
return c, c_pooled
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in clip_field.loras:
lora_info = context.models.load(lora.lora)
lora_model = lora_info.model
assert isinstance(lora_model, LoRAModelRaw)
assert isinstance(lora_model, ModelPatchRaw)
yield (lora_model, lora.weight)
del lora_info
return
@@ -179,7 +179,7 @@ class SDXLPromptInvocationBase:
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (cached_weights, text_encoder),
tokenizer_info as tokenizer,
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
text_encoder,
patches=_lora_loader(),
prefix=lora_prefix,

View File

@@ -6,7 +6,6 @@ from PIL import Image
from torchvision.transforms.functional import resize as tv_resize
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import DEFAULT_PRECISION
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
from invokeai.app.invocations.model import VAEField
@@ -29,11 +28,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
fp32: bool = InputField(
default=DEFAULT_PRECISION == torch.float32,
description=FieldDescriptions.fp32,
ui_order=4,
)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=4)
def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
if mask_image.mode != "L":

View File

@@ -7,7 +7,6 @@ from PIL import Image, ImageFilter
from torchvision.transforms.functional import resize as tv_resize
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.constants import DEFAULT_PRECISION
from invokeai.app.invocations.fields import (
DenoiseMaskField,
FieldDescriptions,
@@ -76,11 +75,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
ui_order=7,
)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
fp32: bool = InputField(
default=DEFAULT_PRECISION == torch.float32,
description=FieldDescriptions.fp32,
ui_order=9,
)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=9)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> GradientMaskOutput:

View File

@@ -37,10 +37,10 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_manager import BaseModelType, ModelVariantType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion import PipelineIntermediateState
from invokeai.backend.stable_diffusion.denoise_context import DenoiseContext, DenoiseInputs
from invokeai.backend.stable_diffusion.diffusers_pipeline import (
@@ -987,10 +987,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
def step_callback(state: PipelineIntermediateState) -> None:
context.util.sd_step_callback(state, unet_config.base)
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in self.unet.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info
return
@@ -1003,7 +1003,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
SeamlessExt.static_patch_model(unet, self.unet.seamless_axes), # FIXME
# Apply the LoRA after unet has been moved to its target device for faster patching.
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
model=unet,
patches=_lora_loader(),
prefix="lora_unet_",

View File

@@ -56,6 +56,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
CLIPLEmbedModel = "CLIPLEmbedModelField"
CLIPGEmbedModel = "CLIPGEmbedModelField"
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
ControlLoRAModel = "ControlLoRAModelField"
# endregion
# region Misc Field Types
@@ -143,6 +144,7 @@ class FieldDescriptions:
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
control_lora_model = "Control LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
flux_model = "Flux model (Transformer) to load"
sd3_model = "SD3 model (MMDiTX) to load"
@@ -250,6 +252,11 @@ class FluxConditioningField(BaseModel):
"""A conditioning tensor primitive value"""
conditioning_name: str = Field(description="The name of conditioning tensor")
mask: Optional[TensorField] = Field(
default=None,
description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
"included regions should be set to True.",
)
class SD3ConditioningField(BaseModel):

View File

@@ -0,0 +1,49 @@
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.model import ControlLoRAField, ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext
@invocation_output("flux_control_lora_loader_output")
class FluxControlLoRALoaderOutput(BaseInvocationOutput):
"""Flux Control LoRA Loader Output"""
control_lora: ControlLoRAField = OutputField(
title="Flux Control LoRA", description="Control LoRAs to apply on model loading", default=None
)
@invocation(
"flux_control_lora_loader",
title="Flux Control LoRA",
tags=["lora", "model", "flux"],
category="model",
version="1.1.0",
classification=Classification.Prototype,
)
class FluxControlLoRALoaderInvocation(BaseInvocation):
"""LoRA model and Image to use with FLUX transformer generation."""
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.control_lora_model, title="Control LoRA", ui_type=UIType.ControlLoRAModel
)
image: ImageField = InputField(description="The image to encode.")
weight: float = InputField(description="The weight of the LoRA.", default=1.0)
def invoke(self, context: InvocationContext) -> FluxControlLoRALoaderOutput:
if not context.models.exists(self.lora.key):
raise ValueError(f"Unknown lora: {self.lora.key}!")
return FluxControlLoRALoaderOutput(
control_lora=ControlLoRAField(
lora=self.lora,
img=self.image,
weight=self.weight,
)
)

View File

@@ -1,10 +1,12 @@
from contextlib import ExitStack
from typing import Callable, Iterator, Optional, Tuple
from typing import Callable, Iterator, Optional, Tuple, Union
import einops
import numpy as np
import numpy.typing as npt
import torch
import torchvision.transforms as tv_transforms
from PIL import Image
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
@@ -21,8 +23,9 @@ from invokeai.app.invocations.fields import (
WithMetadata,
)
from invokeai.app.invocations.flux_controlnet import FluxControlNetField
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
from invokeai.app.invocations.ip_adapter import IPAdapterField
from invokeai.app.invocations.model import TransformerField, VAEField
from invokeai.app.invocations.model import ControlLoRAField, LoRAField, TransformerField, VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXControlNetFlux
@@ -30,6 +33,7 @@ from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlN
from invokeai.backend.flux.denoise import denoise
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
@@ -42,10 +46,11 @@ from invokeai.backend.flux.sampling_utils import (
pack,
unpack,
)
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.flux.text_conditioning import FluxTextConditioning
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice
@@ -56,7 +61,7 @@ from invokeai.backend.util.devices import TorchDevice
title="FLUX Denoise",
tags=["image", "flux"],
category="image",
version="3.2.1",
version="3.2.2",
classification=Classification.Prototype,
)
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
@@ -87,10 +92,13 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
input=Input.Connection,
title="Transformer",
)
positive_text_conditioning: FluxConditioningField = InputField(
control_lora: Optional[ControlLoRAField] = InputField(
description=FieldDescriptions.control_lora_model, input=Input.Connection, title="Control LoRA", default=None
)
positive_text_conditioning: FluxConditioningField | list[FluxConditioningField] = InputField(
description=FieldDescriptions.positive_cond, input=Input.Connection
)
negative_text_conditioning: FluxConditioningField | None = InputField(
negative_text_conditioning: FluxConditioningField | list[FluxConditioningField] | None = InputField(
default=None,
description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
input=Input.Connection,
@@ -139,36 +147,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
name = context.tensors.save(tensor=latents)
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
def _load_text_conditioning(
self, context: InvocationContext, conditioning_name: str, dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
# Load the conditioning data.
cond_data = context.conditioning.load(conditioning_name)
assert len(cond_data.conditionings) == 1
flux_conditioning = cond_data.conditionings[0]
assert isinstance(flux_conditioning, FLUXConditioningInfo)
flux_conditioning = flux_conditioning.to(dtype=dtype)
t5_embeddings = flux_conditioning.t5_embeds
clip_embeddings = flux_conditioning.clip_embeds
return t5_embeddings, clip_embeddings
def _run_diffusion(
self,
context: InvocationContext,
):
inference_dtype = torch.bfloat16
# Load the conditioning data.
pos_t5_embeddings, pos_clip_embeddings = self._load_text_conditioning(
context, self.positive_text_conditioning.conditioning_name, inference_dtype
)
neg_t5_embeddings: torch.Tensor | None = None
neg_clip_embeddings: torch.Tensor | None = None
if self.negative_text_conditioning is not None:
neg_t5_embeddings, neg_clip_embeddings = self._load_text_conditioning(
context, self.negative_text_conditioning.conditioning_name, inference_dtype
)
# Load the input latents, if provided.
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
if init_latents is not None:
@@ -183,15 +167,45 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
dtype=inference_dtype,
seed=self.seed,
)
b, _c, latent_h, latent_w = noise.shape
packed_h = latent_h // 2
packed_w = latent_w // 2
# Load the conditioning data.
pos_text_conditionings = self._load_text_conditioning(
context=context,
cond_field=self.positive_text_conditioning,
packed_height=packed_h,
packed_width=packed_w,
dtype=inference_dtype,
device=TorchDevice.choose_torch_device(),
)
neg_text_conditionings: list[FluxTextConditioning] | None = None
if self.negative_text_conditioning is not None:
neg_text_conditionings = self._load_text_conditioning(
context=context,
cond_field=self.negative_text_conditioning,
packed_height=packed_h,
packed_width=packed_w,
dtype=inference_dtype,
device=TorchDevice.choose_torch_device(),
)
pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
pos_text_conditionings, img_seq_len=packed_h * packed_w
)
neg_regional_prompting_extension = (
RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
if neg_text_conditionings
else None
)
transformer_info = context.models.load(self.transformer.transformer)
is_schnell = "schnell" in transformer_info.config.config_path
is_schnell = "schnell" in getattr(transformer_info.config, "config_path", "")
# Calculate the timestep schedule.
image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
timesteps = get_schedule(
num_steps=self.num_steps,
image_seq_len=image_seq_len,
image_seq_len=packed_h * packed_w,
shift=not is_schnell,
)
@@ -226,30 +240,26 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
if len(timesteps) <= 1:
return x
if is_schnell and self.control_lora:
raise ValueError("Control LoRAs cannot be used with FLUX Schnell")
# Prepare the extra image conditioning tensor if a FLUX structural control image is provided.
img_cond = self._prep_structural_control_img_cond(context)
inpaint_mask = self._prep_inpaint_mask(context, x)
b, _c, latent_h, latent_w = x.shape
img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)
pos_bs, pos_t5_seq_len, _ = pos_t5_embeddings.shape
pos_txt_ids = torch.zeros(
pos_bs, pos_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
)
neg_txt_ids: torch.Tensor | None = None
if neg_t5_embeddings is not None:
neg_bs, neg_t5_seq_len, _ = neg_t5_embeddings.shape
neg_txt_ids = torch.zeros(
neg_bs, neg_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
)
# Pack all latent tensors.
init_latents = pack(init_latents) if init_latents is not None else None
inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
img_cond = pack(img_cond) if img_cond is not None else None
noise = pack(noise)
x = pack(x)
# Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
assert image_seq_len == x.shape[1]
# Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len, packed_h, and
# packed_w correctly.
assert packed_h * packed_w == x.shape[1]
# Prepare inpaint extension.
inpaint_extension: InpaintExtension | None = None
@@ -299,7 +309,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
if config.format in [ModelFormat.Checkpoint]:
# The model is non-quantized, so we can apply the LoRA weights directly into the model.
exit_stack.enter_context(
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
model=transformer,
patches=self._lora_iterator(context),
prefix=FLUX_LORA_TRANSFORMER_PREFIX,
@@ -314,7 +324,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
# The model is quantized, so apply the LoRA weights as sidecar layers. This results in slower inference,
# than directly patching the weights, but is agnostic to the quantization format.
exit_stack.enter_context(
LoRAPatcher.apply_lora_sidecar_patches(
LayerPatcher.apply_model_sidecar_patches(
model=transformer,
patches=self._lora_iterator(context),
prefix=FLUX_LORA_TRANSFORMER_PREFIX,
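The comments in the two branches above explain the trade-off: unquantized (`Checkpoint` format) weights can have the LoRA delta folded directly into them, while quantized weights get sidecar layers that add the LoRA contribution at call time. A rough, self-contained sketch of that distinction; `DirectPatch` and `SidecarPatch` are invented for illustration and are not the real `LayerPatcher` API:

```python
import torch


class DirectPatch:
    """Fold a LoRA delta straight into an existing weight (fast inference, needs readable weights)."""

    def __init__(self, layer: torch.nn.Linear, delta: torch.Tensor, scale: float):
        self.layer, self.delta, self.scale = layer, delta, scale

    def __enter__(self):
        self.layer.weight.data += self.scale * self.delta  # mutate the weight in place
        return self.layer

    def __exit__(self, *exc):
        self.layer.weight.data -= self.scale * self.delta  # restore on exit


class SidecarPatch(torch.nn.Module):
    """Leave the base layer untouched and add the LoRA contribution at call time
    (slower, but never needs to read or rewrite the possibly-quantized base weights)."""

    def __init__(self, layer: torch.nn.Module, delta: torch.Tensor, scale: float):
        super().__init__()
        self.layer, self.delta, self.scale = layer, delta, scale

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layer(x) + self.scale * (x @ self.delta.T)


base = torch.nn.Linear(4, 4, bias=False)
delta = 0.01 * torch.randn(4, 4)  # pretend LoRA delta for the weight
x = torch.randn(1, 4)

sidecar_out = SidecarPatch(base, delta, scale=1.0)(x)
with DirectPatch(base, delta, scale=1.0):
    direct_out = base(x)

# Both routes produce the same output up to floating-point error.
assert torch.allclose(direct_out, sidecar_out, atol=1e-5)
```

Direct patching keeps the forward pass unchanged, so inference runs at full speed; the sidecar adds an extra matmul per call but stays agnostic to how the base weights are stored.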
@@ -338,12 +348,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
model=transformer,
img=x,
img_ids=img_ids,
txt=pos_t5_embeddings,
txt_ids=pos_txt_ids,
vec=pos_clip_embeddings,
neg_txt=neg_t5_embeddings,
neg_txt_ids=neg_txt_ids,
neg_vec=neg_clip_embeddings,
pos_regional_prompting_extension=pos_regional_prompting_extension,
neg_regional_prompting_extension=neg_regional_prompting_extension,
timesteps=timesteps,
step_callback=self._build_step_callback(context),
guidance=self.guidance,
@@ -352,11 +358,49 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
controlnet_extensions=controlnet_extensions,
pos_ip_adapter_extensions=pos_ip_adapter_extensions,
neg_ip_adapter_extensions=neg_ip_adapter_extensions,
img_cond=img_cond,
)
x = unpack(x.float(), self.height, self.width)
return x
def _load_text_conditioning(
self,
context: InvocationContext,
cond_field: FluxConditioningField | list[FluxConditioningField],
packed_height: int,
packed_width: int,
dtype: torch.dtype,
device: torch.device,
) -> list[FluxTextConditioning]:
"""Load text conditioning data from a FluxConditioningField or a list of FluxConditioningFields."""
# Normalize to a list of FluxConditioningFields.
cond_list = [cond_field] if isinstance(cond_field, FluxConditioningField) else cond_field
text_conditionings: list[FluxTextConditioning] = []
for cond_field in cond_list:
# Load the text embeddings.
cond_data = context.conditioning.load(cond_field.conditioning_name)
assert len(cond_data.conditionings) == 1
flux_conditioning = cond_data.conditionings[0]
assert isinstance(flux_conditioning, FLUXConditioningInfo)
flux_conditioning = flux_conditioning.to(dtype=dtype, device=device)
t5_embeddings = flux_conditioning.t5_embeds
clip_embeddings = flux_conditioning.clip_embeds
# Load the mask, if provided.
mask: Optional[torch.Tensor] = None
if cond_field.mask is not None:
mask = context.tensors.load(cond_field.mask.tensor_name)
mask = mask.to(device=device)
mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
mask, packed_height, packed_width, dtype, device
)
text_conditionings.append(FluxTextConditioning(t5_embeddings, clip_embeddings, mask))
return text_conditionings
@classmethod
def prep_cfg_scale(
cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
@@ -545,6 +589,29 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
return controlnet_extensions
def _prep_structural_control_img_cond(self, context: InvocationContext) -> torch.Tensor | None:
if self.control_lora is None:
return None
if not self.controlnet_vae:
raise ValueError("controlnet_vae must be set when using a FLUX Control LoRA.")
# Load the conditioning image and resize it to the target image size.
cond_img = context.images.get_pil(self.control_lora.img.image_name)
cond_img = cond_img.convert("RGB")
cond_img = cond_img.resize((self.width, self.height), Image.Resampling.BICUBIC)
cond_img = np.array(cond_img)
# Normalize the conditioning image to the range [-1, 1].
# This normalization is based on the original implementations here:
# https://github.com/black-forest-labs/flux/blob/805da8571a0b49b6d4043950bd266a65328c243b/src/flux/modules/image_embedders.py#L34
# https://github.com/black-forest-labs/flux/blob/805da8571a0b49b6d4043950bd266a65328c243b/src/flux/modules/image_embedders.py#L60
img_cond = torch.from_numpy(cond_img).float() / 127.5 - 1.0
img_cond = einops.rearrange(img_cond, "h w c -> 1 c h w")
vae_info = context.models.load(self.controlnet_vae.vae)
return FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=img_cond)
def _normalize_ip_adapter_fields(self) -> list[IPAdapterField]:
if self.ip_adapter is None:
return []
@@ -651,10 +718,15 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
return pos_ip_adapter_extensions, neg_ip_adapter_extensions
def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
for lora in self.transformer.loras:
def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
loras: list[Union[LoRAField, ControlLoRAField]] = [*self.transformer.loras]
if self.control_lora:
# Note: Since FLUX structural control LoRAs modify the shape of some weights, it is important that they are
# applied last.
loras.append(self.control_lora)
for lora in loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info

View File

@@ -1,19 +1,26 @@
from contextlib import ExitStack
from typing import Iterator, Literal, Tuple
from typing import Iterator, Literal, Optional, Tuple
import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.fields import (
FieldDescriptions,
FluxConditioningField,
Input,
InputField,
TensorField,
UIComponent,
)
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
@@ -22,7 +29,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
title="FLUX Text Encoding",
tags=["prompt", "conditioning", "flux"],
category="conditioning",
version="1.1.0",
version="1.1.1",
classification=Classification.Prototype,
)
class FluxTextEncoderInvocation(BaseInvocation):
@@ -41,7 +48,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
t5_max_seq_len: Literal[256, 512] = InputField(
description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
)
prompt: str = InputField(description="Text prompt to encode.")
prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea)
mask: Optional[TensorField] = InputField(
default=None, description="A mask defining the region that this conditioning prompt applies to."
)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
@@ -54,7 +64,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
)
conditioning_name = context.conditioning.save(conditioning_data)
return FluxConditioningOutput.build(conditioning_name)
return FluxConditioningOutput(
conditioning=FluxConditioningField(conditioning_name=conditioning_name, mask=self.mask)
)
def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
@@ -99,7 +111,7 @@ class FluxTextEncoderInvocation(BaseInvocation):
if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
# The model is non-quantized, so we can apply the LoRA weights directly into the model.
exit_stack.enter_context(
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
model=clip_text_encoder,
patches=self._clip_lora_iterator(context),
prefix=FLUX_LORA_CLIP_PREFIX,
@@ -118,9 +130,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
assert isinstance(pooled_prompt_embeds, torch.Tensor)
return pooled_prompt_embeds
def _clip_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
def _clip_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in self.clip.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info

View File

@@ -0,0 +1,59 @@
from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
@invocation_output("image_panel_coordinate_output")
class ImagePanelCoordinateOutput(BaseInvocationOutput):
x_left: int = OutputField(description="The left x-coordinate of the panel.")
y_top: int = OutputField(description="The top y-coordinate of the panel.")
width: int = OutputField(description="The width of the panel.")
height: int = OutputField(description="The height of the panel.")
@invocation(
"image_panel_layout",
title="Image Panel Layout",
tags=["image", "panel", "layout"],
category="image",
version="1.0.0",
classification=Classification.Prototype,
)
class ImagePanelLayoutInvocation(BaseInvocation):
"""Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
then the grid may not cover the entire image.)
"""
width: int = InputField(description="The width of the entire grid.")
height: int = InputField(description="The height of the entire grid.")
num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")
@field_validator("panel_col_idx")
def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_cols"]:
raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
return v
@field_validator("panel_row_idx")
def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_rows"]:
raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
return v
def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
x_left = self.panel_col_idx * (self.width // self.num_cols)
y_top = self.panel_row_idx * (self.height // self.num_rows)
width = self.width // self.num_cols
height = self.height // self.num_rows
return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
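To make the integer arithmetic in `invoke()` above concrete, here is a standalone sketch that repeats the same calculation with arbitrary example numbers:

```python
def panel_coordinates(
    width: int, height: int, num_cols: int, num_rows: int, panel_col_idx: int, panel_row_idx: int
) -> tuple[int, int, int, int]:
    """Mirror of the coordinate math above: (x_left, y_top, panel_width, panel_height)."""
    panel_width = width // num_cols
    panel_height = height // num_rows
    x_left = panel_col_idx * panel_width
    y_top = panel_row_idx * panel_height
    return x_left, y_top, panel_width, panel_height


# A 1024x768 grid split into 3 columns x 2 rows: the bottom-right panel starts at (682, 384)
# and is 341x384. Because of integer division, 3 * 341 = 1023 < 1024, so the grid may not
# cover the full image -- exactly the caveat noted in the docstring.
print(panel_coordinates(1024, 768, num_cols=3, num_rows=2, panel_col_idx=2, panel_row_idx=1))
```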

View File

@@ -13,7 +13,7 @@ from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
@@ -49,7 +49,7 @@ class ImageToLatentsInvocation(BaseInvocation):
# NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
# offer a way to directly set None values.
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
@staticmethod
def vae_encode(

View File

@@ -12,7 +12,7 @@ from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -51,7 +51,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
# NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
# offer a way to directly set None values.
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:

View File

@@ -10,7 +10,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager.config import (
@@ -65,11 +65,6 @@ class CLIPField(BaseModel):
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class TransformerField(BaseModel):
transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class T5EncoderField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
@@ -80,6 +75,15 @@ class VAEField(BaseModel):
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
class ControlLoRAField(LoRAField):
img: ImageField = Field(description="Image to use in structural conditioning")
class TransformerField(BaseModel):
transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
@invocation_output("unet_output")
class UNetOutput(BaseInvocationOutput):
"""Base class for invocations that output a UNet field."""

View File

@@ -16,10 +16,10 @@ from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import SD3ConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, SD3ConditioningInfo
# The SD3 T5 Max Sequence Length set based on the default in diffusers.
@@ -150,7 +150,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
# The model is non-quantized, so we can apply the LoRA weights directly into the model.
exit_stack.enter_context(
LoRAPatcher.apply_lora_patches(
LayerPatcher.apply_model_patches(
model=clip_text_encoder,
patches=self._clip_lora_iterator(context, clip_model),
prefix=FLUX_LORA_CLIP_PREFIX,
@@ -193,9 +193,9 @@ class Sd3TextEncoderInvocation(BaseInvocation):
def _clip_lora_iterator(
self, context: InvocationContext, clip_model: CLIPField
) -> Iterator[Tuple[LoRAModelRaw, float]]:
) -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in clip_model.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info

View File

@@ -22,8 +22,8 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.model import UNetField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.diffusers_pipeline import ControlNetData, PipelineIntermediateState
from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import (
MultiDiffusionPipeline,
@@ -194,10 +194,10 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
context.util.sd_step_callback(state, unet_config.base)
# Prepare an iterator that yields the UNet's LoRA models and their weights.
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
for lora in self.unet.loras:
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
assert isinstance(lora_info.model, ModelPatchRaw)
yield (lora_info.model, lora.weight)
del lora_info
@@ -207,7 +207,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
with (
ExitStack() as exit_stack,
unet_info as unet,
LoRAPatcher.apply_lora_patches(model=unet, patches=_lora_loader(), prefix="lora_unet_"),
LayerPatcher.apply_model_patches(model=unet, patches=_lora_loader(), prefix="lora_unet_"),
):
assert isinstance(unet, UNet2DConditionModel)
latents = latents.to(device=unet.device, dtype=unet.dtype)

View File

@@ -4,6 +4,7 @@
from __future__ import annotations
import copy
import filecmp
import locale
import os
import re
@@ -96,6 +97,7 @@ class InvokeAIAppConfig(BaseSettings):
log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
use_memory_db: Use in-memory database. Useful for development.
dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
profile_graphs: Enable graph profiling using `cProfile`.
@@ -162,6 +164,7 @@ class InvokeAIAppConfig(BaseSettings):
log_format: LOG_FORMAT = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.')
log_level: LOG_LEVEL = Field(default="info", description="Emit logging messages at this level or higher.")
log_sql: bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.")
log_level_network: LOG_LEVEL = Field(default='warning', description="Log level for network-related messages. 'info' and 'debug' are very verbose.")
# Development
use_memory_db: bool = Field(default=False, description="Use in-memory database. Useful for development.")
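To make the effect of the new `log_level_network` setting concrete, here is a small standalone sketch; `DemoLogSettings` is a stand-in invented for illustration, not the real `InvokeAIAppConfig`:

```python
import logging
from dataclasses import dataclass


@dataclass
class DemoLogSettings:
    """Stand-in for the two settings described above (illustration only)."""

    log_level: str = "info"             # level for the application's own loggers
    log_level_network: str = "warning"  # quieter level handed to uvicorn's loggers


settings = DemoLogSettings()
logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

app_logger = logging.getLogger("InvokeAI")
app_logger.setLevel(settings.log_level.upper())

uvicorn_logger = logging.getLogger("uvicorn")
uvicorn_logger.setLevel(settings.log_level_network.upper())

app_logger.info("shown: app logging runs at 'info'")
uvicorn_logger.info("hidden: network logging runs at 'warning'")
uvicorn_logger.warning("shown: warnings from uvicorn still get through")
```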
@@ -525,9 +528,35 @@ def get_config() -> InvokeAIAppConfig:
]
example_config.write_file(config.config_file_path.with_suffix(".example.yaml"), as_example=True)
# Copy all legacy configs - We know `__path__[0]` is correct here
# Copy all legacy configs only if needed
# We know `__path__[0]` is correct here
configs_src = Path(model_configs.__path__[0]) # pyright: ignore [reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue]
shutil.copytree(configs_src, config.legacy_conf_path, dirs_exist_ok=True)
dest_path = config.legacy_conf_path
# Create destination (we don't need to check for existence)
dest_path.mkdir(parents=True, exist_ok=True)
# Compare directories recursively
comparison = filecmp.dircmp(configs_src, dest_path)
need_copy = any(
[
comparison.left_only, # Files exist only in source
comparison.diff_files, # Files that differ
comparison.common_funny, # Files that couldn't be compared
]
)
if need_copy:
# Get permissions from destination directory
dest_mode = dest_path.stat().st_mode
# Copy directory tree
shutil.copytree(configs_src, dest_path, dirs_exist_ok=True)
# Set permissions on copied files to match destination directory
dest_path.chmod(dest_mode)
for p in dest_path.glob("**/*"):
p.chmod(dest_mode)
if config.config_file_path.exists():
config_from_file = load_and_migrate_config(config.config_file_path)

View File

@@ -438,9 +438,10 @@ class ModelInstallService(ModelInstallServiceBase):
variants = "|".join(ModelRepoVariant.__members__.values())
hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"
source_obj: Optional[StringLikeSource] = None
source_stripped = source.strip('"')
if Path(source).exists(): # A local file or directory
source_obj = LocalModelSource(path=Path(source))
if Path(source_stripped).exists(): # A local file or directory
source_obj = LocalModelSource(path=Path(source_stripped))
elif match := re.match(hf_repoid_re, source):
source_obj = HFModelSource(
repo_id=match.group(1),

View File

@@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):
def torch_load_file(checkpoint: Path) -> AnyModel:
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
result = torch_load(checkpoint, map_location="cpu")
return result

View File

@@ -378,6 +378,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
self._poll_now()
async def _on_queue_item_status_changed(self, event: FastAPIEvent[QueueItemStatusChangedEvent]) -> None:
# Make sure the cancel event is for the currently processing queue item
if self._queue_item and self._queue_item.item_id != event[1].item_id:
return
if self._queue_item and event[1].status in ["completed", "failed", "canceled"]:
# When the queue item is canceled via HTTP, the queue item status is set to `"canceled"` and this event is
# emitted. We need to respond to this event and stop graph execution. This is done by setting the cancel
@@ -436,7 +439,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
poll_now_event.wait(self._polling_interval)
continue
self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}")
self._invoker.services.logger.info(
f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
)
cancel_event.clear()
# Run the graph

View File

@@ -35,7 +35,7 @@ class Migration11Callback:
def _remove_convert_cache(self) -> None:
"""Rename models/.cache to models/.convert_cache."""
self._logger.info("Removing .cache directory. Converted models will now be cached in .convert_cache.")
self._logger.info("Removing models/.cache directory. Converted models will now be cached in .convert_cache.")
legacy_convert_path = self._app_config.root_path / "models" / ".cache"
shutil.rmtree(legacy_convert_path, ignore_errors=True)

View File

@@ -1,9 +1,10 @@
import einops
import torch
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.math import attention
from invokeai.backend.flux.modules.layers import DoubleStreamBlock
from invokeai.backend.flux.modules.layers import DoubleStreamBlock, SingleStreamBlock
class CustomDoubleStreamBlockProcessor:
@@ -13,7 +14,12 @@ class CustomDoubleStreamBlockProcessor:
@staticmethod
def _double_stream_block_forward(
block: DoubleStreamBlock, img: torch.Tensor, txt: torch.Tensor, vec: torch.Tensor, pe: torch.Tensor
block: DoubleStreamBlock,
img: torch.Tensor,
txt: torch.Tensor,
vec: torch.Tensor,
pe: torch.Tensor,
attn_mask: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""This function is a direct copy of DoubleStreamBlock.forward(), but it returns some of the intermediate
values.
@@ -40,7 +46,7 @@ class CustomDoubleStreamBlockProcessor:
k = torch.cat((txt_k, img_k), dim=2)
v = torch.cat((txt_v, img_v), dim=2)
attn = attention(q, k, v, pe=pe)
attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
# calculate the img blocks
@@ -63,11 +69,15 @@ class CustomDoubleStreamBlockProcessor:
vec: torch.Tensor,
pe: torch.Tensor,
ip_adapter_extensions: list[XLabsIPAdapterExtension],
regional_prompting_extension: RegionalPromptingExtension,
) -> tuple[torch.Tensor, torch.Tensor]:
"""A custom implementation of DoubleStreamBlock.forward() with additional features:
- IP-Adapter support
"""
img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(block, img, txt, vec, pe)
attn_mask = regional_prompting_extension.get_double_stream_attn_mask(block_index)
img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(
block, img, txt, vec, pe, attn_mask=attn_mask
)
# Apply IP-Adapter conditioning.
for ip_adapter_extension in ip_adapter_extensions:
@@ -81,3 +91,48 @@ class CustomDoubleStreamBlockProcessor:
)
return img, txt
class CustomSingleStreamBlockProcessor:
"""A class containing a custom implementation of SingleStreamBlock.forward() with additional features (masking,
etc.)
"""
@staticmethod
def _single_stream_block_forward(
block: SingleStreamBlock,
x: torch.Tensor,
vec: torch.Tensor,
pe: torch.Tensor,
attn_mask: torch.Tensor | None = None,
) -> torch.Tensor:
"""This function is a direct copy of SingleStreamBlock.forward()."""
mod, _ = block.modulation(vec)
x_mod = (1 + mod.scale) * block.pre_norm(x) + mod.shift
qkv, mlp = torch.split(block.linear1(x_mod), [3 * block.hidden_size, block.mlp_hidden_dim], dim=-1)
q, k, v = einops.rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
q, k = block.norm(q, k, v)
# compute attention
attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
# compute activation in mlp stream, cat again and run second linear layer
output = block.linear2(torch.cat((attn, block.mlp_act(mlp)), 2))
return x + mod.gate * output
@staticmethod
def custom_single_block_forward(
timestep_index: int,
total_num_timesteps: int,
block_index: int,
block: SingleStreamBlock,
img: torch.Tensor,
vec: torch.Tensor,
pe: torch.Tensor,
regional_prompting_extension: RegionalPromptingExtension,
) -> torch.Tensor:
"""A custom implementation of SingleStreamBlock.forward() with additional features:
- Masking
"""
attn_mask = regional_prompting_extension.get_single_stream_attn_mask(block_index)
return CustomSingleStreamBlockProcessor._single_stream_block_forward(block, img, vec, pe, attn_mask=attn_mask)

View File

@@ -7,6 +7,7 @@ from tqdm import tqdm
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput, sum_controlnet_flux_outputs
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.model import Flux
@@ -18,14 +19,8 @@ def denoise(
# model input
img: torch.Tensor,
img_ids: torch.Tensor,
# positive text conditioning
txt: torch.Tensor,
txt_ids: torch.Tensor,
vec: torch.Tensor,
# negative text conditioning
neg_txt: torch.Tensor | None,
neg_txt_ids: torch.Tensor | None,
neg_vec: torch.Tensor | None,
pos_regional_prompting_extension: RegionalPromptingExtension,
neg_regional_prompting_extension: RegionalPromptingExtension | None,
# sampling parameters
timesteps: list[float],
step_callback: Callable[[PipelineIntermediateState], None],
@@ -35,6 +30,8 @@ def denoise(
controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension],
pos_ip_adapter_extensions: list[XLabsIPAdapterExtension],
neg_ip_adapter_extensions: list[XLabsIPAdapterExtension],
# extra img tokens
img_cond: torch.Tensor | None,
):
# step 0 is the initial state
total_steps = len(timesteps) - 1
@@ -61,9 +58,9 @@ def denoise(
total_num_timesteps=total_steps,
img=img,
img_ids=img_ids,
txt=txt,
txt_ids=txt_ids,
y=vec,
txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
timesteps=t_vec,
guidance=guidance_vec,
)
@@ -74,13 +71,13 @@ def denoise(
# controlnet_residuals datastructure is efficient in that it likely contains multiple references to the same
# tensors. Calculating the sum materializes each tensor into its own instance.
merged_controlnet_residuals = sum_controlnet_flux_outputs(controlnet_residuals)
pred_img = torch.cat((img, img_cond), dim=-1) if img_cond is not None else img
pred = model(
img=img,
img=pred_img,
img_ids=img_ids,
txt=txt,
txt_ids=txt_ids,
y=vec,
txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
timesteps=t_vec,
guidance=guidance_vec,
timestep_index=step_index,
@@ -88,6 +85,7 @@ def denoise(
controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
ip_adapter_extensions=pos_ip_adapter_extensions,
regional_prompting_extension=pos_regional_prompting_extension,
)
step_cfg_scale = cfg_scale[step_index]
@@ -97,15 +95,15 @@ def denoise(
# TODO(ryand): Add option to run positive and negative predictions in a single batch for better performance
# on systems with sufficient VRAM.
if neg_txt is None or neg_txt_ids is None or neg_vec is None:
if neg_regional_prompting_extension is None:
raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
neg_pred = model(
img=img,
img_ids=img_ids,
txt=neg_txt,
txt_ids=neg_txt_ids,
y=neg_vec,
txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
timesteps=t_vec,
guidance=guidance_vec,
timestep_index=step_index,
@@ -113,6 +111,7 @@ def denoise(
controlnet_double_block_residuals=None,
controlnet_single_block_residuals=None,
ip_adapter_extensions=neg_ip_adapter_extensions,
regional_prompting_extension=neg_regional_prompting_extension,
)
pred = neg_pred + step_cfg_scale * (pred - neg_pred)

View File

@@ -0,0 +1,276 @@
from typing import Optional
import torch
import torchvision
from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning, FluxTextConditioning
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.mask import to_standard_float_mask
class RegionalPromptingExtension:
"""A class for managing regional prompting with FLUX.
This implementation is inspired by https://arxiv.org/pdf/2411.02395 (though there are significant differences).
"""
def __init__(
self,
regional_text_conditioning: FluxRegionalTextConditioning,
restricted_attn_mask: torch.Tensor | None = None,
):
self.regional_text_conditioning = regional_text_conditioning
self.restricted_attn_mask = restricted_attn_mask
def get_double_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
order = [self.restricted_attn_mask, None]
return order[block_index % len(order)]
def get_single_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
order = [self.restricted_attn_mask, None]
return order[block_index % len(order)]
@classmethod
def from_text_conditioning(cls, text_conditioning: list[FluxTextConditioning], img_seq_len: int):
"""Create a RegionalPromptingExtension from a list of text conditionings.
Args:
text_conditioning (list[FluxTextConditioning]): The text conditionings to use for regional prompting.
img_seq_len (int): The image sequence length (i.e. packed_height * packed_width).
"""
regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning)
attn_mask_with_restricted_img_self_attn = cls._prepare_restricted_attn_mask(
regional_text_conditioning, img_seq_len
)
return cls(
regional_text_conditioning=regional_text_conditioning,
restricted_attn_mask=attn_mask_with_restricted_img_self_attn,
)
# Keeping _prepare_unrestricted_attn_mask for reference as an alternative masking strategy:
#
# @classmethod
# def _prepare_unrestricted_attn_mask(
# cls,
# regional_text_conditioning: FluxRegionalTextConditioning,
# img_seq_len: int,
# ) -> torch.Tensor:
# """Prepare an 'unrestricted' attention mask. In this context, 'unrestricted' means that:
# - img self-attention is not masked.
# - img regions attend to both txt within their own region and to global prompts.
# """
# device = TorchDevice.choose_torch_device()
# # Infer txt_seq_len from the t5_embeddings tensor.
# txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]
# # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
# # Concatenation happens in the following order: [txt_seq, img_seq].
# # There are 4 portions of the attention mask to consider as we prepare it:
# # 1. txt attends to itself
# # 2. txt attends to corresponding regional img
# # 3. regional img attends to corresponding txt
# # 4. regional img attends to itself
# # Initialize empty attention mask.
# regional_attention_mask = torch.zeros(
# (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
# )
# for image_mask, t5_embedding_range in zip(
# regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
# ):
# # 1. txt attends to itself
# regional_attention_mask[
# t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
# ] = 1.0
# # 2. txt attends to corresponding regional img
# # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
# fill_value = image_mask.view(1, img_seq_len) if image_mask is not None else 1.0
# regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = fill_value
# # 3. regional img attends to corresponding txt
# # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
# fill_value = image_mask.view(img_seq_len, 1) if image_mask is not None else 1.0
# regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = fill_value
# # 4. regional img attends to itself
# # Allow unrestricted img self attention.
# regional_attention_mask[txt_seq_len:, txt_seq_len:] = 1.0
# # Convert attention mask to boolean.
# regional_attention_mask = regional_attention_mask > 0.5
# return regional_attention_mask
@classmethod
def _prepare_restricted_attn_mask(
cls,
regional_text_conditioning: FluxRegionalTextConditioning,
img_seq_len: int,
) -> torch.Tensor | None:
"""Prepare a 'restricted' attention mask. In this context, 'restricted' means that:
- img self-attention is only allowed within regions.
- img regions only attend to txt within their own region, not to global prompts.
"""
# Identify the background region, i.e. the region that is not covered by any of the region masks.
background_region_mask: None | torch.Tensor = None
for image_mask in regional_text_conditioning.image_masks:
if image_mask is not None:
if background_region_mask is None:
background_region_mask = torch.ones_like(image_mask)
background_region_mask *= 1 - image_mask
if background_region_mask is None:
# There are no region masks, short-circuit and return None.
# TODO(ryand): We could restrict txt-txt attention across multiple global prompts, but this is a rare
# use case and it would make the logic here significantly more complicated.
return None
device = TorchDevice.choose_torch_device()
# Infer txt_seq_len from the t5_embeddings tensor.
txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]
# In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
# Concatenation happens in the following order: [txt_seq, img_seq].
# There are 4 portions of the attention mask to consider as we prepare it:
# 1. txt attends to itself
# 2. txt attends to corresponding regional img
# 3. regional img attends to corresponding txt
# 4. regional img attends to itself
# Initialize empty attention mask.
regional_attention_mask = torch.zeros(
(txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
)
for image_mask, t5_embedding_range in zip(
regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
):
# 1. txt attends to itself
regional_attention_mask[
t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
] = 1.0
if image_mask is not None:
# 2. txt attends to corresponding regional img
# Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
image_mask.view(1, img_seq_len)
)
# 3. regional img attends to corresponding txt
# Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
image_mask.view(img_seq_len, 1)
)
# 4. regional img attends to itself
image_mask = image_mask.view(img_seq_len, 1)
regional_attention_mask[txt_seq_len:, txt_seq_len:] += image_mask @ image_mask.T
else:
# We don't allow attention between non-background image regions and global prompts. This helps to ensure
# that regions focus on their local prompts. We do, however, allow attention between background regions
# and global prompts. If we didn't do this, then the background regions would not attend to any txt
# embeddings, which we found experimentally to cause artifacts.
# 2. global txt attends to background region
# Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
background_region_mask.view(1, img_seq_len)
)
# 3. background region attends to global txt
# Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
background_region_mask.view(img_seq_len, 1)
)
# Allow background regions to attend to themselves.
regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(img_seq_len, 1)
regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(1, img_seq_len)
# Convert attention mask to boolean.
regional_attention_mask = regional_attention_mask > 0.5
return regional_attention_mask
@classmethod
def _concat_regional_text_conditioning(
cls,
text_conditionings: list[FluxTextConditioning],
) -> FluxRegionalTextConditioning:
"""Concatenate regional text conditioning data into a single conditioning tensor (with associated masks)."""
concat_t5_embeddings: list[torch.Tensor] = []
concat_t5_embedding_ranges: list[Range] = []
image_masks: list[torch.Tensor | None] = []
# Choose global CLIP embedding.
# Use the first global prompt's CLIP embedding as the global CLIP embedding. If there is no global prompt, use
# the first prompt's CLIP embedding.
global_clip_embedding: torch.Tensor = text_conditionings[0].clip_embeddings
for text_conditioning in text_conditionings:
if text_conditioning.mask is None:
global_clip_embedding = text_conditioning.clip_embeddings
break
cur_t5_embedding_len = 0
for text_conditioning in text_conditionings:
concat_t5_embeddings.append(text_conditioning.t5_embeddings)
concat_t5_embedding_ranges.append(
Range(start=cur_t5_embedding_len, end=cur_t5_embedding_len + text_conditioning.t5_embeddings.shape[1])
)
image_masks.append(text_conditioning.mask)
cur_t5_embedding_len += text_conditioning.t5_embeddings.shape[1]
t5_embeddings = torch.cat(concat_t5_embeddings, dim=1)
# Initialize the txt_ids tensor.
pos_bs, pos_t5_seq_len, _ = t5_embeddings.shape
t5_txt_ids = torch.zeros(
pos_bs, pos_t5_seq_len, 3, dtype=t5_embeddings.dtype, device=TorchDevice.choose_torch_device()
)
return FluxRegionalTextConditioning(
t5_embeddings=t5_embeddings,
clip_embeddings=global_clip_embedding,
t5_txt_ids=t5_txt_ids,
image_masks=image_masks,
t5_embedding_ranges=concat_t5_embedding_ranges,
)
@staticmethod
def preprocess_regional_prompt_mask(
mask: Optional[torch.Tensor], packed_height: int, packed_width: int, dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
"""Preprocess a regional prompt mask to match the target height and width.
If mask is None, returns a mask of all ones with the target height and width.
If mask is not None, resizes the mask to the target height and width using 'nearest' interpolation.
packed_height and packed_width are the target height and width of the mask in the 'packed' latent space.
Returns:
torch.Tensor: The processed mask. shape: (1, 1, packed_height * packed_width).
"""
if mask is None:
return torch.ones((1, 1, packed_height * packed_width), dtype=dtype, device=device)
mask = to_standard_float_mask(mask, out_dtype=dtype)
tf = torchvision.transforms.Resize(
(packed_height, packed_width), interpolation=torchvision.transforms.InterpolationMode.NEAREST
)
# Add a batch dimension to the mask, because torchvision expects shape (batch, channels, h, w).
mask = mask.unsqueeze(0) # Shape: (1, h, w) -> (1, 1, h, w)
resized_mask = tf(mask)
# Flatten the height and width dimensions into a single image_seq_len dimension.
return resized_mask.flatten(start_dim=2)
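
To make the four quadrants of the restricted mask concrete, here is a toy, self-contained sketch (not part of the diff) with two regional prompts of two T5 tokens each and four packed image tokens, following the same fill order as _prepare_restricted_attn_mask:

import torch

txt_seq_len, img_seq_len = 4, 4
t5_ranges = [(0, 2), (2, 4)]                 # t5_embedding_ranges for the two prompts
region_masks = [
    torch.tensor([1.0, 1.0, 0.0, 0.0]),      # region 1 covers the first two image tokens
    torch.tensor([0.0, 0.0, 1.0, 1.0]),      # region 2 covers the last two image tokens
]

attn = torch.zeros(txt_seq_len + img_seq_len, txt_seq_len + img_seq_len)
for (start, end), mask in zip(t5_ranges, region_masks):
    attn[start:end, start:end] = 1.0                            # 1. txt attends to itself
    attn[start:end, txt_seq_len:] = mask.view(1, img_seq_len)   # 2. txt attends to its region
    attn[txt_seq_len:, start:end] = mask.view(img_seq_len, 1)   # 3. region attends to its txt
    col = mask.view(img_seq_len, 1)
    attn[txt_seq_len:, txt_seq_len:] += col @ col.T             # 4. img self-attention stays within the region

print((attn > 0.5).int())  # block-structured 8x8 boolean mask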

View File

@@ -5,10 +5,10 @@ from einops import rearrange
from torch import Tensor
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, attn_mask: Tensor | None = None) -> Tensor:
q, k = apply_rope(q, k, pe)
x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
x = rearrange(x, "B H L D -> B L (H D)")
return x
@@ -24,12 +24,12 @@ def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
out = torch.einsum("...n,d->...nd", pos, omega)
out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
return out.float()
return out.to(dtype=pos.dtype, device=pos.device)
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
xq_ = xq.view(*xq.shape[:-1], -1, 1, 2)
xk_ = xk.view(*xk.shape[:-1], -1, 1, 2)
xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
return xq_out.view(*xq.shape).type_as(xq), xk_out.view(*xk.shape).type_as(xk)
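
A small standalone check (toy shapes, not from the diff) of how the new attn_mask argument behaves in the attention() change above: a boolean mask marks which keys each query may attend to, and fully-unmasked query positions are unaffected:

import torch

# (batch, heads, seq_len, head_dim), matching the layout expected by attention() above.
q, k, v = (torch.randn(1, 2, 6, 8) for _ in range(3))

attn_mask = torch.ones(6, 6, dtype=torch.bool)  # True = "may attend"
attn_mask[0, 3:] = False                        # query 0 ignores the last three keys

masked = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
unmasked = torch.nn.functional.scaled_dot_product_attention(q, k, v)

# Only the masked query position changes; all other positions are numerically unchanged.
print(torch.allclose(masked[:, :, 1:], unmasked[:, :, 1:], atol=1e-6))  # True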

View File

@@ -1,11 +1,16 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
from typing import Optional
import torch
from torch import Tensor, nn
from invokeai.backend.flux.custom_block_processor import CustomDoubleStreamBlockProcessor
from invokeai.backend.flux.custom_block_processor import (
CustomDoubleStreamBlockProcessor,
CustomSingleStreamBlockProcessor,
)
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.modules.layers import (
DoubleStreamBlock,
@@ -31,6 +36,7 @@ class FluxParams:
theta: int
qkv_bias: bool
guidance_embed: bool
out_channels: Optional[int] = None
class Flux(nn.Module):
@@ -43,7 +49,7 @@ class Flux(nn.Module):
self.params = params
self.in_channels = params.in_channels
self.out_channels = self.in_channels
self.out_channels = params.out_channels or self.in_channels
if params.hidden_size % params.num_heads != 0:
raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
pe_dim = params.hidden_size // params.num_heads
@@ -95,6 +101,7 @@ class Flux(nn.Module):
controlnet_double_block_residuals: list[Tensor] | None,
controlnet_single_block_residuals: list[Tensor] | None,
ip_adapter_extensions: list[XLabsIPAdapterExtension],
regional_prompting_extension: RegionalPromptingExtension,
) -> Tensor:
if img.ndim != 3 or txt.ndim != 3:
raise ValueError("Input img and txt tensors must have 3 dimensions.")
@@ -117,7 +124,6 @@ class Flux(nn.Module):
assert len(controlnet_double_block_residuals) == len(self.double_blocks)
for block_index, block in enumerate(self.double_blocks):
assert isinstance(block, DoubleStreamBlock)
img, txt = CustomDoubleStreamBlockProcessor.custom_double_block_forward(
timestep_index=timestep_index,
total_num_timesteps=total_num_timesteps,
@@ -128,6 +134,7 @@ class Flux(nn.Module):
vec=vec,
pe=pe,
ip_adapter_extensions=ip_adapter_extensions,
regional_prompting_extension=regional_prompting_extension,
)
if controlnet_double_block_residuals is not None:
@@ -140,7 +147,17 @@ class Flux(nn.Module):
assert len(controlnet_single_block_residuals) == len(self.single_blocks)
for block_index, block in enumerate(self.single_blocks):
img = block(img, vec=vec, pe=pe)
assert isinstance(block, SingleStreamBlock)
img = CustomSingleStreamBlockProcessor.custom_single_block_forward(
timestep_index=timestep_index,
total_num_timesteps=total_num_timesteps,
block_index=block_index,
block=block,
img=img,
vec=vec,
pe=pe,
regional_prompting_extension=regional_prompting_extension,
)
if controlnet_single_block_residuals is not None:
img[:, txt.shape[1] :, ...] += controlnet_single_block_residuals[block_index]

View File

@@ -66,10 +66,7 @@ class RMSNorm(torch.nn.Module):
self.scale = nn.Parameter(torch.ones(dim))
def forward(self, x: Tensor):
x_dtype = x.dtype
x = x.float()
rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
return (x * rrms).to(dtype=x_dtype) * self.scale
return torch.nn.functional.rms_norm(x, self.scale.shape, self.scale, eps=1e-6)
class QKNorm(torch.nn.Module):
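
The replacement relies on the fused torch.nn.functional.rms_norm op (available in recent PyTorch releases, roughly 2.4+). A quick sanity sketch, with made-up tensors, showing it matches the manual computation it replaces:

import torch

dim = 16
x = torch.randn(2, 5, dim)
scale = torch.ones(dim)

# Old path: explicit root-mean-square normalisation followed by the learned scale.
rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
old = (x * rrms) * scale

# New path: the fused functional op used in the diff above.
new = torch.nn.functional.rms_norm(x, scale.shape, scale, eps=1e-6)

print(torch.allclose(old, new, atol=1e-6))  # True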

View File

@@ -0,0 +1,36 @@
from dataclasses import dataclass
import torch
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
@dataclass
class FluxTextConditioning:
t5_embeddings: torch.Tensor
clip_embeddings: torch.Tensor
# If mask is None, the prompt is a global prompt.
mask: torch.Tensor | None
@dataclass
class FluxRegionalTextConditioning:
# Concatenated text embeddings.
# Shape: (1, concatenated_txt_seq_len, 4096)
t5_embeddings: torch.Tensor
# Shape: (1, concatenated_txt_seq_len, 3)
t5_txt_ids: torch.Tensor
# Global CLIP embeddings.
# Shape: (1, 768)
clip_embeddings: torch.Tensor
# A binary mask indicating the regions of the image that the prompt should be applied to. If None, the prompt is a
# global prompt.
# image_masks[i] is the mask for the ith prompt.
# image_masks[i] has shape (1, image_seq_len) and dtype torch.bool.
image_masks: list[torch.Tensor | None]
# List of ranges that represent the embedding ranges for each mask.
# t5_embedding_ranges[i] contains the range of the t5 embeddings that correspond to image_masks[i].
t5_embedding_ranges: list[Range]
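
A hypothetical instantiation (not from the diff) illustrating how the documented shapes fit together; the two 512-token prompts and the 4096-token image sequence (a 1024x1024 image packed into 64x64 patches) are made-up numbers:

import torch

from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range

img_seq_len = 4096  # packed_height * packed_width
regional = FluxRegionalTextConditioning(
    t5_embeddings=torch.zeros(1, 1024, 4096),    # two prompts of 512 T5 tokens each, concatenated
    t5_txt_ids=torch.zeros(1, 1024, 3),
    clip_embeddings=torch.zeros(1, 768),         # single global CLIP embedding
    image_masks=[
        torch.zeros(1, img_seq_len, dtype=torch.bool),  # regional prompt
        None,                                           # global prompt
    ],
    t5_embedding_ranges=[Range(start=0, end=512), Range(start=512, end=1024)],
)
assert len(regional.image_masks) == len(regional.t5_embedding_ranges)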

View File

@@ -1,11 +0,0 @@
from typing import Union
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer
AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer, ConcatenatedLoRALayer]

View File

@@ -1,34 +0,0 @@
import torch
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
class ConcatenatedLoRALinearSidecarLayer(torch.nn.Module):
def __init__(
self,
concatenated_lora_layer: ConcatenatedLoRALayer,
weight: float,
):
super().__init__()
self._concatenated_lora_layer = concatenated_lora_layer
self._weight = weight
def forward(self, input: torch.Tensor) -> torch.Tensor:
x_chunks: list[torch.Tensor] = []
for lora_layer in self._concatenated_lora_layer.lora_layers:
x_chunk = torch.nn.functional.linear(input, lora_layer.down)
if lora_layer.mid is not None:
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.mid)
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.up, bias=lora_layer.bias)
x_chunk *= self._weight * lora_layer.scale()
x_chunks.append(x_chunk)
# TODO(ryand): Generalize to support concat_axis != 0.
assert self._concatenated_lora_layer.concat_axis == 0
x = torch.cat(x_chunks, dim=-1)
return x
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
self._concatenated_lora_layer.to(device=device, dtype=dtype)
return self

View File

@@ -1,27 +0,0 @@
import torch
from invokeai.backend.lora.layers.lora_layer import LoRALayer
class LoRALinearSidecarLayer(torch.nn.Module):
def __init__(
self,
lora_layer: LoRALayer,
weight: float,
):
super().__init__()
self._lora_layer = lora_layer
self._weight = weight
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = torch.nn.functional.linear(x, self._lora_layer.down)
if self._lora_layer.mid is not None:
x = torch.nn.functional.linear(x, self._lora_layer.mid)
x = torch.nn.functional.linear(x, self._lora_layer.up, bias=self._lora_layer.bias)
x *= self._weight * self._lora_layer.scale()
return x
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
self._lora_layer.to(device=device, dtype=dtype)
return self

View File

@@ -1,24 +0,0 @@
import torch
class LoRASidecarModule(torch.nn.Module):
"""A LoRA sidecar module that wraps an original module and adds LoRA layers to it."""
def __init__(self, orig_module: torch.nn.Module, lora_layers: list[torch.nn.Module]):
super().__init__()
self.orig_module = orig_module
self._lora_layers = lora_layers
def add_lora_layer(self, lora_layer: torch.nn.Module):
self._lora_layers.append(lora_layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
x = self.orig_module(input)
for lora_layer in self._lora_layers:
x += lora_layer(input)
return x
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
self._orig_module.to(device=device, dtype=dtype)
for lora_layer in self._lora_layers:
lora_layer.to(device=device, dtype=dtype)

View File

@@ -67,6 +67,7 @@ class ModelType(str, Enum):
Main = "main"
VAE = "vae"
LoRA = "lora"
ControlLoRa = "control_lora"
ControlNet = "controlnet" # used by model_probe
TextualInversion = "embedding"
IPAdapter = "ip_adapter"
@@ -273,6 +274,36 @@ class LoRALyCORISConfig(LoRAConfigBase):
return Tag(f"{ModelType.LoRA.value}.{ModelFormat.LyCORIS.value}")
class ControlAdapterConfigBase(BaseModel):
default_settings: Optional[ControlAdapterDefaultSettings] = Field(
description="Default settings for this model", default=None
)
class ControlLoRALyCORISConfig(ModelConfigBase, ControlAdapterConfigBase):
"""Model config for Control LoRA models."""
type: Literal[ModelType.ControlLoRa] = ModelType.ControlLoRa
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
format: Literal[ModelFormat.LyCORIS] = ModelFormat.LyCORIS
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.ControlLoRa.value}.{ModelFormat.LyCORIS.value}")
class ControlLoRADiffusersConfig(ModelConfigBase, ControlAdapterConfigBase):
"""Model config for Control LoRA models."""
type: Literal[ModelType.ControlLoRa] = ModelType.ControlLoRa
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.ControlLoRa.value}.{ModelFormat.Diffusers.value}")
class LoRADiffusersConfig(LoRAConfigBase):
"""Model config for LoRA/Diffusers models."""
@@ -304,12 +335,6 @@ class VAEDiffusersConfig(ModelConfigBase):
return Tag(f"{ModelType.VAE.value}.{ModelFormat.Diffusers.value}")
class ControlAdapterConfigBase(BaseModel):
default_settings: Optional[ControlAdapterDefaultSettings] = Field(
description="Default settings for this model", default=None
)
class ControlNetDiffusersConfig(DiffusersConfigBase, ControlAdapterConfigBase):
"""Model config for ControlNet models (diffusers version)."""
@@ -535,6 +560,8 @@ AnyModelConfig = Annotated[
Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
Annotated[ControlLoRALyCORISConfig, ControlLoRALyCORISConfig.get_tag()],
Annotated[ControlLoRADiffusersConfig, ControlLoRADiffusersConfig.get_tag()],
Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
Annotated[T5EncoderConfig, T5EncoderConfig.get_tag()],
Annotated[T5EncoderBnbQuantizedLlmInt8bConfig, T5EncoderBnbQuantizedLlmInt8bConfig.get_tag()],
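
For reference, the discriminator tags the two new configs contribute to AnyModelConfig are "<type>.<format>" strings; a short sketch, assuming ModelFormat.LyCORIS and ModelFormat.Diffusers serialise to "lycoris" and "diffusers" (their values are not shown in this diff):

from invokeai.backend.model_manager.config import ModelFormat, ModelType

assert ModelType.ControlLoRa.value == "control_lora"

# These are the tags returned by ControlLoRALyCORISConfig.get_tag() and
# ControlLoRADiffusersConfig.get_tag() above.
print(f"{ModelType.ControlLoRa.value}.{ModelFormat.LyCORIS.value}")    # control_lora.lycoris
print(f"{ModelType.ControlLoRa.value}.{ModelFormat.Diffusers.value}")  # control_lora.diffusers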

View File

@@ -9,14 +9,6 @@ import torch
from safetensors.torch import load_file
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
lora_model_from_flux_diffusers_state_dict,
)
from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import (
lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.lora.conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.lora.conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
from invokeai.backend.model_manager import (
AnyModel,
AnyModelConfig,
@@ -28,10 +20,25 @@ from invokeai.backend.model_manager import (
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
is_state_dict_likely_flux_control,
lora_model_from_flux_control_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
lora_model_from_flux_diffusers_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
is_state_dict_likely_in_flux_kohya_format,
lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.LyCORIS)
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlLoRa, format=ModelFormat.LyCORIS)
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlLoRa, format=ModelFormat.Diffusers)
class LoRALoader(ModelLoader):
"""Class to load LoRA models."""
@@ -75,7 +82,10 @@ class LoRALoader(ModelLoader):
# https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_flux.py#L1194
model = lora_model_from_flux_diffusers_state_dict(state_dict=state_dict, alpha=None)
elif config.format == ModelFormat.LyCORIS:
model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
elif is_state_dict_likely_flux_control(state_dict=state_dict):
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
else:
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:
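
The LyCORIS branch now disambiguates between ordinary FLUX Kohya LoRAs and the new Control LoRA format at load time. A minimal standalone sketch of the same branching (the local file path is hypothetical):

from safetensors.torch import load_file

from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
    is_state_dict_likely_flux_control,
    lora_model_from_flux_control_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
    is_state_dict_likely_in_flux_kohya_format,
    lora_model_from_flux_kohya_state_dict,
)

state_dict = load_file("flux1-canny-dev-lora.safetensors")  # hypothetical local path

if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
    model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
elif is_state_dict_likely_flux_control(state_dict=state_dict):
    model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
else:
    raise ValueError("State dict is not in a recognised FLUX LyCORIS-style format.")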

View File

@@ -15,9 +15,9 @@ from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import D
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.textual_inversion import TextualInversionModelRaw
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
@@ -43,7 +43,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
(
TextualInversionModelRaw,
IPAdapter,
LoRAModelRaw,
ModelPatchRaw,
SpandrelImageToImageModel,
GroundingDinoPipeline,
SegmentAnythingPipeline,

View File

@@ -15,10 +15,6 @@ from invokeai.backend.flux.controlnet.state_dict_utils import (
is_state_dict_xlabs_controlnet,
)
from invokeai.backend.flux.ip_adapter.state_dict_utils import is_state_dict_xlabs_ip_adapter
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
is_state_dict_likely_in_flux_diffusers_format,
)
from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import is_state_dict_likely_in_flux_kohya_format
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.config import (
AnyModelConfig,
@@ -43,6 +39,13 @@ from invokeai.backend.model_manager.util.model_util import (
lora_token_vector_length,
read_checkpoint_meta,
)
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
is_state_dict_likely_in_flux_diffusers_format,
)
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
is_state_dict_likely_in_flux_kohya_format,
)
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
@@ -199,8 +202,8 @@ class ModelProbe(object):
fields["default_settings"] = fields.get("default_settings")
if not fields["default_settings"]:
if fields["type"] in {ModelType.ControlNet, ModelType.T2IAdapter}:
fields["default_settings"] = get_default_settings_controlnet_t2i_adapter(fields["name"])
if fields["type"] in {ModelType.ControlNet, ModelType.T2IAdapter, ModelType.ControlLoRa}:
fields["default_settings"] = get_default_settings_control_adapters(fields["name"])
elif fields["type"] is ModelType.Main:
fields["default_settings"] = get_default_settings_main(fields["base"])
@@ -258,6 +261,9 @@ class ModelProbe(object):
ckpt = checkpoint if checkpoint else read_checkpoint_meta(model_path, scan=True)
ckpt = ckpt.get("state_dict", ckpt)
if isinstance(ckpt, dict) and is_state_dict_likely_flux_control(ckpt):
return ModelType.ControlLoRa
for key in [str(k) for k in ckpt.keys()]:
if key.startswith(
(
@@ -469,7 +475,7 @@ class ModelProbe(object):
"""
# scan model
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception("The model {model_name} is potentially infected by malware. Aborting import.")
@@ -485,6 +491,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
"lineart anime": "lineart_anime_image_processor",
"lineart_anime": "lineart_anime_image_processor",
"lineart": "lineart_image_processor",
"soft": "hed_image_processor",
"softedge": "hed_image_processor",
"hed": "hed_image_processor",
"shuffle": "content_shuffle_image_processor",
@@ -496,7 +503,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
}
def get_default_settings_controlnet_t2i_adapter(model_name: str) -> Optional[ControlAdapterDefaultSettings]:
def get_default_settings_control_adapters(model_name: str) -> Optional[ControlAdapterDefaultSettings]:
for k, v in MODEL_NAME_TO_PREPROCESSOR.items():
model_name_lower = model_name.lower()
if k in model_name_lower:
@@ -623,8 +630,10 @@ class LoRACheckpointProbe(CheckpointProbeBase):
return ModelFormat.LyCORIS
def get_base_type(self) -> BaseModelType:
if is_state_dict_likely_in_flux_kohya_format(self.checkpoint) or is_state_dict_likely_in_flux_diffusers_format(
self.checkpoint
if (
is_state_dict_likely_in_flux_kohya_format(self.checkpoint)
or is_state_dict_likely_in_flux_diffusers_format(self.checkpoint)
or is_state_dict_likely_flux_control(self.checkpoint)
):
return BaseModelType.Flux
@@ -1033,6 +1042,7 @@ class T2IAdapterFolderProbe(FolderProbeBase):
ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.VAE, VaeFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.LoRA, LoRAFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.ControlLoRa, LoRAFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T5Encoder, T5EncoderFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe)
@@ -1045,6 +1055,7 @@ ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelI
ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.VAE, VaeCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.LoRA, LoRACheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.ControlLoRa, LoRACheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.TextualInversion, TextualInversionCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.ControlNet, ControlNetCheckpointProbe)
ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpointProbe)
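
The default-settings lookup is a simple first-substring-match over MODEL_NAME_TO_PREPROCESSOR. A toy reimplementation (using only the entries visible above) showing why the new "soft" key still resolves soft-edge model names to the HED processor:

PREPROCESSORS = {
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
}

def pick_preprocessor(model_name: str) -> str | None:
    # Mirrors get_default_settings_control_adapters: the first substring match wins.
    model_name_lower = model_name.lower()
    for key, preprocessor in PREPROCESSORS.items():
        if key in model_name_lower:
            return preprocessor
    return None

print(pick_preprocessor("FLUX SoftEdge Control LoRA"))  # hed_image_processor
print(pick_preprocessor("my mystery model"))            # None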

View File

@@ -488,6 +488,22 @@ union_cnet_flux = StarterModel(
type=ModelType.ControlNet,
)
# endregion
# region Control LoRA
flux_canny_control_lora = StarterModel(
name="Hard Edge Detection (Canny)",
base=BaseModelType.Flux,
source="black-forest-labs/FLUX.1-Canny-dev-lora::flux1-canny-dev-lora.safetensors",
description="Uses detected edges in the image to control composition.",
type=ModelType.ControlLoRa,
)
flux_depth_control_lora = StarterModel(
name="Depth Map",
base=BaseModelType.Flux,
source="black-forest-labs/FLUX.1-Depth-dev-lora::flux1-depth-dev-lora.safetensors",
description="Uses depth information in the image to control the depth in the generation.",
type=ModelType.ControlLoRa,
)
# endregion
# region T2I Adapter
t2i_canny_sd1 = StarterModel(
name="Hard Edge Detection (canny)",
@@ -630,6 +646,8 @@ STARTER_MODELS: list[StarterModel] = [
tile_sdxl,
union_cnet_sdxl,
union_cnet_flux,
flux_canny_control_lora,
flux_depth_control_lora,
t2i_canny_sd1,
t2i_sketch_sd1,
t2i_depth_sd1,
@@ -688,6 +706,8 @@ flux_bundle: list[StarterModel] = [
clip_l_encoder,
union_cnet_flux,
ip_adapter_flux,
flux_canny_control_lora,
flux_depth_control_lora,
]
STARTER_BUNDLES: dict[str, list[StarterModel]] = {

View File

@@ -44,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
return checkpoint
def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
if str(path).endswith(".safetensors"):
try:
path_str = path.as_posix() if isinstance(path, Path) else path
@@ -52,16 +52,15 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
except Exception:
# TODO: create issue for support "meta"?
checkpoint = safetensors.torch.load_file(path, device="cpu")
elif str(path).endswith(".gguf"):
# The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
else:
if scan:
scan_result = scan_file_path(path)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
if str(path).endswith(".gguf"):
# The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
else:
checkpoint = torch.load(path, map_location=torch.device("meta"))
checkpoint = torch.load(path, map_location=torch.device("meta"))
return checkpoint
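
The non-GGUF branch still loads onto the "meta" device, which materialises only tensor metadata rather than the weights themselves; a small illustration with a throwaway checkpoint (toy file name):

import torch

torch.save({"weight": torch.randn(4, 4)}, "toy.ckpt")

# map_location="meta" keeps storages off the CPU/GPU: shapes and dtypes are readable,
# but no tensor data is loaded, which keeps read_checkpoint_meta() cheap for large files.
meta_sd = torch.load("toy.ckpt", map_location=torch.device("meta"))
print(meta_sd["weight"].device, meta_sd["weight"].shape)  # meta torch.Size([4, 4])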

View File

@@ -5,17 +5,14 @@ from __future__ import annotations
import pickle
from contextlib import contextmanager
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union
from typing import Any, Iterator, List, Optional, Tuple, Type, Union
import numpy as np
import torch
from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
@@ -176,180 +173,3 @@ class ModelPatcher:
assert hasattr(unet, "disable_freeu") # mypy doesn't pick up this attribute?
if did_apply_freeu:
unet.disable_freeu()
class ONNXModelPatcher:
# based on
# https://github.com/ssube/onnx-web/blob/ca2e436f0623e18b4cfe8a0363fcfcf10508acf7/api/onnx_web/convert/diffusion/lora.py#L323
@classmethod
@contextmanager
def apply_lora(
cls,
model: IAIOnnxRuntimeModel,
loras: List[Tuple[LoRAModelRaw, float]],
prefix: str,
) -> None:
from invokeai.backend.models.base import IAIOnnxRuntimeModel
if not isinstance(model, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_weights = {}
try:
blended_loras: Dict[str, torch.Tensor] = {}
for lora, lora_weight in loras:
for layer_key, layer in lora.layers.items():
if not layer_key.startswith(prefix):
continue
layer.to(dtype=torch.float32)
layer_key = layer_key.replace(prefix, "")
# TODO: rewrite to pass original tensor weight(required by ia3)
layer_weight = layer.get_weight(None).detach().cpu().numpy() * lora_weight
if layer_key in blended_loras:
blended_loras[layer_key] += layer_weight
else:
blended_loras[layer_key] = layer_weight
node_names = {}
for node in model.nodes.values():
node_names[node.name.replace("/", "_").replace(".", "_").lstrip("_")] = node.name
for layer_key, lora_weight in blended_loras.items():
conv_key = layer_key + "_Conv"
gemm_key = layer_key + "_Gemm"
matmul_key = layer_key + "_MatMul"
if conv_key in node_names or gemm_key in node_names:
if conv_key in node_names:
conv_node = model.nodes[node_names[conv_key]]
else:
conv_node = model.nodes[node_names[gemm_key]]
weight_name = [n for n in conv_node.input if ".weight" in n][0]
orig_weight = model.tensors[weight_name]
if orig_weight.shape[-2:] == (1, 1):
if lora_weight.shape[-2:] == (1, 1):
new_weight = orig_weight.squeeze((3, 2)) + lora_weight.squeeze((3, 2))
else:
new_weight = orig_weight.squeeze((3, 2)) + lora_weight
new_weight = np.expand_dims(new_weight, (2, 3))
else:
if orig_weight.shape != lora_weight.shape:
new_weight = orig_weight + lora_weight.reshape(orig_weight.shape)
else:
new_weight = orig_weight + lora_weight
orig_weights[weight_name] = orig_weight
model.tensors[weight_name] = new_weight.astype(orig_weight.dtype)
elif matmul_key in node_names:
weight_node = model.nodes[node_names[matmul_key]]
matmul_name = [n for n in weight_node.input if "MatMul" in n][0]
orig_weight = model.tensors[matmul_name]
new_weight = orig_weight + lora_weight.transpose()
orig_weights[matmul_name] = orig_weight
model.tensors[matmul_name] = new_weight.astype(orig_weight.dtype)
else:
# warn? err?
pass
yield
finally:
# restore original weights
for name, orig_weight in orig_weights.items():
model.tensors[name] = orig_weight
@classmethod
@contextmanager
def apply_ti(
cls,
tokenizer: CLIPTokenizer,
text_encoder: IAIOnnxRuntimeModel,
ti_list: List[Tuple[str, Any]],
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
from invokeai.backend.models.base import IAIOnnxRuntimeModel
if not isinstance(text_encoder, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_embeddings = None
try:
# HACK: The CLIPTokenizer API does not include a way to remove tokens after calling add_tokens(...). As a
# workaround, we create a full copy of `tokenizer` so that its original behavior can be restored after
# exiting this `apply_ti(...)` context manager.
#
# In a previous implementation, the deep copy was obtained with `ti_tokenizer = copy.deepcopy(tokenizer)`,
# but a pickle roundtrip was found to be much faster (1 sec vs. 0.05 secs).
ti_tokenizer = pickle.loads(pickle.dumps(tokenizer))
ti_manager = TextualInversionManager(ti_tokenizer)
def _get_trigger(ti_name: str, index: int) -> str:
trigger = ti_name
if index > 0:
trigger += f"-!pad-{i}"
return f"<{trigger}>"
# modify text_encoder
orig_embeddings = text_encoder.tensors["text_model.embeddings.token_embedding.weight"]
# modify tokenizer
new_tokens_added = 0
for ti_name, ti in ti_list:
if ti.embedding_2 is not None:
ti_embedding = (
ti.embedding_2 if ti.embedding_2.shape[1] == orig_embeddings.shape[0] else ti.embedding
)
else:
ti_embedding = ti.embedding
for i in range(ti_embedding.shape[0]):
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
embeddings = np.concatenate(
(np.copy(orig_embeddings), np.zeros((new_tokens_added, orig_embeddings.shape[1]))),
axis=0,
)
for ti_name, _ in ti_list:
ti_tokens = []
for i in range(ti_embedding.shape[0]):
embedding = ti_embedding[i].detach().numpy()
trigger = _get_trigger(ti_name, i)
token_id = ti_tokenizer.convert_tokens_to_ids(trigger)
if token_id == ti_tokenizer.unk_token_id:
raise RuntimeError(f"Unable to find token id for token '{trigger}'")
if embeddings[token_id].shape != embedding.shape:
raise ValueError(
f"Cannot load embedding for {trigger}. It was trained on a model with token dimension"
f" {embedding.shape[0]}, but the current model has token dimension"
f" {embeddings[token_id].shape[0]}."
)
embeddings[token_id] = embedding
ti_tokens.append(token_id)
if len(ti_tokens) > 1:
ti_manager.pad_tokens[ti_tokens[0]] = ti_tokens[1:]
text_encoder.tensors["text_model.embeddings.token_embedding.weight"] = embeddings.astype(
orig_embeddings.dtype
)
yield ti_tokenizer, ti_manager
finally:
# restore
if orig_embeddings is not None:
text_encoder.tensors["text_model.embeddings.token_embedding.weight"] = orig_embeddings

View File

@@ -0,0 +1,22 @@
from abc import ABC, abstractmethod
import torch
class BaseLayerPatch(ABC):
@abstractmethod
def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
"""Get the parameter residual updates that should be applied to the original parameters. Parameters omitted
from the returned dict are not updated.
"""
...
@abstractmethod
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
"""Move all internal tensors to the specified device and dtype."""
...
@abstractmethod
def calc_size(self) -> int:
"""Calculate the total size of all internal tensors in bytes."""
...
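
A toy implementation of the new interface (not part of the diff) to show the contract: get_parameters returns additive residuals keyed by parameter name, and the patcher decides how to apply them:

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch


class AddBiasPatch(BaseLayerPatch):
    """Toy patch that nudges a module's bias by a fixed delta, scaled by the patch weight."""

    def __init__(self, delta: torch.Tensor):
        self._delta = delta

    def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
        # Only "bias" is returned, so the module's other parameters are left untouched.
        return {"bias": self._delta * weight}

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        self._delta = self._delta.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        return self._delta.numel() * self._delta.element_size()


patch = AddBiasPatch(torch.full((8,), 0.1))
print(patch.get_parameters(torch.nn.Linear(4, 8), weight=0.5)["bias"])  # tensor of 0.05s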

View File

@@ -2,8 +2,8 @@ from typing import Optional, Sequence
import torch
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
class ConcatenatedLoRALayer(LoRALayerBase):
@@ -20,7 +20,7 @@ class ConcatenatedLoRALayer(LoRALayerBase):
self.lora_layers = lora_layers
self.concat_axis = concat_axis
def rank(self) -> int | None:
def _rank(self) -> int | None:
return None
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:

View File

@@ -0,0 +1,19 @@
import torch
from invokeai.backend.patches.layers.lora_layer import LoRALayer
class FluxControlLoRALayer(LoRALayer):
"""A special case of LoRALayer for use with FLUX Control LoRAs that pads the target parameter with zeros if the
shapes don't match.
"""
def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
"""This overrides the base class behavior to skip the reshaping step."""
scale = self.scale()
params = {"weight": self.get_weight(orig_module.weight) * (weight * scale)}
bias = self.get_bias(orig_module.bias)
if bias is not None:
params["bias"] = bias * (weight * scale)
return params

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
@@ -20,7 +20,7 @@ class FullLayer(LoRALayerBase):
cls.warn_on_unhandled_keys(values=values, handled_keys={"diff", "diff_b"})
return layer
def rank(self) -> int | None:
def _rank(self) -> int | None:
return None
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
class IA3Layer(LoRALayerBase):
@@ -16,7 +16,7 @@ class IA3Layer(LoRALayerBase):
self.weight = weight
self.on_input = on_input
def rank(self) -> int | None:
def _rank(self) -> int | None:
return None
@classmethod

View File

@@ -2,7 +2,7 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
@@ -32,7 +32,7 @@ class LoHALayer(LoRALayerBase):
self.t2 = t2
assert (self.t1 is None) == (self.t2 is None)
def rank(self) -> int | None:
def _rank(self) -> int | None:
return self.w1_b.shape[0]
@classmethod

View File

@@ -2,7 +2,7 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
@@ -39,7 +39,7 @@ class LoKRLayer(LoRALayerBase):
assert (self.w2 is None) != (self.w2_a is None)
assert (self.w2_a is None) == (self.w2_b is None)
def rank(self) -> int | None:
def _rank(self) -> int | None:
if self.w1_b is not None:
return self.w1_b.shape[0]
elif self.w2_b is not None:

View File

@@ -2,7 +2,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
@@ -55,7 +55,7 @@ class LoRALayer(LoRALayerBase):
return layer
def rank(self) -> int:
def _rank(self) -> int:
return self.down.shape[0]
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:

View File

@@ -1,12 +1,13 @@
from typing import Dict, Optional, Set
from typing import Optional
import torch
import invokeai.backend.util.logging as logger
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class LoRALayerBase:
class LoRALayerBase(BaseLayerPatch):
"""Base class for all LoRA-like patching layers."""
# Note: It is tempting to make this a torch.nn.Module sub-class and make all tensors 'torch.nn.Parameter's. Then we
@@ -23,6 +24,7 @@ class LoRALayerBase:
def _parse_bias(
cls, bias_indices: torch.Tensor | None, bias_values: torch.Tensor | None, bias_size: torch.Tensor | None
) -> torch.Tensor | None:
"""Helper function to parse a bias tensor from a state dict in LyCORIS format."""
assert (bias_indices is None) == (bias_values is None) == (bias_size is None)
bias = None
@@ -37,11 +39,14 @@ class LoRALayerBase:
) -> float | None:
return alpha.item() if alpha is not None else None
def rank(self) -> int | None:
def _rank(self) -> int | None:
"""Return the rank of the LoRA-like layer. Or None if the layer does not have a rank. This value is used to
calculate the scale.
"""
raise NotImplementedError()
def scale(self) -> float:
rank = self.rank()
rank = self._rank()
if self._alpha is None or rank is None:
return 1.0
return self._alpha / rank
@@ -52,15 +57,23 @@ class LoRALayerBase:
def get_bias(self, orig_bias: torch.Tensor) -> Optional[torch.Tensor]:
return self.bias
def get_parameters(self, orig_module: torch.nn.Module) -> Dict[str, torch.Tensor]:
params = {"weight": self.get_weight(orig_module.weight)}
def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
scale = self.scale()
params = {"weight": self.get_weight(orig_module.weight) * (weight * scale)}
bias = self.get_bias(orig_module.bias)
if bias is not None:
params["bias"] = bias
params["bias"] = bias * (weight * scale)
# Reshape all params to match the original module's shape.
for param_name, param_weight in params.items():
orig_param = orig_module.get_parameter(param_name)
if param_weight.shape != orig_param.shape:
params[param_name] = param_weight.reshape(orig_param.shape)
return params
@classmethod
def warn_on_unhandled_keys(cls, values: Dict[str, torch.Tensor], handled_keys: Set[str]):
def warn_on_unhandled_keys(cls, values: dict[str, torch.Tensor], handled_keys: set[str]):
"""Log a warning if values contains unhandled keys."""
unknown_keys = set(values.keys()) - handled_keys
if unknown_keys:
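
get_parameters now folds the patch weight and the alpha/rank scale into the returned residuals. A quick numerical check, assuming LoRALayer can be constructed positionally as (up, mid, down, alpha, bias), as in the conversion code later in this diff:

import torch

from invokeai.backend.patches.layers.lora_layer import LoRALayer

out_features, in_features, rank = 8, 4, 2
up = torch.randn(out_features, rank)
down = torch.randn(rank, in_features)

layer = LoRALayer(up, None, down, 1.0, None)  # (up, mid, down, alpha, bias)
orig = torch.nn.Linear(in_features, out_features)

params = layer.get_parameters(orig, weight=0.8)

# scale = alpha / rank = 0.5, so the residual is (up @ down) * (0.8 * 0.5).
print(torch.allclose(params["weight"], up @ down * 0.4))  # True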

View File

@@ -2,7 +2,7 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
@@ -20,7 +20,7 @@ class NormLayer(LoRALayerBase):
cls.warn_on_unhandled_keys(values, {"w_norm", "b_norm"})
return layer
def rank(self) -> int | None:
def _rank(self) -> int | None:
return None
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:

View File

@@ -0,0 +1,27 @@
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
class SetParameterLayer(BaseLayerPatch):
"""A layer that sets a single parameter to a new target value.
(The diff between the target value and current value is calculated internally.)
"""
def __init__(self, param_name: str, weight: torch.Tensor):
super().__init__()
self.weight = weight
self.param_name = param_name
def get_parameters(self, orig_module: torch.nn.Module, weight: float) -> dict[str, torch.Tensor]:
# Note: We intentionally ignore the weight parameter here. This matches the behavior in the official FLUX
# Control LoRA implementation.
diff = self.weight - orig_module.get_parameter(self.param_name)
return {self.param_name: diff}
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
self.weight = self.weight.to(device=device, dtype=dtype)
def calc_size(self) -> int:
return calc_tensor_size(self.weight)
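
A small usage sketch (toy module, not from the diff) showing that the returned residual is exactly the difference needed to land on the target value, regardless of the patch weight:

import torch

from invokeai.backend.patches.layers.set_parameter_layer import SetParameterLayer

norm = torch.nn.LayerNorm(4)          # "weight" parameter initialised to ones
target = torch.full((4,), 2.0)

patch = SetParameterLayer("weight", target)
residual = patch.get_parameters(norm, weight=0.7)["weight"]

# The 0.7 patch weight is intentionally ignored; adding the residual reproduces the target exactly.
print(torch.allclose(norm.weight + residual, target))  # True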

View File

@@ -2,16 +2,16 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.full_layer import FullLayer
from invokeai.backend.patches.layers.ia3_layer import IA3Layer
from invokeai.backend.patches.layers.loha_layer import LoHALayer
from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.norm_layer import NormLayer
def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> AnyLoRALayer:
def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> BaseLayerPatch:
# Detect layers according to LyCORIS detection logic(`weight_list_det`)
# https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules

View File

@@ -0,0 +1,84 @@
import re
from typing import Any, Dict
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.set_parameter_layer import SetParameterLayer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
# A regex pattern that matches all of the keys in the Flux Dev/Canny LoRA format.
# Example keys:
# guidance_in.in_layer.lora_B.bias
# single_blocks.0.linear1.lora_A.weight
# double_blocks.0.img_attn.norm.key_norm.scale
FLUX_CONTROL_TRANSFORMER_KEY_REGEX = r"(\w+\.)+(lora_A\.weight|lora_B\.weight|lora_B\.bias|scale)"
def is_state_dict_likely_flux_control(state_dict: Dict[str, Any]) -> bool:
"""Checks if the provided state dict is likely in the FLUX Control LoRA format.
This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
"""
all_keys_match = all(re.match(FLUX_CONTROL_TRANSFORMER_KEY_REGEX, str(k)) for k in state_dict.keys())
# Check the shape of the img_in weight, because this layer shape is modified by FLUX control LoRAs.
lora_a_weight = state_dict.get("img_in.lora_A.weight", None)
lora_b_bias = state_dict.get("img_in.lora_B.bias", None)
lora_b_weight = state_dict.get("img_in.lora_B.weight", None)
return (
all_keys_match
and lora_a_weight is not None
and lora_b_bias is not None
and lora_b_weight is not None
and lora_a_weight.shape[1] == 128
and lora_b_weight.shape[0] == 3072
and lora_b_bias.shape[0] == 3072
)
def lora_model_from_flux_control_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
for key, value in state_dict.items():
key_props = key.split(".")
layer_prop_size = -2 if any(prop in key for prop in ["lora_B", "lora_A"]) else -1
layer_name = ".".join(key_props[:layer_prop_size])
param_name = ".".join(key_props[layer_prop_size:])
if layer_name not in grouped_state_dict:
grouped_state_dict[layer_name] = {}
grouped_state_dict[layer_name][param_name] = value
# Create LoRA layers.
layers: dict[str, BaseLayerPatch] = {}
for layer_key, layer_state_dict in grouped_state_dict.items():
prefixed_key = f"{FLUX_LORA_TRANSFORMER_PREFIX}{layer_key}"
if layer_key == "img_in":
# img_in is a special case because it changes the shape of the original weight.
layers[prefixed_key] = FluxControlLoRALayer(
layer_state_dict["lora_B.weight"],
None,
layer_state_dict["lora_A.weight"],
None,
layer_state_dict["lora_B.bias"],
)
elif all(k in layer_state_dict for k in ["lora_A.weight", "lora_B.bias", "lora_B.weight"]):
layers[prefixed_key] = LoRALayer(
layer_state_dict["lora_B.weight"],
None,
layer_state_dict["lora_A.weight"],
None,
layer_state_dict["lora_B.bias"],
)
elif "scale" in layer_state_dict:
layers[prefixed_key] = SetParameterLayer("scale", layer_state_dict["scale"])
else:
raise ValueError(f"{layer_key} not expected")
return ModelPatchRaw(layers=layers)
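
The grouping step splits each key into a layer name and a parameter name, keeping "lora_A.*"/"lora_B.*" suffixes together while plain "scale" keys keep their full prefix. A standalone sketch using the example keys from the comment above:

example_keys = [
    "double_blocks.0.img_attn.qkv.lora_A.weight",
    "double_blocks.0.img_attn.qkv.lora_B.weight",
    "double_blocks.0.img_attn.qkv.lora_B.bias",
    "double_blocks.0.img_attn.norm.key_norm.scale",
]

grouped: dict[str, list[str]] = {}
for key in example_keys:
    props = key.split(".")
    # lora_A/lora_B params occupy the last two components; "scale" occupies only the last one.
    layer_prop_size = -2 if any(p in key for p in ["lora_B", "lora_A"]) else -1
    layer_name = ".".join(props[:layer_prop_size])
    param_name = ".".join(props[layer_prop_size:])
    grouped.setdefault(layer_name, []).append(param_name)

print(grouped)
# {'double_blocks.0.img_attn.qkv': ['lora_A.weight', 'lora_B.weight', 'lora_B.bias'],
#  'double_blocks.0.img_attn.norm.key_norm': ['scale']}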

View File

@@ -2,11 +2,11 @@ from typing import Dict
import torch
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool:
@@ -30,7 +30,9 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
return all_keys_in_peft_format and all_expected_keys_present
def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor], alpha: float | None) -> LoRAModelRaw:
def lora_model_from_flux_diffusers_state_dict(
state_dict: Dict[str, torch.Tensor], alpha: float | None
) -> ModelPatchRaw:
"""Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.
This function is based on:
@@ -49,7 +51,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
mlp_ratio = 4.0
mlp_hidden_dim = int(hidden_size * mlp_ratio)
layers: dict[str, AnyLoRALayer] = {}
layers: dict[str, BaseLayerPatch] = {}
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
if src_key in grouped_state_dict:
@@ -215,7 +217,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
layers_with_prefix = {f"{FLUX_LORA_TRANSFORMER_PREFIX}{k}": v for k, v in layers.items()}
return LoRAModelRaw(layers=layers_with_prefix)
return ModelPatchRaw(layers=layers_with_prefix)
def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:

View File

@@ -3,10 +3,13 @@ from typing import Any, Dict, TypeVar
import torch
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_lora_constants import (
FLUX_LORA_CLIP_PREFIX,
FLUX_LORA_TRANSFORMER_PREFIX,
)
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
# A regex pattern that matches all of the transformer keys in the Kohya FLUX LoRA format.
# Example keys:
@@ -36,7 +39,7 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
)
def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
# Group keys by layer.
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
for key, value in state_dict.items():
@@ -61,14 +64,14 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
clip_grouped_sd = _convert_flux_clip_kohya_state_dict_to_invoke_format(clip_grouped_sd)
# Create LoRA layers.
layers: dict[str, AnyLoRALayer] = {}
layers: dict[str, BaseLayerPatch] = {}
for layer_key, layer_state_dict in transformer_grouped_sd.items():
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
for layer_key, layer_state_dict in clip_grouped_sd.items():
layers[FLUX_LORA_CLIP_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
# Create and return the LoRAModelRaw.
return LoRAModelRaw(layers=layers)
return ModelPatchRaw(layers=layers)
T = TypeVar("T")

View File

@@ -2,19 +2,19 @@ from typing import Dict
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_state(state_dict)
layers: dict[str, AnyLoRALayer] = {}
layers: dict[str, BaseLayerPatch] = {}
for layer_key, values in grouped_state_dict.items():
layers[layer_key] = any_lora_layer_from_state_dict(values)
return LoRAModelRaw(layers=layers)
return ModelPatchRaw(layers=layers)
def _group_state(state_dict: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, torch.Tensor]]:
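For orientation, a minimal sketch of driving the conversion above from a LoRA checkpoint. The import path and the checkpoint file name are assumptions for illustration; only lora_model_from_sd_state_dict itself is taken from this diff.

from safetensors.torch import load_file

# NOTE: the module path below is assumed for illustration; the diff shows the
# function body but not the file it lives in.
from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict

state_dict = load_file("my_lora.safetensors")  # hypothetical checkpoint path
patch = lora_model_from_sd_state_dict(state_dict)
print(f"Patch holds {len(patch.layers)} layers, ~{patch.calc_size()} bytes")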

View File

@@ -3,20 +3,17 @@ from typing import Mapping, Optional
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.raw_model import RawModel
class LoRAModelRaw(RawModel): # (torch.nn.Module):
def __init__(self, layers: Mapping[str, AnyLoRALayer]):
class ModelPatchRaw(RawModel):
def __init__(self, layers: Mapping[str, BaseLayerPatch]):
self.layers = layers
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
for _key, layer in self.layers.items():
for layer in self.layers.values():
layer.to(device=device, dtype=dtype)
def calc_size(self) -> int:
model_size = 0
for _, layer in self.layers.items():
model_size += layer.calc_size()
return model_size
return sum(layer.calc_size() for layer in self.layers.values())
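As a quick illustration of the renamed container's interface (the empty mapping is an illustrative assumption; real patches come from the conversion functions above):

import torch
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

# Degenerate example: with no layers, calc_size() is 0 and to() is a no-op.
patch = ModelPatchRaw(layers={})
assert patch.calc_size() == 0
patch.to(device=torch.device("cpu"), dtype=torch.float32)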

View File

@@ -3,26 +3,23 @@ from typing import Dict, Iterable, Optional, Tuple
import torch
from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.sidecar_layers.concatenated_lora.concatenated_lora_linear_sidecar_layer import (
ConcatenatedLoRALinearSidecarLayer,
)
from invokeai.backend.lora.sidecar_layers.lora.lora_linear_sidecar_layer import LoRALinearSidecarLayer
from invokeai.backend.lora.sidecar_layers.lora_sidecar_module import LoRASidecarModule
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
from invokeai.backend.patches.sidecar_wrappers.utils import wrap_module_with_sidecar_wrapper
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
class LoRAPatcher:
class LayerPatcher:
@staticmethod
@torch.no_grad()
@contextmanager
def apply_lora_patches(
def apply_model_patches(
model: torch.nn.Module,
patches: Iterable[Tuple[LoRAModelRaw, float]],
patches: Iterable[Tuple[ModelPatchRaw, float]],
prefix: str,
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
):
@@ -40,7 +37,7 @@ class LoRAPatcher:
original_weights = OriginalWeightsStorage(cached_weights)
try:
for patch, patch_weight in patches:
LoRAPatcher.apply_lora_patch(
LayerPatcher.apply_model_patch(
model=model,
prefix=prefix,
patch=patch,
@@ -52,14 +49,15 @@ class LoRAPatcher:
yield
finally:
for param_key, weight in original_weights.get_changed_weights():
model.get_parameter(param_key).copy_(weight)
cur_param = model.get_parameter(param_key)
cur_param.data = weight.to(dtype=cur_param.dtype, device=cur_param.device, copy=True)
@staticmethod
@torch.no_grad()
def apply_lora_patch(
def apply_model_patch(
model: torch.nn.Module,
prefix: str,
patch: LoRAModelRaw,
patch: ModelPatchRaw,
patch_weight: float,
original_weights: OriginalWeightsStorage,
):
@@ -87,46 +85,70 @@ class LoRAPatcher:
if not layer_key.startswith(prefix):
continue
module_key, module = LoRAPatcher._get_submodule(
module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
# All of the LoRA weight calculations will be done on the same device as the module weight.
# (Performance will be best if this is a CUDA device.)
device = module.weight.device
dtype = module.weight.dtype
LayerPatcher._apply_model_layer_patch(
module_to_patch=module,
module_to_patch_key=module_key,
patch=layer,
patch_weight=patch_weight,
original_weights=original_weights,
)
layer_scale = layer.scale()
@staticmethod
@torch.no_grad()
def _apply_model_layer_patch(
module_to_patch: torch.nn.Module,
module_to_patch_key: str,
patch: BaseLayerPatch,
patch_weight: float,
original_weights: OriginalWeightsStorage,
):
# All of the LoRA weight calculations will be done on the same device as the module weight.
# (Performance will be best if this is a CUDA device.)
first_param = next(module_to_patch.parameters())
device = first_param.device
dtype = first_param.dtype
# We intentionally move to the target device first, then cast. Experimentally, this was found to
# be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
# same thing in a single call to '.to(...)'.
layer.to(device=device)
layer.to(dtype=torch.float32)
# We intentionally move to the target device first, then cast. Experimentally, this was found to
# be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
# same thing in a single call to '.to(...)'.
patch.to(device=device)
patch.to(dtype=torch.float32)
# TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
# devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
for param_name, lora_param_weight in layer.get_parameters(module).items():
param_key = module_key + "." + param_name
module_param = module.get_parameter(param_name)
# TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
# devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
for param_name, param_weight in patch.get_parameters(module_to_patch, weight=patch_weight).items():
param_key = module_to_patch_key + "." + param_name
module_param = module_to_patch.get_parameter(param_name)
# Save original weight
original_weights.save(param_key, module_param)
# Save original weight
original_weights.save(param_key, module_param)
if module_param.shape != lora_param_weight.shape:
lora_param_weight = lora_param_weight.reshape(module_param.shape)
# HACK(ryand): This condition is only necessary to handle layers in FLUX control LoRAs that change the
# shape of the original layer.
if module_param.nelement() != param_weight.nelement():
assert isinstance(patch, FluxControlLoRALayer)
expanded_weight = pad_with_zeros(module_param, param_weight.shape)
setattr(
module_to_patch,
param_name,
torch.nn.Parameter(expanded_weight, requires_grad=module_param.requires_grad),
)
module_param = expanded_weight
lora_param_weight *= patch_weight * layer_scale
module_param += lora_param_weight.to(dtype=dtype)
module_param += param_weight.to(dtype=dtype)
layer.to(device=TorchDevice.CPU_DEVICE)
patch.to(device=TorchDevice.CPU_DEVICE)
@staticmethod
@torch.no_grad()
@contextmanager
def apply_lora_sidecar_patches(
def apply_model_sidecar_patches(
model: torch.nn.Module,
patches: Iterable[Tuple[LoRAModelRaw, float]],
patches: Iterable[Tuple[ModelPatchRaw, float]],
prefix: str,
dtype: torch.dtype,
):
@@ -147,7 +169,7 @@ class LoRAPatcher:
original_modules: dict[str, torch.nn.Module] = {}
try:
for patch, patch_weight in patches:
LoRAPatcher._apply_lora_sidecar_patch(
LayerPatcher._apply_model_sidecar_patch(
model=model,
prefix=prefix,
patch=patch,
@@ -160,14 +182,14 @@ class LoRAPatcher:
# Restore original modules.
# Note: This logic assumes no nested modules in original_modules.
for module_key, orig_module in original_modules.items():
module_parent_key, module_name = LoRAPatcher._split_parent_key(module_key)
module_parent_key, module_name = LayerPatcher._split_parent_key(module_key)
parent_module = model.get_submodule(module_parent_key)
LoRAPatcher._set_submodule(parent_module, module_name, orig_module)
LayerPatcher._set_submodule(parent_module, module_name, orig_module)
@staticmethod
def _apply_lora_sidecar_patch(
def _apply_model_sidecar_patch(
model: torch.nn.Module,
patch: LoRAModelRaw,
patch: ModelPatchRaw,
patch_weight: float,
prefix: str,
original_modules: dict[str, torch.nn.Module],
@@ -190,32 +212,50 @@ class LoRAPatcher:
if not layer_key.startswith(prefix):
continue
module_key, module = LoRAPatcher._get_submodule(
module_key, module = LayerPatcher._get_submodule(
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
)
# Initialize the LoRA sidecar layer.
lora_sidecar_layer = LoRAPatcher._initialize_lora_sidecar_layer(module, layer, patch_weight)
LayerPatcher._apply_model_layer_wrapper_patch(
model=model,
module_to_patch=module,
module_to_patch_key=module_key,
patch=layer,
patch_weight=patch_weight,
original_modules=original_modules,
dtype=dtype,
)
# Replace the original module with a LoRASidecarModule if it has not already been done.
if module_key in original_modules:
# The module has already been patched with a LoRASidecarModule. Append to it.
assert isinstance(module, LoRASidecarModule)
lora_sidecar_module = module
else:
# The module has not yet been patched with a LoRASidecarModule. Create one.
lora_sidecar_module = LoRASidecarModule(module, [])
original_modules[module_key] = module
module_parent_key, module_name = LoRAPatcher._split_parent_key(module_key)
module_parent = model.get_submodule(module_parent_key)
LoRAPatcher._set_submodule(module_parent, module_name, lora_sidecar_module)
@staticmethod
@torch.no_grad()
def _apply_model_layer_wrapper_patch(
model: torch.nn.Module,
module_to_patch: torch.nn.Module,
module_to_patch_key: str,
patch: BaseLayerPatch,
patch_weight: float,
original_modules: dict[str, torch.nn.Module],
dtype: torch.dtype,
):
"""Apply a single LoRA wrapper patch to a model."""
# Replace the original module with a BaseSidecarWrapper if it has not already been done.
if not isinstance(module_to_patch, BaseSidecarWrapper):
wrapped_module = wrap_module_with_sidecar_wrapper(orig_module=module_to_patch)
original_modules[module_to_patch_key] = module_to_patch
module_parent_key, module_name = LayerPatcher._split_parent_key(module_to_patch_key)
module_parent = model.get_submodule(module_parent_key)
LayerPatcher._set_submodule(module_parent, module_name, wrapped_module)
else:
assert module_to_patch_key in original_modules
wrapped_module = module_to_patch
# Move the LoRA sidecar layer to the same device/dtype as the orig module.
# TODO(ryand): Experiment with moving to the device first, then casting. This could be faster.
lora_sidecar_layer.to(device=lora_sidecar_module.orig_module.weight.device, dtype=dtype)
# Move the LoRA layer to the same device/dtype as the orig module.
first_param = next(module_to_patch.parameters())
device = first_param.device
patch.to(device=device, dtype=dtype)
# Add the LoRA sidecar layer to the LoRASidecarModule.
lora_sidecar_module.add_lora_layer(lora_sidecar_layer)
# Add the patch to the sidecar wrapper.
wrapped_module.add_patch(patch, patch_weight)
@staticmethod
def _split_parent_key(module_key: str) -> tuple[str, str]:
@@ -235,21 +275,6 @@ class LoRAPatcher:
else:
raise ValueError(f"Invalid module key: {module_key}")
@staticmethod
def _initialize_lora_sidecar_layer(orig_layer: torch.nn.Module, lora_layer: AnyLoRALayer, patch_weight: float):
# TODO(ryand): Add support for more original layer types and LoRA layer types.
if isinstance(orig_layer, torch.nn.Linear) or (
isinstance(orig_layer, LoRASidecarModule) and isinstance(orig_layer.orig_module, torch.nn.Linear)
):
if isinstance(lora_layer, LoRALayer):
return LoRALinearSidecarLayer(lora_layer=lora_layer, weight=patch_weight)
elif isinstance(lora_layer, ConcatenatedLoRALayer):
return ConcatenatedLoRALinearSidecarLayer(concatenated_lora_layer=lora_layer, weight=patch_weight)
else:
raise ValueError(f"Unsupported Linear LoRA layer type: {type(lora_layer)}")
else:
raise ValueError(f"Unsupported layer type: {type(orig_layer)}")
@staticmethod
def _set_submodule(parent_module: torch.nn.Module, module_name: str, submodule: torch.nn.Module):
try:

View File

@@ -0,0 +1,9 @@
import torch
def pad_with_zeros(orig_weight: torch.Tensor, target_shape: torch.Size) -> torch.Tensor:
"""Pad a weight tensor with zeros to match the target shape."""
expanded_weight = torch.zeros(target_shape, dtype=orig_weight.dtype, device=orig_weight.device)
slices = tuple(slice(0, dim) for dim in orig_weight.shape)
expanded_weight[slices] = orig_weight
return expanded_weight
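A quick check of what the new helper produces (shapes chosen arbitrarily for illustration):

import torch
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros

# Pad a 2x3 weight out to 2x5: the original values fill the top-left block,
# everything else is zero.
w = torch.ones(2, 3)
padded = pad_with_zeros(w, torch.Size((2, 5)))
assert padded.shape == (2, 5)
assert torch.equal(padded[:, :3], w)
assert torch.all(padded[:, 3:] == 0)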

View File

@@ -0,0 +1,54 @@
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
class BaseSidecarWrapper(torch.nn.Module):
"""A base class for sidecar wrappers.
A sidecar wrapper is a wrapper for an existing torch.nn.Module that applies a
list of patches as 'sidecar' patches. I.e. it applies the sidecar patches during forward inference without modifying
the original module.
Sidecar wrappers are typically used over regular patches when:
- The original module is quantized and so the weights can't be patched in the usual way.
- The original module is on the CPU and modifying the weights would require backing up the original weights and
doubling the CPU memory usage.
"""
def __init__(
self, orig_module: torch.nn.Module, patches_and_weights: list[tuple[BaseLayerPatch, float]] | None = None
):
super().__init__()
self._orig_module = orig_module
self._patches_and_weights = [] if patches_and_weights is None else patches_and_weights
@property
def orig_module(self) -> torch.nn.Module:
return self._orig_module
def add_patch(self, patch: BaseLayerPatch, patch_weight: float):
"""Add a patch to the sidecar wrapper."""
self._patches_and_weights.append((patch, patch_weight))
def _aggregate_patch_parameters(
self, patches_and_weights: list[tuple[BaseLayerPatch, float]]
) -> dict[str, torch.Tensor]:
"""Helper function that aggregates the parameters from all patches into a single dict."""
params: dict[str, torch.Tensor] = {}
for patch, patch_weight in patches_and_weights:
# TODO(ryand): self._orig_module could be quantized. Depending on what the patch is doing with the original
# module, this might fail or return incorrect results.
layer_params = patch.get_parameters(self._orig_module, weight=patch_weight)
for param_name, param_weight in layer_params.items():
if param_name not in params:
params[param_name] = param_weight
else:
params[param_name] += param_weight
return params
def forward(self, *args, **kwargs): # type: ignore
raise NotImplementedError()

View File

@@ -0,0 +1,11 @@
import torch
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
class Conv1dSidecarWrapper(BaseSidecarWrapper):
def forward(self, input: torch.Tensor) -> torch.Tensor:
aggregated_param_residuals = self._aggregate_patch_parameters(self._patches_and_weights)
return self.orig_module(input) + torch.nn.functional.conv1d(
input, aggregated_param_residuals["weight"], aggregated_param_residuals.get("bias", None)
)

View File

@@ -0,0 +1,11 @@
import torch
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
class Conv2dSidecarWrapper(BaseSidecarWrapper):
def forward(self, input: torch.Tensor) -> torch.Tensor:
aggregated_param_residuals = self._aggregate_patch_parameters(self._patches_and_weights)
return self.orig_module(input) + torch.nn.functional.conv2d(
input, aggregated_param_residuals["weight"], aggregated_param_residuals.get("bias", None)
)

View File

@@ -0,0 +1,24 @@
import torch
from invokeai.backend.patches.layers.set_parameter_layer import SetParameterLayer
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
class FluxRMSNormSidecarWrapper(BaseSidecarWrapper):
"""A sidecar wrapper for a FLUX RMSNorm layer.
This wrapper is a special case. It is added specifically to enable FLUX structural control LoRAs, which overwrite
the RMSNorm scale parameters.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
# Given the narrow focus of this wrapper, we only support a very particular patch configuration:
assert len(self._patches_and_weights) == 1
patch, _patch_weight = self._patches_and_weights[0]
assert isinstance(patch, SetParameterLayer)
assert patch.param_name == "scale"
# Apply the patch.
# NOTE(ryand): Currently, we ignore the patch weight when running as a sidecar. It's not clear how this should
# be handled.
return torch.nn.functional.rms_norm(input, patch.weight.shape, patch.weight, eps=1e-6)

View File

@@ -0,0 +1,66 @@
import torch
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.sidecar_wrappers.base_sidecar_wrapper import BaseSidecarWrapper
class LinearSidecarWrapper(BaseSidecarWrapper):
def _lora_forward(self, input: torch.Tensor, lora_layer: LoRALayer, lora_weight: float) -> torch.Tensor:
"""An optimized implementation of the residual calculation for a Linear LoRALayer."""
x = torch.nn.functional.linear(input, lora_layer.down)
if lora_layer.mid is not None:
x = torch.nn.functional.linear(x, lora_layer.mid)
x = torch.nn.functional.linear(x, lora_layer.up, bias=lora_layer.bias)
x *= lora_weight * lora_layer.scale()
return x
def _concatenated_lora_forward(
self, input: torch.Tensor, concatenated_lora_layer: ConcatenatedLoRALayer, lora_weight: float
) -> torch.Tensor:
"""An optimized implementation of the residual calculation for a Linear ConcatenatedLoRALayer."""
x_chunks: list[torch.Tensor] = []
for lora_layer in concatenated_lora_layer.lora_layers:
x_chunk = torch.nn.functional.linear(input, lora_layer.down)
if lora_layer.mid is not None:
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.mid)
x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.up, bias=lora_layer.bias)
x_chunk *= lora_weight * lora_layer.scale()
x_chunks.append(x_chunk)
# TODO(ryand): Generalize to support concat_axis != 0.
assert concatenated_lora_layer.concat_axis == 0
x = torch.cat(x_chunks, dim=-1)
return x
def forward(self, input: torch.Tensor) -> torch.Tensor:
# First, apply the original linear layer.
# NOTE: We slice the input to match the original weight shape in order to work with FluxControlLoRAs, which
# change the linear layer's in_features.
orig_input = input
input = orig_input[..., : self.orig_module.in_features]
output = self.orig_module(input)
# Then, apply layers for which we have optimized implementations.
unprocessed_patches_and_weights: list[tuple[BaseLayerPatch, float]] = []
for patch, patch_weight in self._patches_and_weights:
if isinstance(patch, FluxControlLoRALayer):
# Note that we use the original input here, not the sliced input.
output += self._lora_forward(orig_input, patch, patch_weight)
elif isinstance(patch, LoRALayer):
output += self._lora_forward(input, patch, patch_weight)
elif isinstance(patch, ConcatenatedLoRALayer):
output += self._concatenated_lora_forward(input, patch, patch_weight)
else:
unprocessed_patches_and_weights.append((patch, patch_weight))
# Finally, apply any remaining patches.
if len(unprocessed_patches_and_weights) > 0:
aggregated_param_residuals = self._aggregate_patch_parameters(unprocessed_patches_and_weights)
output += torch.nn.functional.linear(
input, aggregated_param_residuals["weight"], aggregated_param_residuals.get("bias", None)
)
return output

View File

@@ -0,0 +1,20 @@
import torch
from invokeai.backend.flux.modules.layers import RMSNorm
from invokeai.backend.patches.sidecar_wrappers.conv1d_sidecar_wrapper import Conv1dSidecarWrapper
from invokeai.backend.patches.sidecar_wrappers.conv2d_sidecar_wrapper import Conv2dSidecarWrapper
from invokeai.backend.patches.sidecar_wrappers.flux_rms_norm_sidecar_wrapper import FluxRMSNormSidecarWrapper
from invokeai.backend.patches.sidecar_wrappers.linear_sidecar_wrapper import LinearSidecarWrapper
def wrap_module_with_sidecar_wrapper(orig_module: torch.nn.Module) -> torch.nn.Module:
if isinstance(orig_module, torch.nn.Linear):
return LinearSidecarWrapper(orig_module)
elif isinstance(orig_module, torch.nn.Conv1d):
return Conv1dSidecarWrapper(orig_module)
elif isinstance(orig_module, torch.nn.Conv2d):
return Conv2dSidecarWrapper(orig_module)
elif isinstance(orig_module, RMSNorm):
return FluxRMSNormSidecarWrapper(orig_module)
else:
raise ValueError(f"No sidecar wrapper found for module type: {type(orig_module)}")

View File

@@ -52,6 +52,7 @@ GGML_TENSOR_OP_TABLE = {
torch.ops.aten.t.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.addmm.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.mul.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.add.Tensor: dequantize_and_run, # pyright: ignore
}
if torch.backends.mps.is_available():

View File

@@ -5,8 +5,8 @@ from typing import TYPE_CHECKING
from diffusers import UNet2DConditionModel
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.patches.model_patcher import LayerPatcher
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
if TYPE_CHECKING:
@@ -30,8 +30,8 @@ class LoRAExt(ExtensionBase):
@contextmanager
def patch_unet(self, unet: UNet2DConditionModel, original_weights: OriginalWeightsStorage):
lora_model = self._node_context.models.load(self._model_id).model
assert isinstance(lora_model, LoRAModelRaw)
LoRAPatcher.apply_lora_patch(
assert isinstance(lora_model, ModelPatchRaw)
LayerPatcher.apply_model_patch(
model=unet,
prefix="lora_unet_",
patch=lora_model,

View File

@@ -1,3 +1,3 @@
# Invoke UI
<https://invoke-ai.github.io/InvokeAI/contributing/frontend/OVERVIEW/>
<https://invoke-ai.github.io/InvokeAI/contributing/frontend/>

View File

@@ -96,7 +96,10 @@
"new": "Neu",
"ok": "OK",
"close": "Schließen",
"clipboard": "Zwischenablage"
"clipboard": "Zwischenablage",
"generating": "Generieren",
"loadingModel": "Lade Modell",
"warnings": "Warnungen"
},
"gallery": {
"galleryImageSize": "Bildgröße",
@@ -268,7 +271,7 @@
"title": "Ansichts-Tool"
},
"quickSwitch": {
"title": "Ebenen schnell umschalten",
"title": "Ebenen Schnell-Umschalten",
"desc": "Wechseln Sie zwischen den beiden zuletzt gewählten Ebenen. Wenn eine Ebene mit einem Lesezeichen versehen ist, wird zwischen ihr und der letzten nicht markierten Ebene gewechselt."
},
"applyFilter": {
@@ -591,7 +594,17 @@
"loraTriggerPhrases": "LoRA-Auslösephrasen",
"installingBundle": "Bündel wird installiert",
"triggerPhrases": "Auslösephrasen",
"mainModelTriggerPhrases": "Hauptmodell-Auslösephrasen"
"mainModelTriggerPhrases": "Hauptmodell-Auslösephrasen",
"noDefaultSettings": "Für dieses Modell sind keine Standardeinstellungen konfiguriert. Besuchen Sie den Modell-Manager, um Standardeinstellungen hinzuzufügen.",
"defaultSettingsOutOfSync": "Einige Einstellungen stimmen nicht mit den Standardeinstellungen des Modells überein:",
"clipLEmbed": "CLIP-L einbetten",
"clipGEmbed": "CLIP-G einbetten",
"hfTokenLabel": "HuggingFace-Token (für einige Modelle erforderlich)",
"hfTokenHelperText": "Für die Nutzung einiger Modelle ist ein HF-Token erforderlich. Klicken Sie hier, um Ihr Token zu erstellen oder zu erhalten.",
"hfForbidden": "Sie haben keinen Zugriff auf dieses HF-Modell",
"hfTokenInvalid": "Ungültiges oder fehlendes HF-Token",
"restoreDefaultSettings": "Klicken, um die Standardeinstellungen des Modells zu verwenden.",
"usingDefaultSettings": "Die Standardeinstellungen des Modells werden verwendet"
},
"parameters": {
"images": "Bilder",
@@ -632,17 +645,22 @@
"remixImage": "Remix des Bilds erstellen",
"imageActions": "Weitere Bildaktionen",
"invoke": {
"layer": {
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Breite ist {{width}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Breite ist {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Höhe ist {{height}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Höhe ist {{height}}"
},
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Breite ist {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Höhe ist {{height}}",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Breite ist {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Höhe ist {{height}}"
}
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Höhe ist {{height}}",
"noNodesInGraph": "Keine Knoten im Graphen",
"canvasIsTransforming": "Leinwand ist beschäftigt (wird transformiert)",
"canvasIsRasterizing": "Leinwand ist beschäftigt (wird gerastert)",
"canvasIsCompositing": "Leinwand ist beschäftigt (wird zusammengesetzt)",
"canvasIsFiltering": "Leinwand ist beschäftigt (wird gefiltert)",
"canvasIsSelectingObject": "Leinwand ist beschäftigt (wird Objekt ausgewählt)",
"noPrompts": "Keine Eingabeaufforderungen generiert"
},
"seed": "Seed",
"patchmatchDownScaleSize": "Herunterskalieren",
"seamlessXAxis": "Nahtlose X Achse",
"seamlessYAxis": "Nahtlose Y Achse"
},
"settings": {
"displayInProgress": "Zwischenbilder anzeigen",
@@ -841,7 +859,8 @@
"upscaling": "Hochskalierung",
"canvas": "Leinwand",
"prompts_one": "Prompt",
"prompts_other": "Prompts"
"prompts_other": "Prompts",
"batchSize": "Stapelgröße"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@@ -1081,6 +1100,33 @@
},
"patchmatchDownScaleSize": {
"heading": "Herunterskalieren"
},
"paramHeight": {
"heading": "Höhe",
"paragraphs": [
"Höhe des generierten Bildes. Muss ein Vielfaches von 8 sein."
]
},
"paramUpscaleMethod": {
"heading": "Vergrößerungsmethode",
"paragraphs": [
"Methode zum Hochskalieren des Bildes für High Resolution Fix."
]
},
"paramHrf": {
"heading": "High Resolution Fix aktivieren"
},
"seamlessTilingYAxis": {
"heading": "Nahtlose Kachelung Y Achse",
"paragraphs": [
"Nahtloses Kacheln eines Bildes entlang der vertikalen Achse."
]
},
"seamlessTilingXAxis": {
"paragraphs": [
"Nahtloses Kacheln eines Bildes entlang der horizontalen Achse."
],
"heading": "Nahtlose Kachelung X Achse"
}
},
"invocationCache": {
@@ -1292,7 +1338,15 @@
"loadWorkflow": "Arbeitsablauf $t(common.load)",
"updated": "Aktualisiert",
"created": "Erstellt",
"descending": "Absteigend"
"descending": "Absteigend",
"edit": "Bearbeiten",
"loadFromGraph": "Arbeitsablauf aus dem Graph laden",
"delete": "Löschen",
"copyShareLinkForWorkflow": "Teilen-Link für Arbeitsablauf kopieren",
"autoLayout": "Auto Layout",
"copyShareLink": "Teilen-Link kopieren",
"download": "Herunterladen",
"convertGraph": "Graph konvertieren"
},
"sdxl": {
"concatPromptStyle": "Verknüpfen von Prompt & Stil",
@@ -1542,7 +1596,8 @@
"filters": "Filter",
"filterType": "Filtertyp",
"filter": "Filter"
}
},
"bookmark": "Lesezeichen für Schnell-Umschalten"
},
"upsell": {
"shareAccess": "Zugang teilen",

View File

@@ -176,7 +176,8 @@
"reset": "Reset",
"none": "None",
"new": "New",
"generating": "Generating"
"generating": "Generating",
"warnings": "Warnings"
},
"hrf": {
"hrf": "High Resolution Fix",
@@ -808,6 +809,7 @@
"starterBundleHelpText": "Easily install all models needed to get started with a base model, including a main model, controlnets, IP adapters, and more. Selecting a bundle will skip any models that you already have installed.",
"starterModels": "Starter Models",
"starterModelsInModelManager": "Starter Models can be found in Model Manager",
"controlLora": "Control LoRA",
"syncModels": "Sync Models",
"textualInversions": "Textual Inversions",
"triggerPhrases": "Trigger Phrases",
@@ -1031,6 +1033,7 @@
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox height is {{height}}",
"fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
"canvasIsFiltering": "Canvas is busy (filtering)",
"canvasIsTransforming": "Canvas is busy (transforming)",
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1038,20 +1041,7 @@
"canvasIsSelectingObject": "Canvas is busy (selecting object)",
"noPrompts": "No prompts generated",
"noNodesInGraph": "No nodes in graph",
"systemDisconnected": "System disconnected",
"layer": {
"controlAdapterNoModelSelected": "no Control Adapter model selected",
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox width is {{width}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox height is {{height}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, scaled bbox width is {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, scaled bbox height is {{height}}",
"ipAdapterNoModelSelected": "no IP adapter selected",
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
"ipAdapterNoImageSelected": "no IP Adapter image selected",
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
"rgNoRegion": "no region selected"
}
"systemDisconnected": "System disconnected"
},
"maskBlur": "Mask Blur",
"negativePromptPlaceholder": "Negative Prompt",
@@ -1319,8 +1309,9 @@
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"The part of the of the denoising process that will have the Control Adapter applied.",
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
"This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
"• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
"• End Step (%): Specifies when to stop applying this layer's guidance and revert general guidance from the model and other settings."
]
},
"controlNetControlMode": {
@@ -1338,13 +1329,15 @@
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
},
"ipAdapterMethod": {
"heading": "Method",
"paragraphs": ["Method by which to apply the current IP Adapter."]
"heading": "Mode",
"paragraphs": ["The mode defines how the reference image will guide the generation process."]
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": [
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
"Adjusts how strongly the layer influences the generation process",
"• Higher Weight (.75-2): Creates a more significant impact on the final result.",
"• Lower Weight (0-.75): Creates a smaller impact on the final result."
]
},
"dynamicPrompts": {
@@ -1710,6 +1703,8 @@
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
"regionalGuidance": "Regional Guidance",
"referenceImageRegional": "Reference Image (Regional)",
"referenceImageGlobal": "Reference Image (Global)",
"asRasterLayer": "As $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "As $t(controlLayers.rasterLayer) (Resize)",
"asControlLayer": "As $t(controlLayers.controlLayer)",
@@ -1781,6 +1776,7 @@
"pullBboxIntoLayer": "Pull Bbox into Layer",
"pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
"showProgressOnCanvas": "Show Progress on Canvas",
"useImage": "Use Image",
"prompt": "Prompt",
"negativePrompt": "Negative Prompt",
"beginEndStepPercentShort": "Begin/End %",
@@ -1793,6 +1789,22 @@
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
"warnings": {
"problemsFound": "Problems found",
"unsupportedModel": "layer not supported for selected base model",
"controlAdapterNoModelSelected": "no Control Layer model selected",
"controlAdapterIncompatibleBaseModel": "incompatible Control Layer base model",
"controlAdapterNoControl": "no control selected/drawn",
"ipAdapterNoModelSelected": "no Reference Image model selected",
"ipAdapterIncompatibleBaseModel": "incompatible Reference Image base model",
"ipAdapterNoImageSelected": "no Reference Image image selected",
"rgNoPromptsOrIPAdapters": "no text prompts or Reference Images",
"rgNegativePromptNotSupported": "Negative Prompt not supported for selected base model",
"rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
"rgNoRegion": "no region drawn"
},
"controlMode": {
"controlMode": "Control Mode",
"balanced": "Balanced (recommended)",
@@ -1801,10 +1813,13 @@
"megaControl": "Mega Control"
},
"ipAdapterMethod": {
"ipAdapterMethod": "IP Adapter Method",
"ipAdapterMethod": "Mode",
"full": "Style and Composition",
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
"style": "Style Only",
"composition": "Composition Only"
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
"composition": "Composition Only",
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
},
"fill": {
"fillColor": "Fill Color",
@@ -2097,6 +2112,7 @@
},
"logNamespaces": {
"logNamespaces": "Log Namespaces",
"dnd": "Drag and Drop",
"gallery": "Gallery",
"models": "Models",
"config": "Config",
@@ -2120,8 +2136,7 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"<StrongComponent>Workflows</StrongComponent>: Run a workflow for a collection of images using the new <StrongComponent>Image Batch</StrongComponent> node.",
"<StrongComponent>FLUX</StrongComponent>: Support for XLabs IP Adapter v2."
"<StrongComponent>Flux Control Layers</StrongComponent>: New control models for edge detection and depth mapping are now supported for Flux dev models."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",

View File

@@ -507,8 +507,7 @@
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
"whatsNewInInvoke": "Novedades en Invoke",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large.",
"<StrongComponent>Lienzo</StrongComponent>: Se ha simplificado el procesamiento de la capa de control y se ha mejorado la configuración predeterminada del control."
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large."
]
},
"invocationCache": {

View File

@@ -96,7 +96,9 @@
"negativePrompt": "Prompt Négatif",
"ok": "Ok",
"close": "Fermer",
"clipboard": "Presse-papier"
"clipboard": "Presse-papier",
"loadingModel": "Chargement du modèle",
"generating": "En Génération"
},
"gallery": {
"galleryImageSize": "Taille de l'image",
@@ -287,7 +289,20 @@
"noDefaultSettings": "Aucun paramètre par défaut configuré pour ce modèle. Visitez le Gestionnaire de Modèles pour ajouter des paramètres par défaut.",
"usingDefaultSettings": "Utilisation des paramètres par défaut du modèle",
"defaultSettingsOutOfSync": "Certain paramètres ne correspondent pas aux valeurs par défaut du modèle :",
"restoreDefaultSettings": "Cliquez pour utiliser les paramètres par défaut du modèle."
"restoreDefaultSettings": "Cliquez pour utiliser les paramètres par défaut du modèle.",
"hfForbiddenErrorMessage": "Nous vous recommandons de visiter la page du modèle sur HuggingFace.com. Le propriétaire peut exiger l'acceptation des conditions pour pouvoir télécharger.",
"hfTokenRequired": "Vous essayez de télécharger un modèle qui nécessite un token HuggingFace valide.",
"clipLEmbed": "CLIP-L Embed",
"hfTokenSaved": "Token HF enregistré",
"hfTokenUnableToVerifyErrorMessage": "Impossible de vérifier le token HuggingFace. Cela est probablement dû à une erreur réseau. Veuillez réessayer plus tard.",
"clipGEmbed": "CLIP-G Embed",
"hfTokenUnableToVerify": "Impossible de vérifier le token HF",
"hfTokenInvalidErrorMessage": "Token HuggingFace invalide ou manquant.",
"hfTokenLabel": "Token HuggingFace (Requis pour certains modèles)",
"hfTokenHelperText": "Un token HF est requis pour utiliser certains modèles. Cliquez ici pour créer ou obtenir votre token.",
"hfTokenInvalid": "Token HF invalide ou manquant",
"hfForbidden": "Vous n'avez pas accès à ce modèle HF.",
"hfTokenInvalidErrorMessage2": "Mettre à jour dans le "
},
"parameters": {
"images": "Images",
@@ -317,19 +332,6 @@
"info": "Info",
"showOptionsPanel": "Afficher le panneau latéral (O ou T)",
"invoke": {
"layer": {
"rgNoPromptsOrIPAdapters": "aucun prompts ou IP Adapters",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box mise à l'échelle est {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box mise à l'échelle est {{height}}",
"ipAdapterNoModelSelected": "aucun IP adapter sélectionné",
"ipAdapterNoImageSelected": "aucune image d'IP adapter sélectionnée",
"controlAdapterIncompatibleBaseModel": "modèle de base de Control Adapter incompatible",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box est {{height}}",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box est {{width}}",
"ipAdapterIncompatibleBaseModel": "modèle de base d'IP adapter incompatible",
"rgNoRegion": "aucune zone sélectionnée",
"controlAdapterNoModelSelected": "aucun modèle de Control Adapter sélectionné"
},
"noPrompts": "Aucun prompts généré",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} entrée manquante",
"missingFieldTemplate": "Modèle de champ manquant",
@@ -349,7 +351,11 @@
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box est {{width}}",
"noT5EncoderModelSelected": "Aucun modèle T5 Encoder sélectionné pour la génération FLUX",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), la largeur de la bounding box mise à l'échelle est {{width}}",
"canvasIsCompositing": "La toile est en train de composer"
"canvasIsCompositing": "La toile est en train de composer",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} collection vide",
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}} : trop peu d'éléments, minimum {{minItems}}",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}} : trop d'éléments, maximum {{maxItems}}",
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)"
},
"negativePromptPlaceholder": "Prompt Négatif",
"positivePromptPlaceholder": "Prompt Positif",
@@ -390,7 +396,9 @@
"sendToUpscale": "Envoyer à Agrandir",
"guidance": "Guidage",
"postProcessing": "Post-traitement (Maj + U)",
"processImage": "Traiter l'image"
"processImage": "Traiter l'image",
"disabledNoRasterContent": "Désactivé (Aucun contenu raster)",
"recallMetadata": "Rappeler les métadonnées"
},
"settings": {
"models": "Modèles",
@@ -428,7 +436,8 @@
"confirmOnNewSession": "Confirmer lors d'une nouvelle session",
"modelDescriptionsDisabledDesc": "Les descriptions des modèles dans les menus déroulants ont été désactivées. Activez-les dans les paramètres.",
"enableModelDescriptions": "Activer les descriptions de modèle dans les menus déroulants",
"modelDescriptionsDisabled": "Descriptions de modèle dans les menus déroulants désactivés"
"modelDescriptionsDisabled": "Descriptions de modèle dans les menus déroulants désactivés",
"showDetailedInvocationProgress": "Afficher les détails de progression"
},
"toast": {
"uploadFailed": "Importation échouée",
@@ -647,7 +656,8 @@
"iterations_one": "Itération",
"iterations_many": "Itérations",
"iterations_other": "Itérations",
"back": "fin"
"back": "fin",
"batchSize": "Taille de lot"
},
"prompt": {
"noMatchingTriggers": "Pas de déclancheurs correspondants",
@@ -1165,7 +1175,8 @@
"heading": "Force de débruitage",
"paragraphs": [
"Intensité du bruit ajouté à l'image d'entrée.",
"0 produira une image identique, tandis que 1 produira une image complètement différente."
"0 produira une image identique, tandis que 1 produira une image complètement différente.",
"Lorsque aucune couche raster avec du contenu visible n'est présente, ce paramètre est ignoré."
]
},
"lora": {
@@ -1460,7 +1471,9 @@
"parsingFailed": "L'analyse a échoué",
"recallParameter": "Rappeler {{label}}",
"canvasV2Metadata": "Toile",
"guidance": "Guide"
"guidance": "Guide",
"seamlessXAxis": "Axe X sans bords",
"seamlessYAxis": "Axe Y sans bords"
},
"sdxl": {
"freePromptStyle": "Écriture de Prompt manuelle",
@@ -1681,7 +1694,13 @@
"delete": "Supprimer"
},
"whatsNew": {
"whatsNewInInvoke": "Quoi de neuf dans Invoke"
"whatsNewInInvoke": "Quoi de neuf dans Invoke",
"watchRecentReleaseVideos": "Regarder les vidéos des dernières versions",
"items": [
"<StrongComponent>FLUX Guidage Régional (bêta)</StrongComponent> : Notre version bêta de FLUX Guidage Régional est en ligne pour le contrôle des prompt régionaux."
],
"readReleaseNotes": "Notes de version",
"watchUiUpdatesOverview": "Aperçu des mises à jour de l'interface utilisateur"
},
"ui": {
"tabs": {
@@ -1789,7 +1808,10 @@
},
"process": "Traiter",
"apply": "Appliquer",
"cancel": "Annuler"
"cancel": "Annuler",
"advanced": "Avancé",
"processingLayerWith": "Calque de traitement avec le filtre {{type}}.",
"forMoreControl": "Pour plus de contrôle, cliquez sur Avancé ci-dessous."
},
"canvasContextMenu": {
"saveToGalleryGroup": "Enregistrer dans la galerie",
@@ -2042,7 +2064,17 @@
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
"newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)"
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
"mergingLayers": "Fusionner les couches",
"resetCanvasLayers": "Réinitialiser les couches de la toile",
"resetGenerationSettings": "Réinitialiser les paramètres de génération",
"mergeDown": "Fusionner",
"controlLayerEmptyState": "<UploadButton>Télécharger une image</UploadButton>, faites glisser une image depuis la <GalleryButton>galerie</GalleryButton> sur ce calque, ou dessinez sur la toile pour commencer.",
"asRasterLayer": "En tant que $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "En tant que $t(controlLayers.rasterLayer) (Redimensionner)",
"asControlLayer": "En tant que $t(controlLayers.controlLayer)",
"asControlLayerResize": "En $t(controlLayers.controlLayer) (Redimensionner)",
"newSession": "Nouvelle session"
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
@@ -2060,7 +2092,9 @@
"postProcessingModel": "Modèle de post-traitement",
"missingUpscaleModel": "Modèle d'agrandissement manquant",
"missingUpscaleInitialImage": "Image initiale manquante pour l'agrandissement",
"missingTileControlNetModel": "Aucun modèle ControlNet valide installé"
"missingTileControlNetModel": "Aucun modèle ControlNet valide installé",
"incompatibleBaseModelDesc": "L'upscaling est pris en charge uniquement pour les modèles d'architecture SD1.5 et SDXL. Changez le modèle principal pour activer l'upscaling.",
"incompatibleBaseModel": "Modèle principal non pris en charge pour l'upscaling"
},
"stylePresets": {
"deleteTemplate": "Supprimer le template",
@@ -2146,5 +2180,62 @@
"inviteTeammates": "Inviter des collègues",
"professionalUpsell": "Disponible dans l'édition professionnelle d'Invoke. Cliquez ici ou visitez invoke.com/pricing pour plus de détails.",
"professional": "Professionnel"
},
"supportVideos": {
"watch": "Regarder",
"videos": {
"upscaling": {
"description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
"title": "Upscaling"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Étapes pour générer et enregistrer des images dans la galerie.",
"title": "Comment générer et enregistrer dans la galerie?"
},
"usingControlLayersAndReferenceGuides": {
"title": "Utilisation des couche de contrôle et des guides de référence",
"description": "Apprenez à guider la création de vos images avec des couche de contrôle et des images de référence."
},
"exploringAIModelsAndConceptAdapters": {
"description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
"title": "Exploration des modèles d'IA et des adaptateurs de concepts"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Comment utiliser les réseaux de contrôle et les couches de contrôle?",
"description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
"title": "Créer et composer sur le canvas de contrôle d'Invoke"
},
"howDoIEditOnTheCanvas": {
"title": "Comment puis-je modifier sur la toile?",
"description": "Guide pour éditer des images directement sur la toile."
},
"howDoIDoImageToImageTransformation": {
"title": "Comment effectuer une transformation d'image à image?",
"description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Comment utiliser les IP Adapters globaux et les images de référence?",
"description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
},
"howDoIUseInpaintMasks": {
"title": "Comment utiliser les masques d'inpainting?"
},
"creatingYourFirstImage": {
"title": "Créer votre première image",
"description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
},
"understandingImageToImageAndDenoising": {
"title": "Comprendre l'Image-à-Image et le Débruitage",
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
}
},
"gettingStarted": "Commencer",
"studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
"studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutée dans la playlist la semaine suivante.",
"supportVideos": "Vidéos d'assistance",
"controlCanvas": "Contrôler la toile"
}
}

View File

@@ -96,7 +96,8 @@
"clipboard": "Appunti",
"ok": "Ok",
"generating": "Generazione",
"loadingModel": "Caricamento del modello"
"loadingModel": "Caricamento del modello",
"warnings": "Avvisi"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -610,7 +611,8 @@
"hfForbidden": "Non hai accesso a questo modello HF",
"hfTokenLabel": "Gettone HuggingFace (richiesto per alcuni modelli)",
"hfForbiddenErrorMessage": "Consigliamo di visitare la pagina del repository su HuggingFace.com. Il proprietario potrebbe richiedere l'accettazione dei termini per poter effettuare il download.",
"hfTokenInvalidErrorMessage2": "Aggiornalo in "
"hfTokenInvalidErrorMessage2": "Aggiornalo in ",
"controlLora": "Controllo LoRA"
},
"parameters": {
"images": "Immagini",
@@ -662,21 +664,8 @@
"addingImagesTo": "Aggiungi immagini a",
"systemDisconnected": "Sistema disconnesso",
"missingNodeTemplate": "Modello di nodo mancante",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: ingresso mancante",
"missingFieldTemplate": "Modello di campo mancante",
"layer": {
"controlAdapterNoModelSelected": "Nessun modello di adattatore di controllo selezionato",
"controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
"ipAdapterNoModelSelected": "Nessun adattatore IP selezionato",
"ipAdapterIncompatibleBaseModel": "Il modello base dell'adattatore IP non è compatibile",
"ipAdapterNoImageSelected": "Nessuna immagine dell'adattatore IP selezionata",
"rgNoPromptsOrIPAdapters": "Nessun prompt o adattatore IP",
"rgNoRegion": "Nessuna regione selezionata",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza riquadro è {{width}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza riquadro è {{height}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza del riquadro scalato {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza del riquadro scalato {{height}}"
},
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), altezza riquadro è {{height}}",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza riquadro è {{width}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza del riquadro scalato è {{width}}",
@@ -684,10 +673,15 @@
"noT5EncoderModelSelected": "Nessun modello di encoder T5 selezionato per la generazione con FLUX",
"noCLIPEmbedModelSelected": "Nessun modello CLIP Embed selezionato per la generazione con FLUX",
"noFLUXVAEModelSelected": "Nessun modello VAE selezionato per la generazione con FLUX",
"canvasIsTransforming": "La tela sta trasformando",
"canvasIsRasterizing": "La tela sta rasterizzando",
"canvasIsCompositing": "La tela è in fase di composizione",
"canvasIsFiltering": "La tela sta filtrando"
"canvasIsTransforming": "La tela è occupata (sta trasformando)",
"canvasIsRasterizing": "La tela è occupata (sta rasterizzando)",
"canvasIsCompositing": "La tela è occupata (in composizione)",
"canvasIsFiltering": "La tela è occupata (sta filtrando)",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi elementi, massimo {{maxItems}}",
"canvasIsSelectingObject": "La tela è occupata (selezione dell'oggetto)",
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi pochi elementi, minimo {{minItems}}",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} raccolta vuota",
"fluxModelMultipleControlLoRAs": "È possibile utilizzare solo 1 Controllo LoRA alla volta"
},
"useCpuNoise": "Usa la CPU per generare rumore",
"iterations": "Iterazioni",
@@ -972,7 +966,9 @@
"saveToGallery": "Salva nella Galleria",
"noMatchingWorkflows": "Nessun flusso di lavoro corrispondente",
"noWorkflows": "Nessun flusso di lavoro",
"workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>."
"workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>.",
"specialDesc": "Questa invocazione comporta una gestione speciale nell'applicazione. Ad esempio, i nodi Lotto vengono utilizzati per mettere in coda più grafici da un singolo flusso di lavoro.",
"internalDesc": "Questa invocazione è utilizzata internamente da Invoke. Potrebbe subire modifiche significative durante gli aggiornamenti dell'app e potrebbe essere rimossa in qualsiasi momento."
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1093,7 +1089,8 @@
"workflows": "Flussi di lavoro",
"generation": "Generazione",
"other": "Altro",
"gallery": "Galleria"
"gallery": "Galleria",
"batchSize": "Dimensione del lotto"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1195,8 +1192,9 @@
"controlNetBeginEnd": {
"heading": "Percentuale passi Inizio / Fine",
"paragraphs": [
"La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
"In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli."
"Questa impostazione determina quale parte del processo di rimozione del rumore (generazione) incorpora la guida da questo livello.",
"• Passo iniziale (%): specifica quando iniziare ad applicare la guida da questo livello durante il processo di generazione.",
"• Passo finale (%): specifica quando interrompere l'applicazione della guida di questo livello e ripristinare la guida generale dal modello e altre impostazioni."
]
},
"noiseUseCPU": {
@@ -1300,7 +1298,9 @@
"controlNetWeight": {
"heading": "Peso",
"paragraphs": [
"Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale."
"Regola la forza con cui il livello influenza il processo di generazione",
"• Peso maggiore (0.75-2): crea un impatto più significativo sul risultato finale.",
"• Peso inferiore (0-0.75): crea un impatto minore sul risultato finale."
]
},
"paramCFGScale": {
@@ -1477,9 +1477,9 @@
]
},
"ipAdapterMethod": {
"heading": "Metodo",
"heading": "Modalità",
"paragraphs": [
"Metodo con cui applicare l'adattatore IP corrente."
"La modalità definisce il modo in cui l'immagine di riferimento guiderà il processo di generazione."
]
},
"scale": {
@@ -1801,7 +1801,10 @@
"full": "Stile e Composizione",
"style": "Solo Stile",
"composition": "Solo Composizione",
"ipAdapterMethod": "Metodo Adattatore IP"
"ipAdapterMethod": "Modalità",
"fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento."
},
"showingType": "Mostra {{type}}",
"dynamicGrid": "Griglia dinamica",
@@ -2044,7 +2047,33 @@
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare."
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
"asControlLayerResize": "Come $t(controlLayers.controlLayer) (Ridimensiona)",
"newSession": "Nuova sessione",
"resetCanvasLayers": "Ripristina livelli Tela",
"referenceImageRegional": "Immagine di riferimento (regionale)",
"referenceImageGlobal": "Immagine di riferimento (globale)",
"warnings": {
"controlAdapterNoModelSelected": "nessun modello selezionato per il livello di controllo",
"controlAdapterNoControl": "nessun controllo selezionato/disegnato",
"ipAdapterNoModelSelected": "nessun modello di immagine di riferimento selezionato",
"rgNoPromptsOrIPAdapters": "nessun prompt testuale o immagini di riferimento",
"rgReferenceImagesNotSupported": "Immagini di riferimento regionali non supportate per il modello base selezionato",
"rgNoRegion": "nessuna regione disegnata",
"problemsFound": "Problemi riscontrati",
"unsupportedModel": "livello non supportato per il modello base selezionato",
"controlAdapterIncompatibleBaseModel": "modello di base del livello di controllo incompatibile",
"rgNegativePromptNotSupported": "Prompt negativo non supportato per il modello base selezionato",
"ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
"ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
"rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato"
}
},
"ui": {
"tabs": {
@@ -2144,8 +2173,7 @@
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: supporto per SD 3.5 Medium e Large.",
"<StrongComponent>Tela</StrongComponent>: elaborazione semplificata del livello di controllo e impostazioni di controllo predefinite migliorate."
"<StrongComponent>Livelli di controllo Flux</StrongComponent>: nuovi modelli di controllo per il rilevamento dei bordi e la mappatura della profondità sono ora supportati per i modelli di Flux dev."
]
},
"system": {
@@ -2169,8 +2197,71 @@
"events": "Eventi",
"system": "Sistema",
"metadata": "Metadati",
"logNamespaces": "Elementi del registro"
"logNamespaces": "Elementi del registro",
"dnd": "Trascina e rilascia"
},
"enableLogging": "Abilita la registrazione"
},
"supportVideos": {
"gettingStarted": "Iniziare",
"supportVideos": "Video di supporto",
"videos": {
"usingControlLayersAndReferenceGuides": {
"title": "Utilizzo di livelli di controllo e guide di riferimento",
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
},
"creatingYourFirstImage": {
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
"title": "Creazione della tua prima immagine"
},
"understandingImageToImageAndDenoising": {
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
},
"howDoIDoImageToImageTransformation": {
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
},
"howDoIUseInpaintMasks": {
"title": "Come si usano le maschere Inpaint?",
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
},
"howDoIOutpaint": {
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
"title": "Come posso eseguire l'outpainting?"
},
"exploringAIModelsAndConceptAdapters": {
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
},
"upscaling": {
"title": "Ampliamento",
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
"title": "Creare e comporre sulla tela di controllo di Invoke"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Passaggi per generare e salvare le immagini nella galleria.",
"title": "Come posso generare e salvare nella Galleria?"
},
"howDoIEditOnTheCanvas": {
"title": "Come posso apportare modifiche sulla tela?",
"description": "Guida alla modifica delle immagini direttamente sulla tela."
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
}
},
"controlCanvas": "Tela di Controllo",
"watch": "Guarda",
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
}
}


@@ -230,16 +230,7 @@
"systemDisconnected": "Systeem is niet verbonden",
"missingNodeTemplate": "Knooppuntsjabloon ontbreekt",
"missingFieldTemplate": "Veldsjabloon ontbreekt",
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan",
"layer": {
"controlAdapterNoModelSelected": "geen controle-adaptermodel geselecteerd",
"controlAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor controle-adapter",
"ipAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor IP-adapter",
"ipAdapterNoImageSelected": "geen afbeelding voor IP-adapter geselecteerd",
"rgNoRegion": "geen gebied geselecteerd",
"rgNoPromptsOrIPAdapters": "geen tekstprompts of IP-adapters",
"ipAdapterNoModelSelected": "geen IP-adapter geselecteerd"
}
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan"
},
"patchmatchDownScaleSize": "Verklein",
"useCpuNoise": "Gebruik CPU-ruis",


@@ -10,7 +10,24 @@
"load": "Załaduj",
"statusDisconnected": "Odłączono od serwera",
"githubLabel": "GitHub",
"discordLabel": "Discord"
"discordLabel": "Discord",
"clipboard": "Schowek",
"aboutDesc": "Wykorzystujesz Invoke do pracy? Sprawdź:",
"ai": "SI",
"areYouSure": "Czy jesteś pewien?",
"copyError": "$t(gallery.copy) Błąd",
"apply": "Zastosuj",
"copy": "Kopiuj",
"or": "albo",
"add": "Dodaj",
"off": "Wyłączony",
"accept": "Zaakceptuj",
"cancel": "Anuluj",
"advanced": "Zawansowane",
"back": "Do tyłu",
"auto": "Automatyczny",
"beta": "Beta",
"close": "Wyjdź"
},
"gallery": {
"galleryImageSize": "Rozmiar obrazów",
@@ -65,6 +82,42 @@
"uploadImage": "Wgrywanie obrazu",
"previousImage": "Poprzedni obraz",
"nextImage": "Następny obraz",
"menu": "Menu"
"menu": "Menu",
"mode": "Tryb"
},
"boards": {
"cancel": "Anuluj",
"noBoards": "Brak tablic typu {{boardType}}",
"imagesWithCount_one": "{{count}} zdjęcie",
"imagesWithCount_few": "{{count}} zdjęcia",
"imagesWithCount_many": "{{count}} zdjęcia",
"private": "Prywatne tablice",
"updateBoardError": "Błąd aktualizacji tablicy",
"uncategorized": "Nieskategoryzowane",
"selectBoard": "Wybierz tablicę",
"downloadBoard": "Pobierz tablice",
"loading": "Ładowanie...",
"move": "Przenieś",
"noMatching": "Brak pasujących tablic"
},
"accordions": {
"compositing": {
"title": "Kompozycja",
"infillTab": "Inskrypcja",
"coherenceTab": "Przebieg Koherencji"
},
"generation": {
"title": "Generowanie"
},
"image": {
"title": "Zdjęcie"
},
"advanced": {
"options": "$t(accordions.advanced.title) Opcje",
"title": "Zaawansowane"
},
"control": {
"title": "Kontrola"
}
}
}


@@ -648,19 +648,6 @@
"missingFieldTemplate": "Отсутствует шаблон поля",
"addingImagesTo": "Добавление изображений в",
"invoke": "Создать",
"layer": {
"ipAdapterNoModelSelected": "IP адаптер не выбран",
"controlAdapterNoModelSelected": "не выбрана модель адаптера контроля",
"controlAdapterIncompatibleBaseModel": "несовместимая базовая модель адаптера контроля",
"rgNoRegion": "регион не выбран",
"rgNoPromptsOrIPAdapters": "нет текстовых запросов или IP-адаптеров",
"ipAdapterIncompatibleBaseModel": "несовместимая базовая модель IP-адаптера",
"ipAdapterNoImageSelected": "изображение IP-адаптера не выбрано",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная ширина рамки {{width}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, высота рамки {{height}}",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, ширина рамки {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная высота рамки {{height}}"
},
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), ширина рамки {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), высота рамки {{height}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), масштабированная высота рамки {{height}}",


@@ -46,7 +46,7 @@
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
"move": "Di Chuyển",
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
"uncategorized": "Chưa Phân Loại",
"uncategorized": "Chưa Sắp Xếp",
"archived": "Được Lưu Trữ",
"loading": "Đang Tải...",
"selectBoard": "Chọn Bảng",
@@ -91,7 +91,7 @@
"sideBySide": "Cạnh Nhau",
"alwaysShowImageSizeBadge": "Luôn Hiển Thị Kích Thước Ảnh",
"autoAssignBoardOnClick": "Tự Động Gán Vào Bảng Khi Nhấp Chuột",
"jump": "Nhảy Vào",
"jump": "Nhảy Đến",
"go": "Đi",
"autoSwitchNewImages": "Tự Động Đổi Sang Hình Ảnh Mới",
"featuresWillReset": "Nếu bạn xoá hình ảnh này, những tính năng đó sẽ lập tức được khởi động lại.",
@@ -133,7 +133,7 @@
"alpha": "Alpha",
"edit": "Sửa",
"nodes": "Workflow",
"format": "định dạng",
"format": "Định Dạng",
"delete": "Xoá",
"details": "Chi Tiết",
"imageFailedToLoad": "Không Thể Tải Hình Ảnh",
@@ -164,7 +164,7 @@
"discordLabel": "Discord",
"back": "Trở Về",
"advanced": "Nâng Cao",
"batch": "Quản Lý Hàng Loạt",
"batch": "Quản Lý ",
"modelManager": "Quản Lý Model",
"dontShowMeThese": "Không hiển thị thứ này",
"ok": "OK",
@@ -196,7 +196,7 @@
"areYouSure": "Bạn chắc chứ?",
"ai": "ai",
"aboutDesc": "Sử dụng Invoke cho công việc? Xem thử:",
"aboutHeading": "Sở Hữu Khả Năng Sáng Tạo Cho Riêng Mình",
"aboutHeading": "Quyền Năng Sáng Tạo Của Riêng",
"enabled": "Đã Bật",
"close": "Đóng",
"data": "Dữ Liệu",
@@ -217,7 +217,10 @@
"direction": "Phương Hướng",
"unknownError": "Lỗi Không Rõ",
"selected": "Đã chọn",
"tab": "Tab"
"tab": "Tab",
"loadingModel": "Đang Tải Model",
"generating": "Đang Tạo Sinh",
"warnings": "Cảnh Báo"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
@@ -229,24 +232,24 @@
"enqueueing": "Xếp Vào Hàng Hàng Loạt",
"prompts_other": "Lệnh",
"iterations_other": "Lặp Lại",
"total": "Tổng Cộng",
"total": "Tổng",
"pruneFailed": "Có Vấn Đề Khi Cắt Bớt Mục Khỏi Hàng",
"clearSucceeded": "Hàng Đã Được Dọn Sạch",
"cancel": "Huỷ Bỏ",
"clearQueueAlertDialog2": "Bạn chắc chắn muốn dọn sạch hàng không?",
"queueEmpty": "Hàng Trống",
"queueBack": "Thêm Vào Hàng",
"batchFieldValues": "Giá Trị Vùng Hàng Loạt",
"batchFieldValues": "Giá Trị Vùng Theo Lô",
"openQueue": "Mở Queue",
"pause": "Dừng Lại",
"pauseFailed": "Có Vấn Đề Khi Dừng Lại Bộ Xử Lý",
"batchQueued": "Hàng Loạt Đã Vào hàng",
"batchFailedToQueue": "Lỗi Khi Xếp Hàng Loạt Vào Hàng",
"batchQueued": " Đã Vào Hàng",
"batchFailedToQueue": "Lỗi Khi Xếp Vào Hàng",
"next": "Tiếp Theo",
"in_progress": "Đang Tiến Hành",
"in_progress": "Đang Chạy",
"failed": "Thất Bại",
"canceled": "Bị Huỷ",
"cancelBatchFailed": "Có Vấn Đề Khi Huỷ Bỏ Hàng Loạt",
"cancelBatchFailed": "Có Vấn Đề Khi Huỷ Bỏ ",
"workflows": "Workflow (Luồng làm việc)",
"canvas": "Canvas (Vùng ảnh)",
"upscaling": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)",
@@ -269,19 +272,19 @@
"resumeTooltip": "Tiếp Tục Bộ Xử Lý",
"clearFailed": "Có Vấn Đề Khi Dọn Dẹp Hàng",
"generations_other": "Máy Tạo Sinh",
"cancelBatch": "Huỷ Bỏ Hàng Loạt",
"cancelBatch": "Huỷ Bỏ ",
"status": "Trạng Thái",
"pending": "Đang Chờ",
"gallery": "Thư Viện",
"front": "trước",
"batch": "Hàng Loạt",
"batch": "",
"origin": "Nguồn Gốc",
"destination": "Điểm Đến",
"other": "Khác",
"graphFailedToQueue": "Lỗi Khi Xếp Đồ Thị Vào Hàng",
"notReady": "Không Thể Xếp Hàng",
"cancelItem": "Huỷ Bỏ Mục",
"cancelBatchSucceeded": "Mục Hàng Loạt Đã Huỷ Bỏ",
"cancelBatchSucceeded": " Đã Huỷ Bỏ",
"current": "Hiện Tại",
"time": "Thời Gian",
"completed": "Hoàn Tất",
@@ -290,7 +293,8 @@
"cancelSucceeded": "Mục Đã Huỷ Bỏ",
"completedIn": "Hoàn tất trong",
"graphQueued": "Đồ Thị Đã Vào Hàng",
"batchQueuedDesc_other": "Thêm {{count}} phiên vào {{direction}} của hàng"
"batchQueuedDesc_other": "Thêm {{count}} phiên vào {{direction}} của hàng",
"batchSize": "Kích Thước Lô"
},
"hotkeys": {
"canvas": {
@@ -453,8 +457,8 @@
"title": "Gợi Lại Tất Cả Metadata"
},
"recallSeed": {
"title": "Gợi Lại Tham Số Hạt Giống",
"desc": "Gợi lại tham số hạt giống của ảnh hiện tại."
"title": "Gợi Lại Hạt Giống",
"desc": "Gợi lại hạt giống của ảnh hiện tại."
},
"useSize": {
"title": "Dùng Kích Thước",
@@ -486,7 +490,7 @@
"title": "Đổi Ảnh So Sánh"
},
"remix": {
"desc": "Gợi lại tất cả metadata cho tham số hạt giống của ảnh hiện tại.",
"desc": "Gợi lại tất cả metadata cho hạt giống của ảnh hiện tại.",
"title": "Phối Lại"
},
"runPostprocessing": {
@@ -629,7 +633,7 @@
"modelManager": "Quản Lý Model",
"name": "Tên",
"noModelSelected": "Không Có Model Được Chọn",
"installQueue": "Tải Xuống Danh Sách Đợi",
"installQueue": "Danh Sách Tải Xuống",
"modelDeleteFailed": "Xoá model thất bại",
"inplaceInstallDesc": "Tải xuống model mà không sao chép toàn bộ tài nguyên. Khi sử dụng model, nó được sẽ tải từ vị trí được đặt. Nếu bị tắt, toàn bộ tài nguyên của model sẽ được sao chép vào thư mục quản lý model của Invoke trong quá trình tải xuống.",
"modelType": "Loại Model",
@@ -701,7 +705,7 @@
"spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
"starterBundles": "Quà Tân Thủ",
"vae": "VAE",
"urlOrLocalPath": "URL Hoặc Đường Dẫn Trên Máy Chủ",
"urlOrLocalPath": "URL / Đường Dẫn",
"triggerPhrases": "Từ Ngữ Kích Hoạt",
"variant": "Biến Thể",
"urlOrLocalPathHelper": "Url cần chỉ vào một tệp duy nhất. Còn đường dẫn trên máy chủ có thể chỉ vào một tệp hoặc một thư mục cho chỉ một model diffusers.",
@@ -719,7 +723,7 @@
"starterModels": "Model Khởi Đầu",
"typePhraseHere": "Thêm từ ngữ ở đây",
"upcastAttention": "Upcast Attention",
"vaePrecision": "VAE Precision",
"vaePrecision": "Độ Chuẩn VAE",
"installingBundle": "Đang Tải Nguyên Bộ",
"installingModel": "Đang Tải Model",
"installingXModels_other": "Đang tải {{count}} model",
@@ -733,7 +737,9 @@
"textualInversions": "Bộ Đảo Ngược Văn Bản",
"loraTriggerPhrases": "Từ Ngữ Kích Hoạt Cho LoRA",
"width": "Chiều Rộng",
"starterModelsInModelManager": "Model khởi đầu có thể tìm thấy ở Trình Quản Lý Model"
"starterModelsInModelManager": "Model khởi đầu có thể tìm thấy ở Trình Quản Lý Model",
"clipLEmbed": "CLIP-L Embed",
"clipGEmbed": "CLIP-G Embed"
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -745,18 +751,18 @@
"parameterSet": "Dữ liệu tham số {{parameter}}",
"positivePrompt": "Lệnh Tích Cực",
"recallParameter": "Gợi Nhớ {{label}}",
"seed": "Tham Số Hạt Giống",
"seed": "Hạt Giống",
"negativePrompt": "Lệnh Tiêu Cực",
"noImageDetails": "Không tìm thấy chí tiết ảnh",
"strength": "Mức độ mạnh từ ảnh sang ảnh",
"Threshold": "Ngưỡng Nhiễu",
"width": "Chiều Rộng",
"steps": "Tham Số Bước",
"steps": "Số Bước",
"vae": "VAE",
"workflow": "Workflow",
"seamlessXAxis": "Trục X Liền Mạch",
"seamlessYAxis": "Trục Y Liền Mạch",
"cfgScale": "Thước Đo CFG",
"cfgScale": "Thang CFG",
"allPrompts": "Tất Cả Lệnh",
"generationMode": "Chế Độ Tạo Sinh",
"height": "Chiều Dài",
@@ -770,7 +776,7 @@
},
"accordions": {
"generation": {
"title": "Generation (Máy Tạo Sinh)"
"title": "Generation (Tạo Sinh)"
},
"image": {
"title": "Hình Ảnh"
@@ -780,7 +786,7 @@
"options": "Lựa Chọn $t(accordions.advanced.title)"
},
"compositing": {
"coherenceTab": "Coherence Pass (Lớp Kết Hợp)",
"coherenceTab": "Coherence Pass (Liên Kết)",
"title": "Kết Hợp",
"infillTab": "Infill (Lấp Đầy)"
},
@@ -789,21 +795,21 @@
}
},
"invocationCache": {
"disableSucceeded": "Bộ Nhớ Đệm Kích Hoạt Đã Tắt",
"disableFailed": "Có Vấn Đề Khi Tắt Bộ Nhớ Đệm Kích Hoạt",
"hits": "Truy Cập Bộ Nhớ Đệm",
"maxCacheSize": "Kích Thước Tối Đa Bộ Nhớ Đệm",
"cacheSize": "Kích Thước Bộ Nhớ Đệm",
"enableFailed": "Có Vấn Đề Khi Bật Bộ Nhớ Đệm Kích Hoạt",
"disableSucceeded": "Bộ Nhớ Đệm Đã Tắt",
"disableFailed": "Có Vấn Đề Khi Tắt Bộ Nhớ Đệm",
"hits": "Số Lần Trúng",
"maxCacheSize": "Tối Đa",
"cacheSize": "Tổng Cache",
"enableFailed": "Có Vấn Đề Khi Bật Bộ Nhớ Đệm",
"disable": "Tắt",
"invocationCache": "Bộ Nhớ Đệm Kích Hoạt",
"clearSucceeded": "Bộ Nhớ Đệm Kích Hoạt Đã Được Dọn",
"enableSucceeded": "Bộ Nhớ Đệm Kích Hoạt Đã Bật",
"invocationCache": "Bộ Nhớ Đệm",
"clearSucceeded": "Bộ Nhớ Đệm Đã Được Dọn",
"enableSucceeded": "Bộ Nhớ Đệm Đã Bật",
"useCache": "Dùng Bộ Nhớ Đệm",
"enable": "Bật",
"misses": "Không Truy Cập Bộ Nhớ Đệm",
"misses": "Số Lần Trật",
"clear": "Dọn Dẹp",
"clearFailed": "Có Vấn Đề Khi Dọn Dẹp Bộ Nhớ Đệm Kích Hoạt"
"clearFailed": "Có Vấn Đề Khi Dọn Dẹp Bộ Nhớ Đệm"
},
"hrf": {
"metadata": {
@@ -905,7 +911,7 @@
"unknownNode": "Node Không Rõ",
"unknownNodeType": "Loại Node Không Rõ",
"unknownTemplate": "Mẫu Trình Bày Không Rõ",
"cannotConnectOutputToOutput": "Không thế kết nối đầu ra với đầu vào",
"cannotConnectOutputToOutput": "Không thế kết nối đầu ra với đầu ra",
"cannotConnectToSelf": "Không thể kết nối với chính nó",
"workflow": "Workflow",
"addNodeToolTip": "Thêm Node (Shift+A, Space)",
@@ -952,13 +958,15 @@
"executionStateInProgress": "Đang Xử Lý",
"showLegendNodes": "Hiển Thị Vùng Nhập",
"outputFieldTypeParseError": "Không thể phân tích loại dữ liệu đầu ra của {{node}}.{{field}} ({{message}})",
"modelAccessError": "Không thể tìm thấy model {{key}}, chuyển về mặc định"
"modelAccessError": "Không thể tìm thấy model {{key}}, chuyển về mặc định",
"internalDesc": "Trình kích hoạt này được dùng bên trong bởi Invoke. Nó có thể phá hỏng thay đổi trong khi cập nhật ứng dụng và có thể bị xoá bất cứ lúc nào.",
"specialDesc": "Trình kích hoạt này có một số xử lý đặc biệt trong ứng dụng. Ví dụ, Node Hàng Loạt được dùng để xếp vào nhiều đồ thị từ một workflow."
},
"popovers": {
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplier",
"heading": "Hệ Số Nhân Thang CFG",
"paragraphs": [
"Hệ số nhân điều chỉnh cho hướng dẫn CFG, dùng cho model được huấn luyện bằng zero-terminal SNR (ztsnr).",
"Hệ số nhân điều chỉnh để hướng dẫn cho CFG, dùng cho model được huấn luyện bằng zero-terminal SNR (ztsnr).",
"Giá trị khuyến cáo là 0.7 cho những model này."
]
},
@@ -970,10 +978,10 @@
]
},
"paramCFGScale": {
"heading": "Thước Đo CFG",
"heading": "Thang CFG",
"paragraphs": [
"Điều khiển mức độ lệnh tác động lên quá trình tạo sinh.",
"Giá trị của Thước đo CFG quá cao có thể tạo độ bão hoà quá mức và khiến ảnh tạo sinh bị méo mó. "
"Giá trị của Thang CFG quá cao có thể tạo độ bão hoà quá mức và khiến ảnh tạo sinh bị méo mó. "
]
},
"paramScheduler": {
@@ -984,13 +992,13 @@
]
},
"compositingCoherencePass": {
"heading": "Coherence Pass (Lớp Kết Hợp)",
"heading": "Coherence Pass (Liên Kết)",
"paragraphs": [
"Bước thứ hai trong quá trình khử nhiễu để hợp nhất với ảnh inpaint/outpaint."
]
},
"refinerNegativeAestheticScore": {
"heading": "Điểm Tiêu Cực Cho Tiêu Chuẩn",
"heading": "Điểm Khác Tiêu Chuẩn",
"paragraphs": [
"Trọng lượng để tạo sinh ảnh giống với ảnh có điểm tiêu chuẩn thấp, dựa vào dữ liệu huấn luyện."
]
@@ -998,26 +1006,26 @@
"refinerCfgScale": {
"paragraphs": [
"Điều khiển mức độ lệnh tác động lên quá trình tạo sinh.",
"Giống với thước đo CFG để tạo sinh."
"Giống với thang CFG để tạo sinh."
],
"heading": "Thước Đo CFG"
"heading": "Thang CFG"
},
"refinerSteps": {
"heading": "Tham Số Bước",
"heading": "Số Bước",
"paragraphs": [
"Số bước diễn ra trong khi tinh chế các phần nhỏ của quá trình tạo sinh.",
"Giống với tham số bước để tạo sinh."
"Giống với số bước để tạo sinh."
]
},
"paramSteps": {
"heading": "Tham Số Bước",
"heading": "Số Bước",
"paragraphs": [
"Số bước dùng để biểu diễn trong mỗi lần tạo sinh.",
"Số bước càng cao thường sẽ tạo ra ảnh tốt hơn nhưng ngốn nhiều thời gian hơn."
]
},
"paramWidth": {
"heading": "Chiều Rộng",
"heading": "Rộng",
"paragraphs": [
"Chiều rộng của ảnh tạo sinh. Phải là bội số của 8."
]
@@ -1044,14 +1052,14 @@
"paragraphs": [
"Trọng lượng để tạo sinh ảnh giống với ảnh có điểm tiêu chuẩn cao, dựa vào dữ liệu huấn luyện."
],
"heading": "Điểm Tích Cực Cho Tiêu Chuẩn"
"heading": "Điểm Giống Tiêu Chuẩn"
},
"paramVAEPrecision": {
"paragraphs": [
"Độ chính xác dùng trong khi mã hoá và giải mã VAE.",
"Chính xác một nửa/Fp16 sẽ hiệu quả hơn, đổi lại cho những thay đổi nhỏ với ảnh."
],
"heading": "VAE Precision"
"heading": "Độ Chuẩn VAE"
},
"fluxDevLicense": {
"heading": "Giấy Phép Phi Thương Mại",
@@ -1070,14 +1078,14 @@
"paragraphs": [
"Chiều dài của ảnh tạo sinh. Phải là bội số của 8."
],
"heading": "Chiều Dài"
"heading": "Dài"
},
"paramRatio": {
"paragraphs": [
"Tỉ lệ khung hình của kích thước của ảnh được tạo ra.",
"Kích thước ảnh (theo số lượng pixel) tương đương với 512x512 được khuyến nghị cho model SD1.5 và kích thước tương đương với 1024x1024 được khuyến nghị cho model SDXL."
],
"heading": "Tỉ Lệ Khung Hình"
"heading": "Tỉ Lệ"
},
"seamlessTilingYAxis": {
"paragraphs": [
@@ -1105,7 +1113,9 @@
},
"controlNetWeight": {
"paragraphs": [
"Trọng lượng của Control Adapter. Trọng lượng càng cao sẽ dẫn đến tác động càng lớn lên ảnh cuối cùng."
"Điều chỉnh mức độ layer ảnh hưởng đến quá trình xử lý tạo sinh.",
"• Trọng Lượng Lớn Hơn (.75-2): Gây ra ảnh hưởng lớn hơn lên kết quả cuối cùng.",
"• Trọng Lượng Nhỏ Hơn (0-.75): Gây ra ảnh hưởng nhỏ hơn lên kết quả cuối cùng."
],
"heading": "Trọng Lượng"
},
@@ -1130,26 +1140,26 @@
},
"compositingCoherenceMinDenoise": {
"paragraphs": [
"Sức mạnh khử nhiễu nhỏ nhất cho chế độ kết hợp",
"Sức mạnh khử nhiễu nhỏ nhất cho vùng kết hợp khi inpaint/outpaint"
"Độ khử nhiễu nhỏ nhất cho chế độ liên kết",
"Sức mạnh khử nhiễu nhỏ nhất cho vùng liên kết khi inpaint/outpaint"
],
"heading": "Độ Khử Nhiễu Tối Thiểu"
"heading": "Min Khử Nhiễu"
},
"compositingCoherenceEdgeSize": {
"paragraphs": [
"Kích thước cạnh cho lớp kết hợp."
"Kích c cạnh dùng cho coherence pass."
],
"heading": "Kích Thước Cạnh"
"heading": "Kích Cỡ Cạnh"
},
"compositingMaskBlur": {
"heading": "Mask Blur",
"heading": "Độ Mờ Vùng",
"paragraphs": [
"Độ mờ của phần được phủ."
]
},
"ipAdapterMethod": {
"paragraphs": [
"Cách thức dùng để áp dụng IP Adapter hiện tại."
"Phương thức định nghĩa cách ảnh mẫu sẽ chỉ dẫn quá trình xử lý tạo sinh."
],
"heading": "Cách Thức"
},
@@ -1170,7 +1180,7 @@
"noiseUseCPU": {
"paragraphs": [
"Điều chỉnh độ nhiễu được tạo ra trên CPU hay GPU.",
"Với Độ nhiễu CPU được bật, một tham số hạt giống cụ thể sẽ tạo ra hình ảnh giống nhau trên mọi máy.",
"Với Độ nhiễu CPU được bật, một hạt giống cụ thể sẽ tạo ra hình ảnh giống nhau trên mọi máy.",
"Không có tác động nào đến hiệu suất khi bật Độ nhiễu CPU."
],
"heading": "Dùng Độ Nhiễu CPU"
@@ -1196,10 +1206,11 @@
},
"controlNetBeginEnd": {
"paragraphs": [
"Một phần trong quá trình xử lý khử nhiễu mà sẽ được Control Adapter áp dụng.",
"Nói chung, Control Adapter áp dụng vào lúc bắt đầu của quá trình hướng dẫn thành phần, và cũng áp dụng vào lúc kết thúc hướng dẫn chi tiết."
"Cài đặt này xác định phần xử lý khử nhiễu (trong khi tạo sinh) kết hợp với chỉ dẫn từ layer này.",
"• Bước Bắt Đầu (%): Chỉ định lúc bắt đầu áp dụng chỉ dẫn từ layer này trong quá trình tạo sinh.",
"• Bước Kết Thúc (%): Chỉ định lúc dừng áp dụng chỉ dẫn của layer này và trở về chỉ dẫn chung từ model và các thiết lập khác."
],
"heading": "Phần Trăm Tham Số Bước Khi Bắt Đầu/Kết Thúc"
"heading": "Phần Trăm Số Bước Khi Bắt Đầu/Kết Thúc"
},
"scale": {
"heading": "Tỉ Lệ",
@@ -1221,12 +1232,12 @@
},
"dynamicPromptsSeedBehaviour": {
"paragraphs": [
"Điều khiển cách tham số hạt giống được dùng khi tạo sinh từ lệnh.",
"Cứ mỗi lần lặp, một tham số hạt giống mới sẽ được dùng. Dùng nó để khám phá những biến thể từ lệnh trên mỗi tham số hạt giống.",
"Ví dụ, nếu bạn có 5 lệnh, mỗi ảnh sẽ dùng cùng tham số hạt giống.",
"Một tham số hạt giống mới sẽ được dùng cho từng ảnh. Nó tạo ra nhiều biến thể."
"Điều khiển cách hạt giống được dùng khi tạo sinh từ lệnh.",
"Cứ mỗi lần lặp, một hạt giống mới sẽ được dùng. Dùng nó để khám phá những biến thể từ lệnh trên mỗi hạt giống.",
"Ví dụ, nếu bạn có 5 lệnh, mỗi ảnh sẽ dùng cùng hạt giống.",
"Một hạt giống mới sẽ được dùng cho từng ảnh. Nó tạo ra nhiều biến thể."
],
"heading": "Hành Động Cho Tham Số Hạt Giống"
"heading": "Hành Vi Của Hạt Giống"
},
"paramGuidance": {
"heading": "Hướng Dẫn",
@@ -1255,10 +1266,10 @@
},
"paramAspect": {
"paragraphs": [
"Tỉ lệ khung hành của ảnh tạo sinh. Điều chỉnh tỉ lệ se cập nhật Chiều Rộng và Chiều Dài tương ứng.",
"\"Tối ưu hoá\" sẽ đặt Chiều Rộng và Chiều Dài vào kích thước tối ưu cho model được chọn."
"Tỉ lệ khung hành của ảnh tạo sinh. Điều chỉnh tỉ lệ s cập nhật chiều rộng và chiều dài tương ứng.",
"\"Tối ưu hoá\" sẽ đặt chiều rộng và chiều dài vào kích thước tối ưu cho model được chọn."
],
"heading": "Khung Hình"
"heading": "Tỉ Lệ"
},
"paramNegativeConditioning": {
"heading": "Lệnh Tiêu Cực",
@@ -1278,7 +1289,7 @@
"Nơi trong quá trình xử lý tạo sinh mà refiner bắt đầu được dùng.",
"0 nghĩa là bộ refiner sẽ được dùng trong toàn bộ quá trình tạo sinh , 0.8 nghĩa là refiner sẽ được dùng trong 20% cuối cùng quá trình tạo sinh."
],
"heading": "Nơi Bắt Đầu Refiner"
"heading": "Bắt Đầu Refiner"
},
"paramUpscaleMethod": {
"paragraphs": [
@@ -1299,9 +1310,9 @@
"heading": "Độ Cấu Trúc"
},
"infillMethod": {
"heading": "Cách Thức Infill",
"heading": "Cách Infill",
"paragraphs": [
"Cách thức làm infill trong quá trình inpaint/outpaint."
"Cách thức infill trong quá trình inpaint/outpaint."
]
},
"paramDenoisingStrength": {
@@ -1330,7 +1341,7 @@
"Điều khiển độ nhiễu ban đầu được dùng để tạo sinh.",
"Tắt lựa chọn \"Ngẫu Nhiên\" để tạo ra kết quá y hệt nhau với cùng một thiết lập tạo sinh."
],
"heading": "Tham Số Hạt Giống"
"heading": "Hạt Giống"
},
"clipSkip": {
"heading": "CLIP Skip",
@@ -1355,7 +1366,7 @@
"compositingCoherenceMode": {
"heading": "Chế Độ",
"paragraphs": [
"Cách thức được dùng để kết hợp ảnh với vùng bao phủ vừa được tạo sinh."
"Cách thức được dùng để liên kết ảnh với vùng bao phủ vừa được tạo sinh."
]
},
"paramModel": {
@@ -1380,7 +1391,7 @@
},
"models": {
"addLora": "Thêm LoRA",
"concepts": "Khái Niệm",
"concepts": "LoRA",
"loading": "đang tải",
"lora": "LoRA",
"noMatchingLoRAs": "Không có LoRA phù hợp",
@@ -1395,30 +1406,17 @@
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
"symmetry": "Tính Đối Xứng",
"type": "Loại",
"seed": "Tham Số Hạt Giống",
"seed": "Hạt Giống",
"processImage": "Xử Lý Hình Ảnh",
"useSize": "Dùng Kích Thước",
"invoke": {
"layer": {
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, tỉ lệ chiều dài hộp giới hạn là {{height}}",
"rgNoRegion": "không có vùng được chọn",
"ipAdapterNoModelSelected": "không có IP Adapter được lựa chọn",
"ipAdapterNoImageSelected": "không có ảnh IP Adapter được lựa chọn",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, chiều dài hộp giới hạn là {{height}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, chiều rộng hộp giới hạn là {{width}}",
"rgNoPromptsOrIPAdapters": "không có lệnh chữ hoặc IP Adapter",
"controlAdapterIncompatibleBaseModel": "model cơ sở của Control Adapter không tương thích",
"ipAdapterIncompatibleBaseModel": "dạng model cơ sở của IP Adapter không tương thích",
"controlAdapterNoModelSelected": "không có model Control Adapter được chọn"
},
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều rộng hộp giới hạn là {{width}}",
"noModelSelected": "Không có model được lựa chọn",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều dài hộp giới hạn là {{height}}",
"canvasIsFiltering": "Canvas đang được lọc",
"canvasIsRasterizing": "Canvas đang được raster hoá",
"canvasIsTransforming": "Canvas đang được biến đổi",
"canvasIsCompositing": "Canvas đang được kết hợp",
"canvasIsFiltering": "Canvas đang bận (đang lọc)",
"canvasIsRasterizing": "Canvas đang bận (đang raster hoá)",
"canvasIsTransforming": "Canvas đang bận (đang biến đổi)",
"canvasIsCompositing": "Canvas đang bận (đang kết hợp)",
"noPrompts": "Không có lệnh được tạo",
"noNodesInGraph": "Không có node trong đồ thị",
"addingImagesTo": "Thêm ảnh vào",
@@ -1430,22 +1428,26 @@
"missingNodeTemplate": "Thiếu mẫu trình bày node",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), chiều dài hộp giới hạn là {{height}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), tỉ lệ chiều rộng hộp giới hạn là {{width}}",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} thiếu đầu ra",
"missingFieldTemplate": "Thiếu vùng mẫu trình bày"
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: thiếu đầu vào",
"missingFieldTemplate": "Thiếu vùng mẫu trình bày",
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} tài nguyên trống",
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: quá ít mục, tối thiểu {{minItems}}",
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: quá nhiều mục, tối đa {{maxItems}}",
"canvasIsSelectingObject": "Canvas đang bận (đang chọn đồ vật)"
},
"cfgScale": "Thước Đo CFG",
"useSeed": "Dùng Tham Số Hạt Giống",
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
"imageActions": "Hành Động Với Hình Ảnh",
"steps": "Tham Số Bước",
"aspect": "Khung Hình",
"steps": "Số Bước",
"aspect": "Tỉ Lệ",
"coherenceMode": "Chế Độ",
"coherenceEdgeSize": "Kích Thước Cạnh",
"coherenceMinDenoise": "Tham Số Khử Nhiễu Nhỏ Nhất",
"coherenceEdgeSize": "Kích Cỡ Cạnh",
"coherenceMinDenoise": "Min Khử Nhiễu",
"denoisingStrength": "Sức Mạnh Khử Nhiễu",
"infillMethod": "Cách Thức Infill",
"infillMethod": "Cách Infill",
"setToOptimalSize": "Tối ưu hoá kích cỡ cho model",
"maskBlur": "Mask Blur",
"width": "Chiều Rộng",
"maskBlur": "Độ Mờ Vùng",
"width": "Rộng",
"scale": "Tỉ Lệ",
"recallMetadata": "Gợi Lại Metadata",
"clipSkip": "CLIP Skip",
@@ -1453,7 +1455,7 @@
"boxBlur": "Box Blur",
"gaussianBlur": "Gaussian Blur",
"staged": "Staged (Tăng khử nhiễu có hệ thống)",
"scaledHeight": "Tỉ Lệ Chiều Dài",
"scaledHeight": "Tỉ Lệ Dài",
"cancel": {
"cancel": "Huỷ"
},
@@ -1461,12 +1463,12 @@
"optimizedImageToImage": "Tối Ưu Hoá Hình Ảnh Sang Hình Ảnh",
"sendToCanvas": "Gửi Vào Canvas",
"sendToUpscale": "Gửi Vào Upscale",
"scaledWidth": "Tỉ Lệ Chiều Rộng",
"scaledWidth": "Tỉ Lệ Rộng",
"scheduler": "Scheduler",
"seamlessXAxis": "Trục X Liền Mạch",
"seamlessYAxis": "Trục Y Liền Mạch",
"guidance": "Hướng Dẫn",
"height": "Chiều Cao",
"height": "Dài",
"noiseThreshold": "Ngưỡng Nhiễu",
"negativePromptPlaceholder": "Lệnh Tiêu Cực",
"iterations": "Lặp Lại",
@@ -1479,13 +1481,13 @@
"useCpuNoise": "Dùng Độ Nhiễu CPU",
"remixImage": "Phối Lại Hình Ảnh",
"showOptionsPanel": "Hiển Thị Bảng Bên Cạnh (O hoặc T)",
"shuffle": "Xáo Trộn Tham Số Hạt Giống",
"shuffle": "Xáo Trộn",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (lớn quá)",
"cfgRescaleMultiplier": "CFG Rescale Multiplier",
"cfgRescaleMultiplier": "Hệ Số Nhân Thang CFG",
"setToOptimalSizeTooSmall": "$t(parameters.setToOptimalSize) (nhỏ quá)",
"images": "Ảnh Ban Đầu",
"controlNetControlMode": "Chế Độ Điều Khiển",
"lockAspectRatio": "Khoá Tỉ Lệ Khung Hình",
"lockAspectRatio": "Khoá Tỉ Lệ",
"swapDimensions": "Hoán Đổi Kích Thước",
"copyImage": "Sao Chép Hình Ảnh",
"downloadImage": "Tải Xuống Hình Ảnh",
@@ -1498,11 +1500,11 @@
},
"dynamicPrompts": {
"seedBehaviour": {
"perIterationDesc": "Sử dụng tham số hạt giống khác nhau cho mỗi lần lặp lại",
"perPromptDesc": "Sử dụng tham số hạt giống khác nhau cho mỗi hình ảnh",
"label": "Hành Động Cho Tham Số Hạt Giống",
"perPromptLabel": "Tham Số Hạt Giống Mỗi Hình Ảnh",
"perIterationLabel": "Tham Số Hạt Giống Mỗi Lần Lặp Lại"
"perIterationDesc": "Sử dụng hạt giống khác nhau cho mỗi lần lặp lại",
"perPromptDesc": "Sử dụng hạt giống khác nhau cho mỗi hình ảnh",
"label": "Hành Động Cho Hạt Giống",
"perPromptLabel": "Một Hạt Giống Mỗi Ảnh",
"perIterationLabel": "Hạt Giống Mỗi Lần Lặp Lại"
},
"loading": "Tạo Sinh Dùng Dynamic Prompt...",
"showDynamicPrompts": "HIện Dynamic Prompt",
@@ -1513,9 +1515,9 @@
"settings": {
"beta": "Beta",
"general": "Cài Đặt Chung",
"confirmOnDelete": "Chắp Nhận Xoá",
"confirmOnDelete": "Xác Nhận Khi Xoá",
"developer": "Nhà Phát Triển",
"confirmOnNewSession": "Chắp Nhận Mở Phiên Mới",
"confirmOnNewSession": "Xác Nhận Khi Mở Phiên Mới",
"antialiasProgressImages": "Xử Lý Khử Răng Cưa Hình Ảnh",
"models": "Models",
"informationalPopoversDisabledDesc": "Hộp thoại hỗ trợ thông tin đã tắt. Bật lại trong Cài đặt.",
@@ -1542,24 +1544,25 @@
"resetWebUIDesc2": "Nếu ảnh không được xuất hiện trong thư viện hoặc điều gì đó không ổn đang diễn ra, hãy thử khởi động lại trước khi báo lỗi trên Github.",
"displayInProgress": "Hiển Thị Hình Ảnh Đang Xử Lý",
"intermediatesClearedFailed": "Có Vấn Đề Khi Dọn Sạch Sản Phẩm Trung Gian",
"enableInvisibleWatermark": "Bật Chế Độ Ẩn Watermark"
"enableInvisibleWatermark": "Bật Chế Độ Ẩn Watermark",
"showDetailedInvocationProgress": "Hiện Dữ Liệu Xử Lý"
},
"sdxl": {
"loading": "Đang Tải...",
"posAestheticScore": "Điểm Tích Cực Cho Tiêu Chuẩn",
"steps": "Tham Số Bước",
"refinerSteps": "Tham Số Bước Refiner",
"posAestheticScore": "Điểm Giống Tiêu Chuẩn",
"steps": "Số Bước",
"refinerSteps": "Số Bước Refiner",
"refinermodel": "Model Refiner",
"refinerStart": "Nơi Bắt Đầu Refiner",
"refinerStart": "Bắt Đầu Refiner",
"denoisingStrength": "Sức Mạnh Khử Nhiễu",
"posStylePrompt": "Điểm Tích Cực Cho Lệnh Phong Cách",
"scheduler": "Scheduler",
"refiner": "Refiner",
"cfgScale": "Thước Đo CFG",
"cfgScale": "Thang CFG",
"concatPromptStyle": "Liên Kết Lệnh & Phong Cách",
"freePromptStyle": "Viết Lệnh Thủ Công Cho Phong Cách",
"negStylePrompt": "Điểm Tiêu Cực Cho Lệnh Phong Cách",
"negAestheticScore": "Điểm Tiêu Cực Cho Tiêu Chuẩn",
"negAestheticScore": "Điểm Khác Tiêu Chuẩn",
"noModelsAvailable": "Không có sẵn model"
},
"controlLayers": {
@@ -1594,7 +1597,7 @@
"pullBboxIntoLayerError": "Có Vấn Đề Khi Chuyển Hộp Giới Hạn Thành Layer",
"pullBboxIntoReferenceImageOk": "Chuyển Hộp Giới Hạn Thành Ảnh Mẫu",
"clearCaches": "Xoá Bộ Nhớ Đệm",
"outputOnlyMaskedRegions": "Chỉ Xuất Đầu Ra Ở Vùng Phủ",
"outputOnlyMaskedRegions": "Chỉ Xuất Đầu Ra Ở Vùng Tạo Sinh",
"addLayer": "Thêm Layer",
"regional": "Khu Vực",
"regionIsEmpty": "Vùng được chọn trống",
@@ -1608,10 +1611,13 @@
"moveForward": "Chuyển Lên Đầu",
"fitBboxToLayers": "Xếp Vừa Hộp Giới Hạn Vào Layer",
"ipAdapterMethod": {
"full": "Đầy Đủ",
"full": "Phong Cách Và Thành Phần",
"style": "Chỉ Lấy Phong Cách",
"composition": "Chỉ Lấy Thành Phần",
"ipAdapterMethod": "Cách Thức IP Adapter"
"ipAdapterMethod": "Cách Thức",
"compositionDesc": "Áp dụng cách trình bày và bỏ qua phong cách mẫu.",
"fullDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) & thành phần (cách trình bày).",
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày."
},
"deletePrompt": "Xoá Lệnh",
"rasterLayer": "Layer Dạng Raster",
@@ -1899,7 +1905,33 @@
"colorPicker": "Chọn Màu"
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu."
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên ảnh</UploadButton> hoặc kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Như $t(controlLayers.rasterLayer) (Thay Đổi Kích Thước)",
"asControlLayer": "Như $t(controlLayers.controlLayer)",
"asControlLayerResize": "Như $t(controlLayers.controlLayer) (Thay Đổi Kích Thước)",
"newSession": "Phiên Làm Việc Mới",
"resetGenerationSettings": "Khởi Động Lại Cài Đặt Tạo Sinh",
"referenceImageRegional": "Ảnh Mẫu (Khu Vực)",
"referenceImageGlobal": "Ảnh Mẫu (Toàn Vùng)",
"warnings": {
"problemsFound": "Phát hiện vấn đề",
"unsupportedModel": "layer không được hỗ trợ cho model cơ sở này",
"controlAdapterNoModelSelected": "không có model được chọn cho Layer Chỉnh Sửa Được",
"controlAdapterNoControl": "chưa chọn/vẽ điều khiển",
"ipAdapterIncompatibleBaseModel": "model cơ sở cho Ảnh Mẫu không tương thích",
"ipAdapterNoImageSelected": "chưa chọn Ảnh Mẫu",
"controlAdapterIncompatibleBaseModel": "model cơ sở cho Layer Chỉnh Sửa Được không tương thích",
"ipAdapterNoModelSelected": "không có model được chọn cho Ảnh Mẫu",
"rgNoPromptsOrIPAdapters": "không có lệnh hoặc Ảnh Mẫu",
"rgNegativePromptNotSupported": "Lệnh Tiêu Cực không được hỗ trợ cho model cơ sở được chọn",
"rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
"rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
"rgNoRegion": "không có khu vực được vẽ"
}
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -1960,7 +1992,8 @@
"generation": "Generation",
"system": "Hệ Thống",
"canvas": "Canvas",
"logNamespaces": "Nơi Được Log"
"logNamespaces": "Vùng Ghi Log",
"dnd": "Kéo Thả"
},
"logLevel": {
"logLevel": "Cấp Độ Log",
@@ -2124,8 +2157,7 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: Hỗ trợ cho Từ ngữ Sang Hình Ảnh trong Workflow với phiên bản SD 3.5 Medium hoặc Large.",
"<StrongComponent>Canvas</StrongComponent>: Hợp lý hoá cách xử lý Layer Điều Khiển Được và cải thiện thiết lập điều khiển mặc định."
"<StrongComponent>Hướng Dẫn Khu Vực FLUX (beta)</StrongComponent>: Bản beta của Hướng Dẫn Khu Vực FLUX của chúng ta đã có mắt tại bảng điều khiển lệnh khu vực."
]
},
"upsell": {
@@ -2133,5 +2165,67 @@
"inviteTeammates": "Thêm Đồng Đội",
"shareAccess": "Chia Sẻ Quyền Truy Cập",
"professionalUpsell": "Không có sẵn Phiên Bản Chuyên Nghiệp cho Invoke. Bấm vào đây hoặc đến invoke.com/pricing để thêm chi tiết."
},
"supportVideos": {
"supportVideos": "Video Hỗ Trợ",
"gettingStarted": "Bắt Đầu Làm Quen",
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
"videos": {
"howDoIDoImageToImageTransformation": {
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
},
"upscaling": {
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện?",
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện."
},
"howDoIEditOnTheCanvas": {
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
},
"howDoIUseInpaintMasks": {
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
},
"howDoIOutpaint": {
"title": "Làm Sao Để Tôi Outpaint?",
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
},
"creatingYourFirstImage": {
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
},
"usingControlLayersAndReferenceGuides": {
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
},
"understandingImageToImageAndDenoising": {
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
},
"exploringAIModelsAndConceptAdapters": {
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
}
},
"controlCanvas": "Điều Khiển Canvas",
"watch": "Xem"
}
}


@@ -661,19 +661,6 @@
"missingFieldTemplate": "缺失模板",
"addingImagesTo": "添加图像到",
"noPrompts": "没有已生成的提示词",
"layer": {
"ipAdapterNoModelSelected": "未选择IP adapter",
"controlAdapterNoModelSelected": "未选择Control Adapter模型",
"rgNoPromptsOrIPAdapters": "无文本提示或IP Adapters",
"controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
"ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
"ipAdapterNoImageSelected": "未选择IP Adapter图像",
"rgNoRegion": "未选择区域",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框宽度为 {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框高度为 {{height}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框高度为 {{height}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框宽度为 {{width}}"
},
"canvasIsFiltering": "画布正在过滤",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",


@@ -1,3 +1,4 @@
import { useStore } from '@nanostores/react';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
@@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
@@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const { t } = useTranslation();
// Use a ref to ensure that we only perform the action once
const didInit = useRef(false);
const didParseOpenAPISchema = useStore($hasTemplates);
const store = useAppStore();
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
@@ -174,7 +177,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
);
useEffect(() => {
if (didInit.current || !action) {
if (didInit.current || !action || !didParseOpenAPISchema) {
return;
}
@@ -187,22 +190,29 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
case 'selectStylePreset':
handleSelectStylePreset(action.data.stylePresetId);
break;
case 'sendToCanvas':
handleSendToCanvas(action.data.imageName);
break;
case 'useAllParameters':
handleUseAllMetadata(action.data.imageName);
break;
case 'goToDestination':
handleGoToDestination(action.data.destination);
break;
default:
break;
}
}, [
handleSendToCanvas,
handleUseAllMetadata,
action,
handleLoadWorkflow,
handleSelectStylePreset,
handleGoToDestination,
handleLoadWorkflow,
didParseOpenAPISchema,
]);
};


@@ -2,7 +2,6 @@ import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { extractMessageFromAssertionError } from 'common/util/extractMessageFromAssertionError';
import type { Result } from 'common/util/result';
import { withResult, withResultAsync } from 'common/util/result';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
@@ -10,11 +9,9 @@ import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGr
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Invocation } from 'services/api/types';
import { assert, AssertionError } from 'tsafe';
import type { JsonObject } from 'type-fest';
@@ -25,42 +22,32 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
enqueueRequested.match(action) && action.payload.tabName === 'canvas',
effect: async (action, { getState, dispatch }) => {
log.debug('Enqueue requested');
const state = getState();
const model = state.params.model;
const { prepend } = action.payload;
const manager = $canvasManager.get();
assert(manager, 'No model found in state');
let buildGraphResult: Result<
{
g: Graph;
noise: Invocation<'noise' | 'flux_denoise' | 'sd3_denoise'>;
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'sd3_text_encoder'>;
},
Error
>;
assert(manager, 'No canvas manager');
const model = state.params.model;
assert(model, 'No model found in state');
const base = model.base;
switch (base) {
case 'sdxl':
buildGraphResult = await withResultAsync(() => buildSDXLGraph(state, manager));
break;
case 'sd-1':
case `sd-2`:
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
break;
case `sd-3`:
buildGraphResult = await withResultAsync(() => buildSD3Graph(state, manager));
break;
case `flux`:
buildGraphResult = await withResultAsync(() => buildFLUXGraph(state, manager));
break;
default:
assert(false, `No graph builders for base ${base}`);
}
const buildGraphResult = await withResultAsync(async () => {
switch (base) {
case 'sdxl':
return await buildSDXLGraph(state, manager);
case 'sd-1':
case `sd-2`:
return await buildSD1Graph(state, manager);
case `sd-3`:
return await buildSD3Graph(state, manager);
case `flux`:
return await buildFLUXGraph(state, manager);
default:
assert(false, `No graph builders for base ${base}`);
}
});
if (buildGraphResult.isErr()) {
let description: string | null = null;

Some files were not shown because too many files have changed in this diff.