Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-15 07:28:06 -05:00
Compare commits
530 Commits
.github/pull_request_template.md (vendored, 1 line changed)
@@ -19,3 +19,4 @@
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
@@ -38,7 +38,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
    elif [ "$GPU_DRIVER" = "rocm" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
    else \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
    fi &&\
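The same platform/driver selection can be summarized compactly. Below is a hedged Python sketch of the logic for illustration only: `TARGETPLATFORM` and `GPU_DRIVER` correspond to the Docker build args in the hunk above, while the helper function itself is hypothetical and not part of the Dockerfile.

```python
# Hypothetical helper mirroring the Dockerfile's PyTorch wheel-index selection.
# TARGETPLATFORM and GPU_DRIVER correspond to the Docker build args shown above.
def torch_extra_index_url(target_platform: str, gpu_driver: str) -> str:
    if target_platform == "linux/arm64" or gpu_driver == "cpu":
        return "https://download.pytorch.org/whl/cpu"
    if gpu_driver == "rocm":
        return "https://download.pytorch.org/whl/rocm6.1"  # bumped from rocm5.6 in this change
    return "https://download.pytorch.org/whl/cu124"


print(torch_extra_index_url("linux/amd64", "rocm"))
```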
@@ -5,7 +5,7 @@ If you're a new contributor to InvokeAI or Open Source Projects, this is the gui
## New Contributor Checklist

- [x] Set up your local development environment & fork of InvokeAI by following [the steps outlined here](../dev-environment.md)
- [x] Set up your local tooling with [this guide](InvokeAI/contributing/LOCAL_DEVELOPMENT/#developing-invokeai-in-vscode). Feel free to skip this step if you already have tooling you're comfortable with.
- [x] Set up your local tooling with [this guide](../LOCAL_DEVELOPMENT.md). Feel free to skip this step if you already have tooling you're comfortable with.
- [x] Familiarize yourself with [Git](https://www.atlassian.com/git) & our project structure by reading through the [development documentation](development.md)
- [x] Join the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord
- [x] Choose an issue to work on! This can be achieved by asking in the #dev-chat channel, tackling a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) or finding an item on the [roadmap](https://github.com/orgs/invoke-ai/projects/7). If nothing in any of those places catches your eye, feel free to work on something of interest to you!
@@ -17,46 +17,49 @@ If you just want to use Invoke, you should use the [installer][installer link].

## Setup

1. Run through the [requirements][requirements link].
1. [Fork and clone][forking link] the [InvokeAI repo][repo link].
1. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
1. Create a python virtual environment inside the directory you just created:
2. [Fork and clone][forking link] the [InvokeAI repo][repo link].
3. Create an directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.
4. Create a python virtual environment inside the directory you just created:

   ```sh
   python3 -m venv .venv --prompt InvokeAI-Dev
   ```

   ```sh
   python3 -m venv .venv --prompt InvokeAI-Dev
   ```

1. Activate the venv (you'll need to do this every time you want to run the app):
5. Activate the venv (you'll need to do this every time you want to run the app):

   ```sh
   source .venv/bin/activate
   ```

   ```sh
   source .venv/bin/activate
   ```

1. Install the repo as an [editable install][editable install link]:
6. Install the repo as an [editable install][editable install link]:

   ```sh
   pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
   ```

   ```sh
   pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
   ```

   Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
   Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.

1. Install the frontend dev toolchain:
7. Install the frontend dev toolchain:

   - [`nodejs`](https://nodejs.org/) (recommend v20 LTS)
   - [`pnpm`](https://pnpm.io/installation#installing-a-specific-version) (must be v8 - not v9!)
   - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)

1. Do a production build of the frontend:
8. Do a production build of the frontend:

   ```sh
   pnpm build
   ```

   ```sh
   cd PATH_TO_INVOKEAI_REPO/invokeai/frontend/web
   pnpm i
   pnpm build
   ```

1. Start the application:
9. Start the application:

   ```sh
   python scripts/invokeai-web.py
   ```

   ```sh
   cd PATH_TO_INVOKEAI_REPO
   python scripts/invokeai-web.py
   ```

1. Access the UI at `localhost:9090`.
10. Access the UI at `localhost:9090`.

## Updating the UI
@@ -209,7 +209,7 @@ checkpoint models.

To solve this, go to the Model Manager tab (the cube), select the
checkpoint model that's giving you trouble, and press the "Convert"
button in the upper right of your browser window. This will conver the
button in the upper right of your browser window. This will convert the
checkpoint into a diffusers model, after which loading should be
faster and less memory-intensive.
@@ -97,16 +97,16 @@ Prior to installing PyPatchMatch, you need to take the following steps:
   sudo pacman -S --needed base-devel
   ```

2. Install `opencv` and `blas`:
2. Install `opencv`, `blas`, and required dependencies:

   ```sh
   sudo pacman -S opencv blas
   sudo pacman -S opencv blas fmt glew vtk hdf5
   ```

   or for CUDA support

   ```sh
   sudo pacman -S opencv-cuda blas
   sudo pacman -S opencv-cuda blas fmt glew vtk hdf5
   ```

3. Fix the naming of the `opencv` package configuration file:
@@ -99,7 +99,6 @@ their descriptions.
| Scale Latents | Scales latents by a given factor. |
| Segment Anything Processor | Applies segment anything processing to image |
| Show Image | Displays a provided image, and passes it forward in the pipeline. |
| Step Param Easing | Experimental per-step parameter easing for denoising steps |
| String Primitive Collection | A collection of string primitive values |
| String Primitive | A string primitive value |
| Subtract Integers | Subtracts two numbers |
@@ -12,7 +12,7 @@ MINIMUM_PYTHON_VERSION=3.10.0
MAXIMUM_PYTHON_VERSION=3.11.100
PYTHON=""
for candidate in python3.11 python3.10 python3 python ; do
    if ppath=`which $candidate`; then
    if ppath=`which $candidate 2>/dev/null`; then
        # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
        # we check that this found executable can actually run
        if [ $($candidate --version &>/dev/null; echo ${PIPESTATUS}) -gt 0 ]; then continue; fi

@@ -30,10 +30,11 @@ done
if [ -z "$PYTHON" ]; then
    echo "A suitable Python interpreter could not be found"
    echo "Please install Python $MINIMUM_PYTHON_VERSION or higher (maximum $MAXIMUM_PYTHON_VERSION) before running this script. See instructions at $INSTRUCTIONS for help."
    echo "For the best user experience we suggest enlarging or maximizing this window now."
    read -p "Press any key to exit"
    exit -1
fi

echo "For the best user experience we suggest enlarging or maximizing this window now."

exec $PYTHON ./lib/main.py ${@}
read -p "Press any key to exit"
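The `2>/dev/null` and `--version` probe above exist because a `pyenv` shim can satisfy `which` while pointing at a Python version that is not actually installed. For illustration only, the same probing idea sketched in Python; the candidate list mirrors the script, but the function itself is hypothetical and not part of the launcher.

```python
# Hypothetical sketch of the interpreter probe performed by the launcher script:
# find a candidate on PATH, then confirm it actually runs, because a pyenv shim
# can exist for an interpreter that is not installed.
import shutil
import subprocess


def find_python(candidates=("python3.11", "python3.10", "python3", "python")) -> str | None:
    for name in candidates:
        path = shutil.which(name)
        if path is None:
            continue
        # `--version` exits non-zero for a shim whose interpreter is missing.
        if subprocess.run([path, "--version"], capture_output=True).returncode == 0:
            return path
    return None


print(find_python())
```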
@@ -245,6 +245,9 @@ class InvokeAiInstance:

        pip = local[self.pip]

        # Uninstall xformers if it is present; the correct version of it will be reinstalled if needed
        _ = pip["uninstall", "-yqq", "xformers"] & FG

        pipeline = pip[
            "install",
            "--require-virtualenv",
@@ -407,7 +410,7 @@ def get_torch_source() -> Tuple[str | None, str | None]:
    optional_modules: str | None = None
    if OS == "Linux":
        if device == GpuType.ROCM:
            url = "https://download.pytorch.org/whl/rocm5.6"
            url = "https://download.pytorch.org/whl/rocm6.1"
        elif device == GpuType.CPU:
            url = "https://download.pytorch.org/whl/cpu"
        elif device == GpuType.CUDA:
@@ -259,7 +259,7 @@ def select_gpu() -> GpuType:
        [
            f"Detected the [gold1]{OS}-{ARCH}[/] platform",
            "",
            "See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
            "See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/installation/requirements/[/] to ensure your system meets the minimum requirements.",
            "",
            "[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
        ]
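The bracketed tags in these strings ([gold1], [deep_sky_blue1], [b]) are console markup from the rich library, which the installer appears to use for its panels. A minimal hedged example of how such a string renders on its own:

```python
# Assumes the `rich` library; the markup syntax matches the strings in the hunk above.
from rich import print as rprint

rprint(
    "See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/installation/requirements/[/] "
    "to ensure your system meets the minimum requirements."
)
```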
@@ -68,7 +68,7 @@ do_line_input() {
    printf "2: Open the developer console\n"
    printf "3: Command-line help\n"
    printf "Q: Quit\n\n"
    printf "To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest.\n\n"
    printf "To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest\n\n"
    read -p "Please enter 1-4, Q: [1] " yn
    choice=${yn:='1'}
    do_choice $choice
@@ -40,6 +40,8 @@ class AppVersion(BaseModel):

    version: str = Field(description="App version")

    highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")


class AppDependencyVersions(BaseModel):
    """App depencency Versions Response"""
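A minimal sketch of the new optional `highlights` field in use, assuming only the model shown above; the version string and highlight entries here are placeholders, not real release data.

```python
from typing import Optional

from pydantic import BaseModel, Field


class AppVersion(BaseModel):
    """App version response model, as extended in the hunk above."""

    version: str = Field(description="App version")
    highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")


# Placeholder values; the API fills these from the running app's metadata.
print(AppVersion(version="0.0.0", highlights=["Example highlight"]).model_dump_json())
```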
@@ -1,6 +1,7 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""

import contextlib
import io
import pathlib
import shutil

@@ -10,6 +11,7 @@ from enum import Enum
from tempfile import TemporaryDirectory
from typing import List, Optional, Type

import huggingface_hub
from fastapi import Body, Path, Query, Response, UploadFile
from fastapi.responses import FileResponse, HTMLResponse
from fastapi.routing import APIRouter

@@ -27,6 +29,7 @@ from invokeai.app.services.model_records import (
    ModelRecordChanges,
    UnknownModelException,
)
from invokeai.app.util.suppress_output import SuppressOutput
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,

@@ -808,7 +811,11 @@ def get_is_installed(
    for model in installed_models:
        if model.source == starter_model.source:
            return True
        if model.name == starter_model.name and model.base == starter_model.base and model.type == starter_model.type:
        if (
            (model.name == starter_model.name or model.name in starter_model.previous_names)
            and model.base == starter_model.base
            and model.type == starter_model.type
        ):
            return True
    return False

@@ -919,3 +926,51 @@ async def get_stats() -> Optional[CacheStats]:
    """Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded."""

    return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats


class HFTokenStatus(str, Enum):
    VALID = "valid"
    INVALID = "invalid"
    UNKNOWN = "unknown"


class HFTokenHelper:
    @classmethod
    def get_status(cls) -> HFTokenStatus:
        try:
            if huggingface_hub.get_token_permission(huggingface_hub.get_token()):
                # Valid token!
                return HFTokenStatus.VALID
            # No token set
            return HFTokenStatus.INVALID
        except Exception:
            return HFTokenStatus.UNKNOWN

    @classmethod
    def set_token(cls, token: str) -> HFTokenStatus:
        with SuppressOutput(), contextlib.suppress(Exception):
            huggingface_hub.login(token=token, add_to_git_credential=False)
        return cls.get_status()


@model_manager_router.get("/hf_login", operation_id="get_hf_login_status", response_model=HFTokenStatus)
async def get_hf_login_status() -> HFTokenStatus:
    token_status = HFTokenHelper.get_status()

    if token_status is HFTokenStatus.UNKNOWN:
        ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")

    return token_status


@model_manager_router.post("/hf_login", operation_id="do_hf_login", response_model=HFTokenStatus)
async def do_hf_login(
    token: str = Body(description="Hugging Face token to use for login", embed=True),
) -> HFTokenStatus:
    HFTokenHelper.set_token(token)
    token_status = HFTokenHelper.get_status()

    if token_status is HFTokenStatus.UNKNOWN:
        ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")

    return token_status
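For illustration, the two new Hugging Face login endpoints could be exercised from a client roughly as follows. This is a hedged sketch: the host, port, and the `/api/v2/models` router prefix are assumptions that are not shown in this hunk.

```python
# Hedged sketch of calling the new HF login endpoints from a client.
# The base URL (host, port, and router prefix) is an assumption, not shown above.
import requests

BASE = "http://localhost:9090/api/v2/models"

# Returns "valid", "invalid", or "unknown".
print(requests.get(f"{BASE}/hf_login").json())

# Because the token parameter is declared with embed=True, the body is a JSON object.
print(requests.post(f"{BASE}/hf_login", json={"token": "hf_xxx"}).json())
```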
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
@@ -62,6 +63,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
|
||||
- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
|
||||
- `Deprecated`: The invocation is deprecated and may be removed in a future version.
|
||||
- `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
|
||||
- `Special`: The invocation is a special case and does not fit into any of the other classifications.
|
||||
"""
|
||||
|
||||
Stable = "stable"
|
||||
@@ -69,6 +71,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
|
||||
Prototype = "prototype"
|
||||
Deprecated = "deprecated"
|
||||
Internal = "internal"
|
||||
Special = "special"
|
||||
|
||||
|
||||
class UIConfigBase(BaseModel):
|
||||
@@ -192,12 +195,19 @@ class BaseInvocation(ABC, BaseModel):
|
||||
"""Gets a pydantc TypeAdapter for the union of all invocation types."""
|
||||
if not cls._typeadapter or cls._typeadapter_needs_update:
|
||||
AnyInvocation = TypeAliasType(
|
||||
"AnyInvocation", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
|
||||
"AnyInvocation", Annotated[Union[tuple(cls.get_invocations())], Field(discriminator="type")]
|
||||
)
|
||||
cls._typeadapter = TypeAdapter(AnyInvocation)
|
||||
cls._typeadapter_needs_update = False
|
||||
return cls._typeadapter
|
||||
|
||||
@classmethod
|
||||
def invalidate_typeadapter(cls) -> None:
|
||||
"""Invalidates the typeadapter, forcing it to be rebuilt on next access. If the invocation allowlist or
|
||||
denylist is changed, this should be called to ensure the typeadapter is updated and validation respects
|
||||
the updated allowlist and denylist."""
|
||||
cls._typeadapter_needs_update = True
|
||||
|
||||
@classmethod
|
||||
def get_invocations(cls) -> Iterable[BaseInvocation]:
|
||||
"""Gets all invocations, respecting the allowlist and denylist."""
|
||||
@@ -479,6 +489,26 @@ def invocation(
|
||||
title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
|
||||
)
|
||||
|
||||
# Validate the `invoke()` method is implemented
|
||||
if "invoke" in cls.__abstractmethods__:
|
||||
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
|
||||
|
||||
# And validate that `invoke()` returns a subclass of `BaseInvocationOutput
|
||||
invoke_return_annotation = signature(cls.invoke).return_annotation
|
||||
|
||||
try:
|
||||
# TODO(psyche): If `invoke()` is not defined, `return_annotation` ends up as the string "BaseInvocationOutput"
|
||||
# instead of the class `BaseInvocationOutput`. This may be a pydantic bug: https://github.com/pydantic/pydantic/issues/7978
|
||||
if isinstance(invoke_return_annotation, str):
|
||||
invoke_return_annotation = getattr(sys.modules[cls.__module__], invoke_return_annotation)
|
||||
|
||||
assert invoke_return_annotation is not BaseInvocationOutput
|
||||
assert issubclass(invoke_return_annotation, BaseInvocationOutput)
|
||||
except Exception:
|
||||
raise ValueError(
|
||||
f'Invocation "{invocation_type}" must have a return annotation of a subclass of BaseInvocationOutput (got "{invoke_return_annotation}")'
|
||||
)
|
||||
|
||||
docstring = cls.__doc__
|
||||
cls = create_model(
|
||||
cls.__qualname__,
|
||||
|
||||
@@ -95,6 +95,7 @@ class CompelInvocation(BaseInvocation):
                ti_manager,
            ),
        ):
            context.util.signal_progress("Building conditioning")
            assert isinstance(text_encoder, CLIPTextModel)
            assert isinstance(tokenizer, CLIPTokenizer)
            compel = Compel(

@@ -191,6 +192,7 @@ class SDXLPromptInvocationBase:
                ti_manager,
            ),
        ):
            context.util.signal_progress("Building conditioning")
            assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
            assert isinstance(tokenizer, CLIPTokenizer)
invokeai/app/invocations/concatenate_images.py (new file, 45 lines)

@@ -0,0 +1,45 @@
from typing import Literal

import numpy as np
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation(
    "concatenate_images",
    title="Concatenate Images",
    tags=["image", "concatenate"],
    category="image",
    version="1.0.0",
)
class ConcatenateImagesInvocation(BaseInvocation):
    """Concatenate images horizontally or vertically."""

    image_1: ImageField = InputField(description="The first image to concatenate.")
    image_2: ImageField = InputField(description="The second image to concatenate.")
    direction: Literal["horizontal", "vertical"] = InputField(
        default="horizontal", description="The direction along which to concatenate the images."
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # For now, we force the images to be RGB.
        image_1 = np.array(context.images.get_pil(self.image_1.image_name, "RGB"))
        image_2 = np.array(context.images.get_pil(self.image_2.image_name, "RGB"))

        axis: int = 0
        if self.direction == "horizontal":
            axis = 1
        elif self.direction == "vertical":
            axis = 0
        else:
            raise ValueError(f"Invalid direction: {self.direction}")

        concatenated_image = np.concatenate([image_1, image_2], axis=axis)

        image_pil = Image.fromarray(concatenated_image, mode="RGB")
        image_dto = context.images.save(image=image_pil)
        return ImageOutput.build(image_dto)
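Outside the invocation context, the same operation reduces to a single NumPy concatenation. A minimal standalone sketch follows (the file names are placeholders); note that `np.concatenate` requires the images to match on the non-concatenated dimension, for example equal heights for a horizontal join.

```python
# Standalone sketch of the same concatenation, without the InvokeAI invocation context.
# "left.png" and "right.png" are placeholder file names.
import numpy as np
from PIL import Image

a = np.array(Image.open("left.png").convert("RGB"))
b = np.array(Image.open("right.png").convert("RGB"))

# axis=1 appends columns (horizontal join); axis=0 appends rows (vertical join).
# The images must have equal heights for axis=1, or equal widths for axis=0.
side_by_side = Image.fromarray(np.concatenate([a, b], axis=1), mode="RGB")
side_by_side.save("combined.png")
```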
@@ -65,6 +65,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
        img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
        masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
        # TODO:
        context.util.signal_progress("Running VAE encoder")
        masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())

        masked_latents_name = context.tensors.save(tensor=masked_latents)

@@ -131,6 +131,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
        image_tensor = image_tensor.unsqueeze(0)
        img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
        masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
        context.util.signal_progress("Running VAE encoder")
        masked_latents = ImageToLatentsInvocation.vae_encode(
            vae_info, self.fp32, self.tiled, masked_image.clone()
        )
@@ -13,6 +13,7 @@ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
|
||||
from diffusers.schedulers.scheduling_tcd import TCDScheduler
|
||||
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
|
||||
from PIL import Image
|
||||
from pydantic import field_validator
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from transformers import CLIPVisionModelWithProjection
|
||||
@@ -510,6 +511,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
context: InvocationContext,
|
||||
t2i_adapters: Optional[Union[T2IAdapterField, list[T2IAdapterField]]],
|
||||
ext_manager: ExtensionsManager,
|
||||
bgr_mode: bool = False,
|
||||
) -> None:
|
||||
if t2i_adapters is None:
|
||||
return
|
||||
@@ -519,6 +521,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
t2i_adapters = [t2i_adapters]
|
||||
|
||||
for t2i_adapter_field in t2i_adapters:
|
||||
image = context.images.get_pil(t2i_adapter_field.image.image_name)
|
||||
if bgr_mode: # SDXL t2i trained on cv2's BGR outputs, but PIL won't convert straight to BGR
|
||||
r, g, b = image.split()
|
||||
image = Image.merge("RGB", (b, g, r))
|
||||
ext_manager.add_extension(
|
||||
T2IAdapterExt(
|
||||
node_context=context,
|
||||
@@ -547,7 +553,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
if not isinstance(single_ipa_image_fields, list):
|
||||
single_ipa_image_fields = [single_ipa_image_fields]
|
||||
|
||||
single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields]
|
||||
single_ipa_images = [
|
||||
context.images.get_pil(image.image_name, mode="RGB") for image in single_ipa_image_fields
|
||||
]
|
||||
with image_encoder_model_info as image_encoder_model:
|
||||
assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
|
||||
# Get image embeddings from CLIP and ImageProjModel.
|
||||
@@ -614,13 +622,17 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
for t2i_adapter_field in t2i_adapter:
|
||||
t2i_adapter_model_config = context.models.get_config(t2i_adapter_field.t2i_adapter_model.key)
|
||||
t2i_adapter_loaded_model = context.models.load(t2i_adapter_field.t2i_adapter_model)
|
||||
image = context.images.get_pil(t2i_adapter_field.image.image_name)
|
||||
image = context.images.get_pil(t2i_adapter_field.image.image_name, mode="RGB")
|
||||
|
||||
# The max_unet_downscale is the maximum amount that the UNet model downscales the latent image internally.
|
||||
if t2i_adapter_model_config.base == BaseModelType.StableDiffusion1:
|
||||
max_unet_downscale = 8
|
||||
elif t2i_adapter_model_config.base == BaseModelType.StableDiffusionXL:
|
||||
max_unet_downscale = 4
|
||||
|
||||
# SDXL adapters are trained on cv2's BGR outputs
|
||||
r, g, b = image.split()
|
||||
image = Image.merge("RGB", (b, g, r))
|
||||
else:
|
||||
raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.")
|
||||
|
||||
@@ -628,29 +640,39 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
with t2i_adapter_loaded_model as t2i_adapter_model:
|
||||
total_downscale_factor = t2i_adapter_model.total_downscale_factor
|
||||
|
||||
# Resize the T2I-Adapter input image.
|
||||
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
|
||||
# result will match the latent image's dimensions after max_unet_downscale is applied.
|
||||
t2i_input_height = latents_shape[2] // max_unet_downscale * total_downscale_factor
|
||||
t2i_input_width = latents_shape[3] // max_unet_downscale * total_downscale_factor
|
||||
|
||||
# Note: We have hard-coded `do_classifier_free_guidance=False`. This is because we only want to prepare
|
||||
# a single image. If CFG is enabled, we will duplicate the resultant tensor after applying the
|
||||
# T2I-Adapter model.
|
||||
#
|
||||
# Note: We re-use the `prepare_control_image(...)` from ControlNet for T2I-Adapter, because it has many
|
||||
# of the same requirements (e.g. preserving binary masks during resize).
|
||||
|
||||
# Assuming fixed dimensional scaling of LATENT_SCALE_FACTOR.
|
||||
_, _, latent_height, latent_width = latents_shape
|
||||
control_height_resize = latent_height * LATENT_SCALE_FACTOR
|
||||
control_width_resize = latent_width * LATENT_SCALE_FACTOR
|
||||
t2i_image = prepare_control_image(
|
||||
image=image,
|
||||
do_classifier_free_guidance=False,
|
||||
width=t2i_input_width,
|
||||
height=t2i_input_height,
|
||||
width=control_width_resize,
|
||||
height=control_height_resize,
|
||||
num_channels=t2i_adapter_model.config["in_channels"], # mypy treats this as a FrozenDict
|
||||
device=t2i_adapter_model.device,
|
||||
dtype=t2i_adapter_model.dtype,
|
||||
resize_mode=t2i_adapter_field.resize_mode,
|
||||
)
|
||||
|
||||
# Resize the T2I-Adapter input image.
|
||||
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the
|
||||
# result will match the latent image's dimensions after max_unet_downscale is applied.
|
||||
# We crop the image to this size so that the positions match the input image on non-standard resolutions
|
||||
t2i_input_height = latents_shape[2] // max_unet_downscale * total_downscale_factor
|
||||
t2i_input_width = latents_shape[3] // max_unet_downscale * total_downscale_factor
|
||||
if t2i_image.shape[2] > t2i_input_height or t2i_image.shape[3] > t2i_input_width:
|
||||
t2i_image = t2i_image[
|
||||
:, :, : min(t2i_image.shape[2], t2i_input_height), : min(t2i_image.shape[3], t2i_input_width)
|
||||
]
|
||||
|
||||
adapter_state = t2i_adapter_model(t2i_image)
|
||||
|
||||
if do_classifier_free_guidance:
|
||||
@@ -898,7 +920,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
# ext = extension_field.to_extension(exit_stack, context, ext_manager)
|
||||
# ext_manager.add_extension(ext)
|
||||
self.parse_controlnet_field(exit_stack, context, self.control, ext_manager)
|
||||
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager)
|
||||
bgr_mode = self.unet.unet.base == BaseModelType.StableDiffusionXL
|
||||
self.parse_t2i_adapter_field(exit_stack, context, self.t2i_adapter, ext_manager, bgr_mode)
|
||||
|
||||
# ext: t2i/ip adapter
|
||||
ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx)
|
||||
|
||||
@@ -41,6 +41,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
|
||||
# region Model Field Types
|
||||
MainModel = "MainModelField"
|
||||
FluxMainModel = "FluxMainModelField"
|
||||
SD3MainModel = "SD3MainModelField"
|
||||
SDXLMainModel = "SDXLMainModelField"
|
||||
SDXLRefinerModel = "SDXLRefinerModelField"
|
||||
ONNXModel = "ONNXModelField"
|
||||
@@ -52,6 +53,8 @@ class UIType(str, Enum, metaclass=MetaEnum):
|
||||
T2IAdapterModel = "T2IAdapterModelField"
|
||||
T5EncoderModel = "T5EncoderModelField"
|
||||
CLIPEmbedModel = "CLIPEmbedModelField"
|
||||
CLIPLEmbedModel = "CLIPLEmbedModelField"
|
||||
CLIPGEmbedModel = "CLIPGEmbedModelField"
|
||||
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
|
||||
# endregion
|
||||
|
||||
@@ -131,8 +134,10 @@ class FieldDescriptions:
|
||||
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
|
||||
t5_encoder = "T5 tokenizer and text encoder"
|
||||
clip_embed_model = "CLIP Embed loader"
|
||||
clip_g_model = "CLIP-G Embed loader"
|
||||
unet = "UNet (scheduler, LoRAs)"
|
||||
transformer = "Transformer"
|
||||
mmditx = "MMDiTX"
|
||||
vae = "VAE"
|
||||
cond = "Conditioning tensor"
|
||||
controlnet_model = "ControlNet model to load"
|
||||
@@ -140,6 +145,7 @@ class FieldDescriptions:
|
||||
lora_model = "LoRA model to load"
|
||||
main_model = "Main model (UNet, VAE, CLIP) to load"
|
||||
flux_model = "Flux model (Transformer) to load"
|
||||
sd3_model = "SD3 model (MMDiTX) to load"
|
||||
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
|
||||
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
|
||||
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
|
||||
@@ -246,6 +252,12 @@ class FluxConditioningField(BaseModel):
|
||||
conditioning_name: str = Field(description="The name of conditioning tensor")
|
||||
|
||||
|
||||
class SD3ConditioningField(BaseModel):
|
||||
"""A conditioning tensor primitive value"""
|
||||
|
||||
conditioning_name: str = Field(description="The name of conditioning tensor")
|
||||
|
||||
|
||||
class ConditioningField(BaseModel):
|
||||
"""A conditioning tensor primitive value"""
|
||||
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
from contextlib import ExitStack
|
||||
from typing import Callable, Iterator, Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
import torch
|
||||
import torchvision.transforms as tv_transforms
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
FluxConditioningField,
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
@@ -17,6 +21,7 @@ from invokeai.app.invocations.fields import (
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.flux_controlnet import FluxControlNetField
|
||||
from invokeai.app.invocations.ip_adapter import IPAdapterField
|
||||
from invokeai.app.invocations.model import TransformerField, VAEField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
@@ -26,6 +31,8 @@ from invokeai.backend.flux.denoise import denoise
|
||||
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
|
||||
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
|
||||
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
|
||||
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
|
||||
from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
|
||||
from invokeai.backend.flux.model import Flux
|
||||
from invokeai.backend.flux.sampling_utils import (
|
||||
clip_timestep_schedule_fractional,
|
||||
@@ -49,7 +56,7 @@ from invokeai.backend.util.devices import TorchDevice
|
||||
title="FLUX Denoise",
|
||||
tags=["image", "flux"],
|
||||
category="image",
|
||||
version="3.1.0",
|
||||
version="3.2.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
@@ -74,6 +81,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
description=FieldDescriptions.denoising_start,
|
||||
)
|
||||
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
|
||||
add_noise: bool = InputField(default=True, description="Add noise based on denoising start.")
|
||||
transformer: TransformerField = InputField(
|
||||
description=FieldDescriptions.flux_model,
|
||||
input=Input.Connection,
|
||||
@@ -82,6 +90,24 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
positive_text_conditioning: FluxConditioningField = InputField(
|
||||
description=FieldDescriptions.positive_cond, input=Input.Connection
|
||||
)
|
||||
negative_text_conditioning: FluxConditioningField | None = InputField(
|
||||
default=None,
|
||||
description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
|
||||
input=Input.Connection,
|
||||
)
|
||||
cfg_scale: float | list[float] = InputField(default=1.0, description=FieldDescriptions.cfg_scale, title="CFG Scale")
|
||||
cfg_scale_start_step: int = InputField(
|
||||
default=0,
|
||||
title="CFG Scale Start Step",
|
||||
description="Index of the first step to apply cfg_scale. Negative indices count backwards from the "
|
||||
+ "the last step (e.g. a value of -1 refers to the final step).",
|
||||
)
|
||||
cfg_scale_end_step: int = InputField(
|
||||
default=-1,
|
||||
title="CFG Scale End Step",
|
||||
description="Index of the last step to apply cfg_scale. Negative indices count backwards from the "
|
||||
+ "last step (e.g. a value of -1 refers to the final step).",
|
||||
)
|
||||
width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
|
||||
height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
|
||||
num_steps: int = InputField(
|
||||
@@ -96,10 +122,15 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
default=None, input=Input.Connection, description="ControlNet models."
|
||||
)
|
||||
controlnet_vae: VAEField | None = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.vae,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
ip_adapter: IPAdapterField | list[IPAdapterField] | None = InputField(
|
||||
description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection
|
||||
)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
latents = self._run_diffusion(context)
|
||||
@@ -108,6 +139,19 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
|
||||
def _load_text_conditioning(
|
||||
self, context: InvocationContext, conditioning_name: str, dtype: torch.dtype
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
# Load the conditioning data.
|
||||
cond_data = context.conditioning.load(conditioning_name)
|
||||
assert len(cond_data.conditionings) == 1
|
||||
flux_conditioning = cond_data.conditionings[0]
|
||||
assert isinstance(flux_conditioning, FLUXConditioningInfo)
|
||||
flux_conditioning = flux_conditioning.to(dtype=dtype)
|
||||
t5_embeddings = flux_conditioning.t5_embeds
|
||||
clip_embeddings = flux_conditioning.clip_embeds
|
||||
return t5_embeddings, clip_embeddings
|
||||
|
||||
def _run_diffusion(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
@@ -115,13 +159,15 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
inference_dtype = torch.bfloat16
|
||||
|
||||
# Load the conditioning data.
|
||||
cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
|
||||
assert len(cond_data.conditionings) == 1
|
||||
flux_conditioning = cond_data.conditionings[0]
|
||||
assert isinstance(flux_conditioning, FLUXConditioningInfo)
|
||||
flux_conditioning = flux_conditioning.to(dtype=inference_dtype)
|
||||
t5_embeddings = flux_conditioning.t5_embeds
|
||||
clip_embeddings = flux_conditioning.clip_embeds
|
||||
pos_t5_embeddings, pos_clip_embeddings = self._load_text_conditioning(
|
||||
context, self.positive_text_conditioning.conditioning_name, inference_dtype
|
||||
)
|
||||
neg_t5_embeddings: torch.Tensor | None = None
|
||||
neg_clip_embeddings: torch.Tensor | None = None
|
||||
if self.negative_text_conditioning is not None:
|
||||
neg_t5_embeddings, neg_clip_embeddings = self._load_text_conditioning(
|
||||
context, self.negative_text_conditioning.conditioning_name, inference_dtype
|
||||
)
|
||||
|
||||
# Load the input latents, if provided.
|
||||
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
|
||||
@@ -162,9 +208,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"to be poor. Consider using a FLUX dev model instead."
|
||||
)
|
||||
|
||||
# Noise the orig_latents by the appropriate amount for the first timestep.
|
||||
t_0 = timesteps[0]
|
||||
x = t_0 * noise + (1.0 - t_0) * init_latents
|
||||
if self.add_noise:
|
||||
# Noise the orig_latents by the appropriate amount for the first timestep.
|
||||
t_0 = timesteps[0]
|
||||
x = t_0 * noise + (1.0 - t_0) * init_latents
|
||||
else:
|
||||
x = init_latents
|
||||
else:
|
||||
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
|
||||
if self.denoising_start > 1e-5:
|
||||
@@ -182,8 +231,16 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
b, _c, latent_h, latent_w = x.shape
|
||||
img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)
|
||||
|
||||
bs, t5_seq_len, _ = t5_embeddings.shape
|
||||
txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())
|
||||
pos_bs, pos_t5_seq_len, _ = pos_t5_embeddings.shape
|
||||
pos_txt_ids = torch.zeros(
|
||||
pos_bs, pos_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
|
||||
)
|
||||
neg_txt_ids: torch.Tensor | None = None
|
||||
if neg_t5_embeddings is not None:
|
||||
neg_bs, neg_t5_seq_len, _ = neg_t5_embeddings.shape
|
||||
neg_txt_ids = torch.zeros(
|
||||
neg_bs, neg_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
|
||||
)
|
||||
|
||||
# Pack all latent tensors.
|
||||
init_latents = pack(init_latents) if init_latents is not None else None
|
||||
@@ -204,6 +261,21 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
noise=noise,
|
||||
)
|
||||
|
||||
# Compute the IP-Adapter image prompt clip embeddings.
|
||||
# We do this before loading other models to minimize peak memory.
|
||||
# TODO(ryand): We should really do this in a separate invocation to benefit from caching.
|
||||
ip_adapter_fields = self._normalize_ip_adapter_fields()
|
||||
pos_image_prompt_clip_embeds, neg_image_prompt_clip_embeds = self._prep_ip_adapter_image_prompt_clip_embeds(
|
||||
ip_adapter_fields, context
|
||||
)
|
||||
|
||||
cfg_scale = self.prep_cfg_scale(
|
||||
cfg_scale=self.cfg_scale,
|
||||
timesteps=timesteps,
|
||||
cfg_scale_start_step=self.cfg_scale_start_step,
|
||||
cfg_scale_end_step=self.cfg_scale_end_step,
|
||||
)
|
||||
|
||||
with ExitStack() as exit_stack:
|
||||
# Prepare ControlNet extensions.
|
||||
# Note: We do this before loading the transformer model to minimize peak memory (see implementation).
|
||||
@@ -252,23 +324,88 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
else:
|
||||
raise ValueError(f"Unsupported model format: {config.format}")
|
||||
|
||||
# Prepare IP-Adapter extensions.
|
||||
pos_ip_adapter_extensions, neg_ip_adapter_extensions = self._prep_ip_adapter_extensions(
|
||||
pos_image_prompt_clip_embeds=pos_image_prompt_clip_embeds,
|
||||
neg_image_prompt_clip_embeds=neg_image_prompt_clip_embeds,
|
||||
ip_adapter_fields=ip_adapter_fields,
|
||||
context=context,
|
||||
exit_stack=exit_stack,
|
||||
dtype=inference_dtype,
|
||||
)
|
||||
|
||||
x = denoise(
|
||||
model=transformer,
|
||||
img=x,
|
||||
img_ids=img_ids,
|
||||
txt=t5_embeddings,
|
||||
txt_ids=txt_ids,
|
||||
vec=clip_embeddings,
|
||||
txt=pos_t5_embeddings,
|
||||
txt_ids=pos_txt_ids,
|
||||
vec=pos_clip_embeddings,
|
||||
neg_txt=neg_t5_embeddings,
|
||||
neg_txt_ids=neg_txt_ids,
|
||||
neg_vec=neg_clip_embeddings,
|
||||
timesteps=timesteps,
|
||||
step_callback=self._build_step_callback(context),
|
||||
guidance=self.guidance,
|
||||
cfg_scale=cfg_scale,
|
||||
inpaint_extension=inpaint_extension,
|
||||
controlnet_extensions=controlnet_extensions,
|
||||
pos_ip_adapter_extensions=pos_ip_adapter_extensions,
|
||||
neg_ip_adapter_extensions=neg_ip_adapter_extensions,
|
||||
)
|
||||
|
||||
x = unpack(x.float(), self.height, self.width)
|
||||
return x
|
||||
|
||||
@classmethod
|
||||
def prep_cfg_scale(
|
||||
cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
|
||||
) -> list[float]:
|
||||
"""Prepare the cfg_scale schedule.
|
||||
|
||||
- Clips the cfg_scale schedule based on cfg_scale_start_step and cfg_scale_end_step.
|
||||
- If cfg_scale is a list, then it is assumed to be a schedule and is returned as-is.
|
||||
- If cfg_scale is a scalar, then a linear schedule is created from cfg_scale_start_step to cfg_scale_end_step.
|
||||
"""
|
||||
# num_steps is the number of denoising steps, which is one less than the number of timesteps.
|
||||
num_steps = len(timesteps) - 1
|
||||
|
||||
# Normalize cfg_scale to a list if it is a scalar.
|
||||
cfg_scale_list: list[float]
|
||||
if isinstance(cfg_scale, float):
|
||||
cfg_scale_list = [cfg_scale] * num_steps
|
||||
elif isinstance(cfg_scale, list):
|
||||
cfg_scale_list = cfg_scale
|
||||
else:
|
||||
raise ValueError(f"Unsupported cfg_scale type: {type(cfg_scale)}")
|
||||
assert len(cfg_scale_list) == num_steps
|
||||
|
||||
# Handle negative indices for cfg_scale_start_step and cfg_scale_end_step.
|
||||
start_step_index = cfg_scale_start_step
|
||||
if start_step_index < 0:
|
||||
start_step_index = num_steps + start_step_index
|
||||
end_step_index = cfg_scale_end_step
|
||||
if end_step_index < 0:
|
||||
end_step_index = num_steps + end_step_index
|
||||
|
||||
# Validate the start and end step indices.
|
||||
if not (0 <= start_step_index < num_steps):
|
||||
raise ValueError(f"Invalid cfg_scale_start_step. Out of range: {cfg_scale_start_step}.")
|
||||
if not (0 <= end_step_index < num_steps):
|
||||
raise ValueError(f"Invalid cfg_scale_end_step. Out of range: {cfg_scale_end_step}.")
|
||||
if start_step_index > end_step_index:
|
||||
raise ValueError(
|
||||
f"cfg_scale_start_step ({cfg_scale_start_step}) must be before cfg_scale_end_step "
|
||||
+ f"({cfg_scale_end_step})."
|
||||
)
|
||||
|
||||
# Set values outside the start and end step indices to 1.0. This is equivalent to disabling cfg_scale for those
|
||||
# steps.
|
||||
clipped_cfg_scale = [1.0] * num_steps
|
||||
clipped_cfg_scale[start_step_index : end_step_index + 1] = cfg_scale_list[start_step_index : end_step_index + 1]
|
||||
|
||||
return clipped_cfg_scale
|
||||
    def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
        """Prepare the inpaint mask.

@@ -408,6 +545,112 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

        return controlnet_extensions

    def _normalize_ip_adapter_fields(self) -> list[IPAdapterField]:
        if self.ip_adapter is None:
            return []
        elif isinstance(self.ip_adapter, IPAdapterField):
            return [self.ip_adapter]
        elif isinstance(self.ip_adapter, list):
            return self.ip_adapter
        else:
            raise ValueError(f"Unsupported IP-Adapter type: {type(self.ip_adapter)}")

    def _prep_ip_adapter_image_prompt_clip_embeds(
        self,
        ip_adapter_fields: list[IPAdapterField],
        context: InvocationContext,
    ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
        """Run the IPAdapter CLIPVisionModel, returning image prompt embeddings."""
        clip_image_processor = CLIPImageProcessor()

        pos_image_prompt_clip_embeds: list[torch.Tensor] = []
        neg_image_prompt_clip_embeds: list[torch.Tensor] = []
        for ip_adapter_field in ip_adapter_fields:
            # `ip_adapter_field.image` could be a list or a single ImageField. Normalize to a list here.
            ipa_image_fields: list[ImageField]
            if isinstance(ip_adapter_field.image, ImageField):
                ipa_image_fields = [ip_adapter_field.image]
            elif isinstance(ip_adapter_field.image, list):
                ipa_image_fields = ip_adapter_field.image
            else:
                raise ValueError(f"Unsupported IP-Adapter image type: {type(ip_adapter_field.image)}")

            if len(ipa_image_fields) != 1:
                raise ValueError(
                    f"FLUX IP-Adapter only supports a single image prompt (received {len(ipa_image_fields)})."
                )

            ipa_images = [context.images.get_pil(image.image_name, mode="RGB") for image in ipa_image_fields]

            pos_images: list[npt.NDArray[np.uint8]] = []
            neg_images: list[npt.NDArray[np.uint8]] = []
            for ipa_image in ipa_images:
                assert ipa_image.mode == "RGB"
                pos_image = np.array(ipa_image)
                # We use a black image as the negative image prompt for parity with
                # https://github.com/XLabs-AI/x-flux-comfyui/blob/45c834727dd2141aebc505ae4b01f193a8414e38/nodes.py#L592-L593
                # An alternative scheme would be to apply zeros_like() after calling the clip_image_processor.
                neg_image = np.zeros_like(pos_image)
                pos_images.append(pos_image)
                neg_images.append(neg_image)

            with context.models.load(ip_adapter_field.image_encoder_model) as image_encoder_model:
                assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)

                clip_image: torch.Tensor = clip_image_processor(images=pos_images, return_tensors="pt").pixel_values
                clip_image = clip_image.to(device=image_encoder_model.device, dtype=image_encoder_model.dtype)
                pos_clip_image_embeds = image_encoder_model(clip_image).image_embeds

                clip_image = clip_image_processor(images=neg_images, return_tensors="pt").pixel_values
                clip_image = clip_image.to(device=image_encoder_model.device, dtype=image_encoder_model.dtype)
                neg_clip_image_embeds = image_encoder_model(clip_image).image_embeds

            pos_image_prompt_clip_embeds.append(pos_clip_image_embeds)
            neg_image_prompt_clip_embeds.append(neg_clip_image_embeds)

        return pos_image_prompt_clip_embeds, neg_image_prompt_clip_embeds
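As the comment in the loop above notes, the negative image prompt is an all-black image, matching the XLabs ComfyUI node; zeroing the preprocessed pixel values would be a different thing, because CLIP preprocessing normalizes with a non-zero mean and std. A small standalone sketch of the two schemes (the image size and the processor instance here are illustrative, not taken from the diff):

import numpy as np
import torch
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
pos = np.full((512, 512, 3), 128, dtype=np.uint8)  # stand-in for a real RGB prompt image

# Scheme used above: build a black image, then preprocess it like any other image.
neg_pixels_black = processor(images=[np.zeros_like(pos)], return_tensors="pt").pixel_values

# Alternative mentioned in the comment: preprocess the positive image, then zero the tensor.
neg_pixels_zeroed = torch.zeros_like(processor(images=[pos], return_tensors="pt").pixel_values)

# The two are not identical: a black image maps to negative (mean-subtracted) values, not zeros.
assert not torch.allclose(neg_pixels_black, neg_pixels_zeroed)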
    def _prep_ip_adapter_extensions(
        self,
        ip_adapter_fields: list[IPAdapterField],
        pos_image_prompt_clip_embeds: list[torch.Tensor],
        neg_image_prompt_clip_embeds: list[torch.Tensor],
        context: InvocationContext,
        exit_stack: ExitStack,
        dtype: torch.dtype,
    ) -> tuple[list[XLabsIPAdapterExtension], list[XLabsIPAdapterExtension]]:
        pos_ip_adapter_extensions: list[XLabsIPAdapterExtension] = []
        neg_ip_adapter_extensions: list[XLabsIPAdapterExtension] = []
        for ip_adapter_field, pos_image_prompt_clip_embed, neg_image_prompt_clip_embed in zip(
            ip_adapter_fields, pos_image_prompt_clip_embeds, neg_image_prompt_clip_embeds, strict=True
        ):
            ip_adapter_model = exit_stack.enter_context(context.models.load(ip_adapter_field.ip_adapter_model))
            assert isinstance(ip_adapter_model, XlabsIpAdapterFlux)
            ip_adapter_model = ip_adapter_model.to(dtype=dtype)
            if ip_adapter_field.mask is not None:
                raise ValueError("IP-Adapter masks are not yet supported in Flux.")
            ip_adapter_extension = XLabsIPAdapterExtension(
                model=ip_adapter_model,
                image_prompt_clip_embed=pos_image_prompt_clip_embed,
                weight=ip_adapter_field.weight,
                begin_step_percent=ip_adapter_field.begin_step_percent,
                end_step_percent=ip_adapter_field.end_step_percent,
            )
            ip_adapter_extension.run_image_proj(dtype=dtype)
            pos_ip_adapter_extensions.append(ip_adapter_extension)

            ip_adapter_extension = XLabsIPAdapterExtension(
                model=ip_adapter_model,
                image_prompt_clip_embed=neg_image_prompt_clip_embed,
                weight=ip_adapter_field.weight,
                begin_step_percent=ip_adapter_field.begin_step_percent,
                end_step_percent=ip_adapter_field.end_step_percent,
            )
            ip_adapter_extension.run_image_proj(dtype=dtype)
            neg_ip_adapter_extensions.append(ip_adapter_extension)

        return pos_ip_adapter_extensions, neg_ip_adapter_extensions

    def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
        for lora in self.transformer.loras:
            lora_info = context.models.load(lora.lora)
invokeai/app/invocations/flux_ip_adapter.py (new file, 89 lines)
@@ -0,0 +1,89 @@
from builtins import float
from typing import List, Literal, Union

from pydantic import field_validator, model_validator
from typing_extensions import Self

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import InputField, UIType
from invokeai.app.invocations.ip_adapter import (
    CLIP_VISION_MODEL_MAP,
    IPAdapterField,
    IPAdapterInvocation,
    IPAdapterOutput,
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import (
    IPAdapterCheckpointConfig,
    IPAdapterInvokeAIConfig,
)


@invocation(
    "flux_ip_adapter",
    title="FLUX IP-Adapter",
    tags=["ip_adapter", "control"],
    category="ip_adapter",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxIPAdapterInvocation(BaseInvocation):
    """Collects FLUX IP-Adapter info to pass to other nodes."""

    # FluxIPAdapterInvocation is based closely on IPAdapterInvocation, but with some unsupported features removed.

    image: ImageField = InputField(description="The IP-Adapter image prompt(s).")
    ip_adapter_model: ModelIdentifierField = InputField(
        description="The IP-Adapter model.", title="IP-Adapter Model", ui_type=UIType.IPAdapterModel
    )
    # Currently, the only known ViT model used by FLUX IP-Adapters is ViT-L.
    clip_vision_model: Literal["ViT-L"] = InputField(description="CLIP Vision model to use.", default="ViT-L")
    weight: Union[float, List[float]] = InputField(
        default=1, description="The weight given to the IP-Adapter", title="Weight"
    )
    begin_step_percent: float = InputField(
        default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
    )
    end_step_percent: float = InputField(
        default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
    )

    @field_validator("weight")
    @classmethod
    def validate_ip_adapter_weight(cls, v: float) -> float:
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self) -> Self:
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self

    def invoke(self, context: InvocationContext) -> IPAdapterOutput:
        # Look up the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
        ip_adapter_info = context.models.get_config(self.ip_adapter_model.key)
        assert isinstance(ip_adapter_info, (IPAdapterInvokeAIConfig, IPAdapterCheckpointConfig))

        # Note: There is an IPAdapterInvokeAIConfig.image_encoder_model_id field, but it isn't trustworthy.
        image_encoder_starter_model = CLIP_VISION_MODEL_MAP[self.clip_vision_model]
        image_encoder_model_id = image_encoder_starter_model.source
        image_encoder_model_name = image_encoder_starter_model.name
        image_encoder_model = IPAdapterInvocation.get_clip_image_encoder(
            context, image_encoder_model_id, image_encoder_model_name
        )

        return IPAdapterOutput(
            ip_adapter=IPAdapterField(
                image=self.image,
                ip_adapter_model=self.ip_adapter_model,
                image_encoder_model=ModelIdentifierField.from_config(image_encoder_model),
                weight=self.weight,
                target_blocks=[],  # target_blocks is currently unused for FLUX IP-Adapters.
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
                mask=None,  # mask is currently unused for FLUX IP-Adapters.
            ),
        )
invokeai/app/invocations/flux_model_loader.py (new file, 89 lines)
@@ -0,0 +1,89 @@
from typing import Literal

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
    CheckpointConfigBase,
    SubModelType,
)


@invocation_output("flux_model_loader_output")
class FluxModelLoaderOutput(BaseInvocationOutput):
    """Flux base model loader output"""

    transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
    clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
    max_seq_len: Literal[256, 512] = OutputField(
        description="The max sequence length to use for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
        title="Max Seq Length",
    )


@invocation(
    "flux_model_loader",
    title="Flux Main Model",
    tags=["model", "flux"],
    category="model",
    version="1.0.4",
    classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
    """Loads a flux base model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.flux_model,
        ui_type=UIType.FluxMainModel,
        input=Input.Direct,
    )

    t5_encoder_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
    )

    clip_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP Embed",
    )

    vae_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
    )

    def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
        for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
            if not context.models.exists(key):
                raise ValueError(f"Unknown model: {key}")

        transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})

        tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        transformer_config = context.models.get_config(transformer)
        assert isinstance(transformer_config, CheckpointConfigBase)

        return FluxModelLoaderOutput(
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
            max_seq_len=max_seq_lengths[transformer_config.config_path],
        )
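The `max_seq_len` output comes straight from `max_seq_lengths[transformer_config.config_path]`. A hedged sketch of the kind of lookup this implies (the real keys and table live in invokeai.backend.flux.util and are not shown in this diff; the keys below are placeholders):

# Illustrative shape of the table: config path -> T5 max sequence length.
max_seq_lengths = {
    "flux-schnell.yaml": 256,  # schnell transformer -> shorter T5 sequences
    "flux-dev.yaml": 512,      # dev transformer -> longer T5 sequences
}
max_seq_len = max_seq_lengths["flux-dev.yaml"]  # 512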
@@ -71,6 +71,7 @@ class FluxTextEncoderInvocation(BaseInvocation):

        t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

        context.util.signal_progress("Running T5 encoder")
        prompt_embeds = t5_encoder(prompt)

        assert isinstance(prompt_embeds, torch.Tensor)
@@ -111,6 +112,7 @@ class FluxTextEncoderInvocation(BaseInvocation):

        clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)

        context.util.signal_progress("Running CLIP encoder")
        pooled_prompt_embeds = clip_encoder(prompt)

        assert isinstance(pooled_prompt_embeds, torch.Tensor)

@@ -41,7 +41,8 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
    def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype())
            vae_dtype = next(iter(vae.parameters())).dtype
            latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
            img = vae.decode(latents)

        img = img.clamp(-1, 1)
@@ -53,6 +54,7 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.tensors.load(self.latents.latents_name)
        vae_info = context.models.load(self.vae.vae)
        context.util.signal_progress("Running VAE")
        image = self._vae_decode(vae_info=vae_info, latents=latents)

        TorchDevice.empty_cache()
@@ -44,9 +44,8 @@ class FluxVaeEncodeInvocation(BaseInvocation):
        generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            image_tensor = image_tensor.to(
                device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
            )
            vae_dtype = next(iter(vae.parameters())).dtype
            image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
            latents = vae.encode(image_tensor, sample=True, generator=generator)
        return latents

@@ -60,6 +59,7 @@ class FluxVaeEncodeInvocation(BaseInvocation):
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        context.util.signal_progress("Running VAE")
        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

        latents = latents.to("cpu")

@@ -117,6 +117,7 @@ class ImageToLatentsInvocation(BaseInvocation):
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        context.util.signal_progress("Running VAE encoder")
        latents = self.vae_encode(
            vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
        )
@@ -9,6 +9,7 @@ from invokeai.app.invocations.fields import FieldDescriptions, InputField, Outpu
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
@@ -17,6 +18,12 @@ from invokeai.backend.model_manager.config import (
    IPAdapterInvokeAIConfig,
    ModelType,
)
from invokeai.backend.model_manager.starter_models import (
    StarterModel,
    clip_vit_l_image_encoder,
    ip_adapter_sd_image_encoder,
    ip_adapter_sdxl_image_encoder,
)


class IPAdapterField(BaseModel):
@@ -55,10 +62,14 @@ class IPAdapterOutput(BaseInvocationOutput):
    ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")


CLIP_VISION_MODEL_MAP = {"ViT-H": "ip_adapter_sd_image_encoder", "ViT-G": "ip_adapter_sdxl_image_encoder"}
CLIP_VISION_MODEL_MAP: dict[Literal["ViT-L", "ViT-H", "ViT-G"], StarterModel] = {
    "ViT-L": clip_vit_l_image_encoder,
    "ViT-H": ip_adapter_sd_image_encoder,
    "ViT-G": ip_adapter_sdxl_image_encoder,
}
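The map now resolves to StarterModel entries rather than bare model names, so callers can pull both an install source and a display name from one lookup. A hedged sketch of how it is consumed (the attribute names source and name match the usage further down in this diff; the comments are assumptions about their meaning):

# Illustrative only: resolve the starter model for a chosen CLIP Vision variant.
starter = CLIP_VISION_MODEL_MAP["ViT-L"]
image_encoder_model_id = starter.source  # e.g. a repo id that heuristic_import can install from
image_encoder_model_name = starter.name  # the name used to search the local model records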
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.1")
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.5.0")
class IPAdapterInvocation(BaseInvocation):
    """Collects IP-Adapter info to pass to other nodes."""

@@ -70,7 +81,7 @@ class IPAdapterInvocation(BaseInvocation):
        ui_order=-1,
        ui_type=UIType.IPAdapterModel,
    )
    clip_vision_model: Literal["ViT-H", "ViT-G"] = InputField(
    clip_vision_model: Literal["ViT-H", "ViT-G", "ViT-L"] = InputField(
        description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",
        default="ViT-H",
        ui_order=2,
@@ -111,9 +122,11 @@ class IPAdapterInvocation(BaseInvocation):
            image_encoder_model_id = ip_adapter_info.image_encoder_model_id
            image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip()
        else:
            image_encoder_model_name = CLIP_VISION_MODEL_MAP[self.clip_vision_model]
            image_encoder_starter_model = CLIP_VISION_MODEL_MAP[self.clip_vision_model]
            image_encoder_model_id = image_encoder_starter_model.source
            image_encoder_model_name = image_encoder_starter_model.name

        image_encoder_model = self._get_image_encoder(context, image_encoder_model_name)
        image_encoder_model = self.get_clip_image_encoder(context, image_encoder_model_id, image_encoder_model_name)

        if self.method == "style":
            if ip_adapter_info.base == "sd-1":
@@ -147,7 +160,10 @@ class IPAdapterInvocation(BaseInvocation):
            ),
        )

    def _get_image_encoder(self, context: InvocationContext, image_encoder_model_name: str) -> AnyModelConfig:
    @classmethod
    def get_clip_image_encoder(
        cls, context: InvocationContext, image_encoder_model_id: str, image_encoder_model_name: str
    ) -> AnyModelConfig:
        image_encoder_models = context.models.search_by_attrs(
            name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision
        )
@@ -159,7 +175,11 @@ class IPAdapterInvocation(BaseInvocation):
            )

            installer = context._services.model_manager.install
            job = installer.heuristic_import(f"InvokeAI/{image_encoder_model_name}")
            # Note: We hard-code the type to CLIPVision here because if the model contains both a CLIPVision and a
            # CLIPText model, the probe may treat it as a CLIPText model.
            job = installer.heuristic_import(
                image_encoder_model_id, ModelRecordChanges(name=image_encoder_model_name, type=ModelType.CLIPVision)
            )
            installer.wait_for_job(job, timeout=600)  # Wait for up to 10 minutes
            image_encoder_models = context.models.search_by_attrs(
                name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision
@@ -60,6 +60,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
        vae_info = context.models.load(self.vae.vae)
        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
        with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
            context.util.signal_progress("Running VAE decoder")
            assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
            latents = latents.to(vae.device)
            if self.fp32:
@@ -5,6 +5,7 @@ from PIL import Image
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, InvocationContext, invocation
from invokeai.app.invocations.fields import ImageField, InputField, TensorField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput, MaskOutput
from invokeai.backend.image_util.util import pil_to_np


@invocation(
@@ -148,3 +149,55 @@ class MaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
        mask_pil = Image.fromarray(mask_np, mode="L")
        image_dto = context.images.save(image=mask_pil)
        return ImageOutput.build(image_dto)


@invocation(
    "apply_tensor_mask_to_image",
    title="Apply Tensor Mask to Image",
    tags=["mask"],
    category="mask",
    version="1.0.0",
)
class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Applies a tensor mask to an image.

    The image is converted to RGBA and the mask is applied to the alpha channel."""

    mask: TensorField = InputField(description="The mask tensor to apply.")
    image: ImageField = InputField(description="The image to apply the mask to.")
    invert: bool = InputField(default=False, description="Whether to invert the mask.")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, mode="RGBA")
        mask = context.tensors.load(self.mask.tensor_name)

        # Squeeze the channel dimension if it exists.
        if mask.dim() == 3:
            mask = mask.squeeze(0)

        # Ensure that the mask is binary.
        if mask.dtype != torch.bool:
            mask = mask > 0.5
        mask_np = (mask.float() * 255).byte().cpu().numpy().astype(np.uint8)

        if self.invert:
            mask_np = 255 - mask_np

        # Apply the mask only to the alpha channel where the original alpha is non-zero. This preserves the original
        # image's transparency - else the transparent regions would end up as opaque black.

        # Separate the image into R, G, B, and A channels
        image_np = pil_to_np(image)
        r, g, b, a = np.split(image_np, 4, axis=-1)

        # Apply the mask to the alpha channel
        new_alpha = np.where(a.squeeze() > 0, mask_np, a.squeeze())

        # Stack the RGB channels with the modified alpha
        masked_image_np = np.dstack([r.squeeze(), g.squeeze(), b.squeeze(), new_alpha])

        # Convert back to an image (RGBA)
        masked_image = Image.fromarray(masked_image_np.astype(np.uint8), "RGBA")
        image_dto = context.images.save(image=masked_image)

        return ImageOutput.build(image_dto)
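A minimal sketch of the alpha-compositing rule used above (standalone and illustrative; the 2x2 arrays are made-up values, not from the diff):

import numpy as np

a = np.array([[255, 0], [128, 255]], dtype=np.uint8)        # original alpha channel
mask_np = np.array([[255, 255], [0, 255]], dtype=np.uint8)  # binarized mask scaled to 0/255

# Where the original alpha is non-zero, take the mask; where it is zero, keep the original (transparent) alpha.
new_alpha = np.where(a > 0, mask_np, a)
# new_alpha == [[255, 0], [0, 255]] -> fully transparent pixels stay transparent.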
@@ -40,7 +40,7 @@ class IPAdapterMetadataField(BaseModel):

    image: ImageField = Field(description="The IP-Adapter image prompt.")
    ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
    clip_vision_model: Literal["ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
    clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
    method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
    weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
    begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
@@ -147,6 +147,10 @@ GENERATION_MODES = Literal[
    "flux_img2img",
    "flux_inpaint",
    "flux_outpaint",
    "sd3_txt2img",
    "sd3_img2img",
    "sd3_inpaint",
    "sd3_outpaint",
]
||||
@@ -1,5 +1,5 @@
|
||||
import copy
|
||||
from typing import List, Literal, Optional
|
||||
from typing import List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -13,11 +13,9 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.shared.models import FreeUConfig
|
||||
from invokeai.backend.flux.util import max_seq_lengths
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
CheckpointConfigBase,
|
||||
ModelType,
|
||||
SubModelType,
|
||||
)
|
||||
@@ -139,78 +137,6 @@ class ModelIdentifierInvocation(BaseInvocation):
|
||||
return ModelIdentifierOutput(model=self.model)
|
||||
|
||||
|
||||
@invocation_output("flux_model_loader_output")
|
||||
class FluxModelLoaderOutput(BaseInvocationOutput):
|
||||
"""Flux base model loader output"""
|
||||
|
||||
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
|
||||
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
|
||||
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
max_seq_len: Literal[256, 512] = OutputField(
|
||||
description="The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
|
||||
title="Max Seq Length",
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"flux_model_loader",
|
||||
title="Flux Main Model",
|
||||
tags=["model", "flux"],
|
||||
category="model",
|
||||
version="1.0.4",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a flux base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.flux_model,
|
||||
ui_type=UIType.FluxMainModel,
|
||||
input=Input.Direct,
|
||||
)
|
||||
|
||||
t5_encoder_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
|
||||
)
|
||||
|
||||
clip_embed_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.clip_embed_model,
|
||||
ui_type=UIType.CLIPEmbedModel,
|
||||
input=Input.Direct,
|
||||
title="CLIP Embed",
|
||||
)
|
||||
|
||||
vae_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
|
||||
for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
|
||||
if not context.models.exists(key):
|
||||
raise ValueError(f"Unknown model: {key}")
|
||||
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
|
||||
tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
|
||||
tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
|
||||
t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
|
||||
|
||||
transformer_config = context.models.get_config(transformer)
|
||||
assert isinstance(transformer_config, CheckpointConfigBase)
|
||||
|
||||
return FluxModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
|
||||
vae=VAEField(vae=vae),
|
||||
max_seq_len=max_seq_lengths[transformer_config.config_path],
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"main_model_loader",
|
||||
title="Main Model",
|
||||
|
||||
@@ -1,43 +1,4 @@
|
||||
import io
|
||||
from typing import Literal, Optional
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
from easing_functions import (
|
||||
BackEaseIn,
|
||||
BackEaseInOut,
|
||||
BackEaseOut,
|
||||
BounceEaseIn,
|
||||
BounceEaseInOut,
|
||||
BounceEaseOut,
|
||||
CircularEaseIn,
|
||||
CircularEaseInOut,
|
||||
CircularEaseOut,
|
||||
CubicEaseIn,
|
||||
CubicEaseInOut,
|
||||
CubicEaseOut,
|
||||
ElasticEaseIn,
|
||||
ElasticEaseInOut,
|
||||
ElasticEaseOut,
|
||||
ExponentialEaseIn,
|
||||
ExponentialEaseInOut,
|
||||
ExponentialEaseOut,
|
||||
LinearInOut,
|
||||
QuadEaseIn,
|
||||
QuadEaseInOut,
|
||||
QuadEaseOut,
|
||||
QuarticEaseIn,
|
||||
QuarticEaseInOut,
|
||||
QuarticEaseOut,
|
||||
QuinticEaseIn,
|
||||
QuinticEaseInOut,
|
||||
QuinticEaseOut,
|
||||
SineEaseIn,
|
||||
SineEaseInOut,
|
||||
SineEaseOut,
|
||||
)
|
||||
from matplotlib.ticker import MaxNLocator
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import InputField
|
||||
@@ -65,191 +26,3 @@ class FloatLinearRangeInvocation(BaseInvocation):
|
||||
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
|
||||
param_list = list(np.linspace(self.start, self.stop, self.steps))
|
||||
return FloatCollectionOutput(collection=param_list)
|
||||
|
||||
|
||||
EASING_FUNCTIONS_MAP = {
|
||||
"Linear": LinearInOut,
|
||||
"QuadIn": QuadEaseIn,
|
||||
"QuadOut": QuadEaseOut,
|
||||
"QuadInOut": QuadEaseInOut,
|
||||
"CubicIn": CubicEaseIn,
|
||||
"CubicOut": CubicEaseOut,
|
||||
"CubicInOut": CubicEaseInOut,
|
||||
"QuarticIn": QuarticEaseIn,
|
||||
"QuarticOut": QuarticEaseOut,
|
||||
"QuarticInOut": QuarticEaseInOut,
|
||||
"QuinticIn": QuinticEaseIn,
|
||||
"QuinticOut": QuinticEaseOut,
|
||||
"QuinticInOut": QuinticEaseInOut,
|
||||
"SineIn": SineEaseIn,
|
||||
"SineOut": SineEaseOut,
|
||||
"SineInOut": SineEaseInOut,
|
||||
"CircularIn": CircularEaseIn,
|
||||
"CircularOut": CircularEaseOut,
|
||||
"CircularInOut": CircularEaseInOut,
|
||||
"ExponentialIn": ExponentialEaseIn,
|
||||
"ExponentialOut": ExponentialEaseOut,
|
||||
"ExponentialInOut": ExponentialEaseInOut,
|
||||
"ElasticIn": ElasticEaseIn,
|
||||
"ElasticOut": ElasticEaseOut,
|
||||
"ElasticInOut": ElasticEaseInOut,
|
||||
"BackIn": BackEaseIn,
|
||||
"BackOut": BackEaseOut,
|
||||
"BackInOut": BackEaseInOut,
|
||||
"BounceIn": BounceEaseIn,
|
||||
"BounceOut": BounceEaseOut,
|
||||
"BounceInOut": BounceEaseInOut,
|
||||
}
|
||||
|
||||
EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
|
||||
|
||||
|
||||
# actually I think for now could just use CollectionOutput (which is list[Any]
|
||||
@invocation(
|
||||
"step_param_easing",
|
||||
title="Step Param Easing",
|
||||
tags=["step", "easing"],
|
||||
category="step",
|
||||
version="1.0.2",
|
||||
)
|
||||
class StepParamEasingInvocation(BaseInvocation):
|
||||
"""Experimental per-step parameter easing for denoising steps"""
|
||||
|
||||
easing: EASING_FUNCTION_KEYS = InputField(default="Linear", description="The easing function to use")
|
||||
num_steps: int = InputField(default=20, description="number of denoising steps")
|
||||
start_value: float = InputField(default=0.0, description="easing starting value")
|
||||
end_value: float = InputField(default=1.0, description="easing ending value")
|
||||
start_step_percent: float = InputField(default=0.0, description="fraction of steps at which to start easing")
|
||||
end_step_percent: float = InputField(default=1.0, description="fraction of steps after which to end easing")
|
||||
# if None, then start_value is used prior to easing start
|
||||
pre_start_value: Optional[float] = InputField(default=None, description="value before easing start")
|
||||
# if None, then end value is used prior to easing end
|
||||
post_end_value: Optional[float] = InputField(default=None, description="value after easing end")
|
||||
mirror: bool = InputField(default=False, description="include mirror of easing function")
|
||||
# FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
|
||||
# alt_mirror: bool = InputField(default=False, description="alternative mirroring by dual easing")
|
||||
show_easing_plot: bool = InputField(default=False, description="show easing plot")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
|
||||
log_diagnostics = False
|
||||
# convert from start_step_percent to nearest step <= (steps * start_step_percent)
|
||||
# start_step = int(np.floor(self.num_steps * self.start_step_percent))
|
||||
start_step = int(np.round(self.num_steps * self.start_step_percent))
|
||||
# convert from end_step_percent to nearest step >= (steps * end_step_percent)
|
||||
# end_step = int(np.ceil((self.num_steps - 1) * self.end_step_percent))
|
||||
end_step = int(np.round((self.num_steps - 1) * self.end_step_percent))
|
||||
|
||||
# end_step = int(np.ceil(self.num_steps * self.end_step_percent))
|
||||
num_easing_steps = end_step - start_step + 1
|
||||
|
||||
# num_presteps = max(start_step - 1, 0)
|
||||
num_presteps = start_step
|
||||
num_poststeps = self.num_steps - (num_presteps + num_easing_steps)
|
||||
prelist = list(num_presteps * [self.pre_start_value])
|
||||
postlist = list(num_poststeps * [self.post_end_value])
|
||||
|
||||
if log_diagnostics:
|
||||
context.logger.debug("start_step: " + str(start_step))
|
||||
context.logger.debug("end_step: " + str(end_step))
|
||||
context.logger.debug("num_easing_steps: " + str(num_easing_steps))
|
||||
context.logger.debug("num_presteps: " + str(num_presteps))
|
||||
context.logger.debug("num_poststeps: " + str(num_poststeps))
|
||||
context.logger.debug("prelist size: " + str(len(prelist)))
|
||||
context.logger.debug("postlist size: " + str(len(postlist)))
|
||||
context.logger.debug("prelist: " + str(prelist))
|
||||
context.logger.debug("postlist: " + str(postlist))
|
||||
|
||||
easing_class = EASING_FUNCTIONS_MAP[self.easing]
|
||||
if log_diagnostics:
|
||||
context.logger.debug("easing class: " + str(easing_class))
|
||||
easing_list = []
|
||||
if self.mirror: # "expected" mirroring
|
||||
# if number of steps is even, squeeze duration down to (number_of_steps)/2
|
||||
# and create reverse copy of list to append
|
||||
# if number of steps is odd, squeeze duration down to ceil(number_of_steps/2)
|
||||
# and create reverse copy of list[1:end-1]
|
||||
# but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always
|
||||
|
||||
base_easing_duration = int(np.ceil(num_easing_steps / 2.0))
|
||||
if log_diagnostics:
|
||||
context.logger.debug("base easing duration: " + str(base_easing_duration))
|
||||
even_num_steps = num_easing_steps % 2 == 0 # even number of steps
|
||||
easing_function = easing_class(
|
||||
start=self.start_value,
|
||||
end=self.end_value,
|
||||
duration=base_easing_duration - 1,
|
||||
)
|
||||
base_easing_vals = []
|
||||
for step_index in range(base_easing_duration):
|
||||
easing_val = easing_function.ease(step_index)
|
||||
base_easing_vals.append(easing_val)
|
||||
if log_diagnostics:
|
||||
context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val))
|
||||
if even_num_steps:
|
||||
mirror_easing_vals = list(reversed(base_easing_vals))
|
||||
else:
|
||||
mirror_easing_vals = list(reversed(base_easing_vals[0:-1]))
|
||||
if log_diagnostics:
|
||||
context.logger.debug("base easing vals: " + str(base_easing_vals))
|
||||
context.logger.debug("mirror easing vals: " + str(mirror_easing_vals))
|
||||
easing_list = base_easing_vals + mirror_easing_vals
|
||||
|
||||
# FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
|
||||
# elif self.alt_mirror: # function mirroring (unintuitive behavior (at least to me))
|
||||
# # half_ease_duration = round(num_easing_steps - 1 / 2)
|
||||
# half_ease_duration = round((num_easing_steps - 1) / 2)
|
||||
# easing_function = easing_class(start=self.start_value,
|
||||
# end=self.end_value,
|
||||
# duration=half_ease_duration,
|
||||
# )
|
||||
#
|
||||
# mirror_function = easing_class(start=self.end_value,
|
||||
# end=self.start_value,
|
||||
# duration=half_ease_duration,
|
||||
# )
|
||||
# for step_index in range(num_easing_steps):
|
||||
# if step_index <= half_ease_duration:
|
||||
# step_val = easing_function.ease(step_index)
|
||||
# else:
|
||||
# step_val = mirror_function.ease(step_index - half_ease_duration)
|
||||
# easing_list.append(step_val)
|
||||
# if log_diagnostics: logger.debug(step_index, step_val)
|
||||
#
|
||||
|
||||
else: # no mirroring (default)
|
||||
easing_function = easing_class(
|
||||
start=self.start_value,
|
||||
end=self.end_value,
|
||||
duration=num_easing_steps - 1,
|
||||
)
|
||||
for step_index in range(num_easing_steps):
|
||||
step_val = easing_function.ease(step_index)
|
||||
easing_list.append(step_val)
|
||||
if log_diagnostics:
|
||||
context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val))
|
||||
|
||||
if log_diagnostics:
|
||||
context.logger.debug("prelist size: " + str(len(prelist)))
|
||||
context.logger.debug("easing_list size: " + str(len(easing_list)))
|
||||
context.logger.debug("postlist size: " + str(len(postlist)))
|
||||
|
||||
param_list = prelist + easing_list + postlist
|
||||
|
||||
if self.show_easing_plot:
|
||||
plt.figure()
|
||||
plt.xlabel("Step")
|
||||
plt.ylabel("Param Value")
|
||||
plt.title("Per-Step Values Based On Easing: " + self.easing)
|
||||
plt.bar(range(len(param_list)), param_list)
|
||||
# plt.plot(param_list)
|
||||
ax = plt.gca()
|
||||
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
|
||||
buf = io.BytesIO()
|
||||
plt.savefig(buf, format="png")
|
||||
buf.seek(0)
|
||||
im = PIL.Image.open(buf)
|
||||
im.show()
|
||||
buf.close()
|
||||
|
||||
# output array of size steps, each entry list[i] is param value for step i
|
||||
return FloatCollectionOutput(collection=param_list)
|
||||
|
||||
@@ -4,7 +4,13 @@ from typing import Optional
|
||||
|
||||
import torch
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
BoundingBoxField,
|
||||
@@ -18,6 +24,7 @@ from invokeai.app.invocations.fields import (
|
||||
InputField,
|
||||
LatentsField,
|
||||
OutputField,
|
||||
SD3ConditioningField,
|
||||
TensorField,
|
||||
UIComponent,
|
||||
)
|
||||
@@ -426,6 +433,17 @@ class FluxConditioningOutput(BaseInvocationOutput):
|
||||
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
|
||||
|
||||
|
||||
@invocation_output("sd3_conditioning_output")
|
||||
class SD3ConditioningOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a single SD3 conditioning tensor"""
|
||||
|
||||
conditioning: SD3ConditioningField = OutputField(description=FieldDescriptions.cond)
|
||||
|
||||
@classmethod
|
||||
def build(cls, conditioning_name: str) -> "SD3ConditioningOutput":
|
||||
return cls(conditioning=SD3ConditioningField(conditioning_name=conditioning_name))
|
||||
|
||||
|
||||
@invocation_output("conditioning_output")
|
||||
class ConditioningOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a single conditioning tensor"""
|
||||
@@ -521,3 +539,23 @@ class BoundingBoxInvocation(BaseInvocation):
|
||||
|
||||
|
||||
# endregion
|
||||
|
||||
|
||||
@invocation(
|
||||
"image_batch",
|
||||
title="Image Batch",
|
||||
tags=["primitives", "image", "batch", "internal"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class ImageBatchInvocation(BaseInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each image in the batch."""
|
||||
|
||||
images: list[ImageField] = InputField(min_length=1, description="The images to batch over", input=Input.Direct)
|
||||
|
||||
def __init__(self):
|
||||
raise NotImplementedError("This class should never be executed or instantiated directly.")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
raise NotImplementedError("This class should never be executed or instantiated directly.")
|
||||
|
||||
invokeai/app/invocations/sd3_denoise.py (new file, 338 lines)
@@ -0,0 +1,338 @@
|
||||
from typing import Callable, Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torchvision.transforms as tv_transforms
|
||||
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
SD3ConditioningField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import TransformerField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
|
||||
from invokeai.backend.model_manager.config import BaseModelType
|
||||
from invokeai.backend.sd3.extensions.inpaint_extension import InpaintExtension
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
@invocation(
|
||||
"sd3_denoise",
|
||||
title="SD3 Denoise",
|
||||
tags=["image", "sd3"],
|
||||
category="image",
|
||||
version="1.1.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Run denoising process with a SD3 model."""
|
||||
|
||||
# If latents is provided, this means we are doing image-to-image.
|
||||
latents: Optional[LatentsField] = InputField(
|
||||
default=None, description=FieldDescriptions.latents, input=Input.Connection
|
||||
)
|
||||
# denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
|
||||
denoise_mask: Optional[DenoiseMaskField] = InputField(
|
||||
default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection
|
||||
)
|
||||
denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
|
||||
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
|
||||
transformer: TransformerField = InputField(
|
||||
description=FieldDescriptions.sd3_model, input=Input.Connection, title="Transformer"
|
||||
)
|
||||
positive_conditioning: SD3ConditioningField = InputField(
|
||||
description=FieldDescriptions.positive_cond, input=Input.Connection
|
||||
)
|
||||
negative_conditioning: SD3ConditioningField = InputField(
|
||||
description=FieldDescriptions.negative_cond, input=Input.Connection
|
||||
)
|
||||
cfg_scale: float | list[float] = InputField(default=3.5, description=FieldDescriptions.cfg_scale, title="CFG Scale")
|
||||
width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
|
||||
height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
|
||||
steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
|
||||
seed: int = InputField(default=0, description="Randomness seed for reproducibility.")
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
latents = self._run_diffusion(context)
|
||||
latents = latents.detach().to("cpu")
|
||||
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
|
||||
def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
|
||||
"""Prepare the inpaint mask.
|
||||
- Loads the mask
|
||||
- Resizes if necessary
|
||||
- Casts to same device/dtype as latents
|
||||
|
||||
Args:
|
||||
context (InvocationContext): The invocation context, for loading the inpaint mask.
|
||||
latents (torch.Tensor): A latent image tensor. Used to determine the target shape, device, and dtype for the
|
||||
inpaint mask.
|
||||
|
||||
Returns:
|
||||
torch.Tensor | None: Inpaint mask. Values of 0.0 represent the regions to be fully denoised, and 1.0
|
||||
represent the regions to be preserved.
|
||||
"""
|
||||
if self.denoise_mask is None:
|
||||
return None
|
||||
mask = context.tensors.load(self.denoise_mask.mask_name)
|
||||
|
||||
# The input denoise_mask contains values in [0, 1], where 0.0 represents the regions to be fully denoised, and
|
||||
# 1.0 represents the regions to be preserved.
|
||||
# We invert the mask so that the regions to be preserved are 0.0 and the regions to be denoised are 1.0.
|
||||
mask = 1.0 - mask
|
||||
|
||||
_, _, latent_height, latent_width = latents.shape
|
||||
mask = tv_resize(
|
||||
img=mask,
|
||||
size=[latent_height, latent_width],
|
||||
interpolation=tv_transforms.InterpolationMode.BILINEAR,
|
||||
antialias=False,
|
||||
)
|
||||
|
||||
mask = mask.to(device=latents.device, dtype=latents.dtype)
|
||||
return mask
|
||||
|
||||
def _load_text_conditioning(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
conditioning_name: str,
|
||||
joint_attention_dim: int,
|
||||
dtype: torch.dtype,
|
||||
device: torch.device,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
# Load the conditioning data.
|
||||
cond_data = context.conditioning.load(conditioning_name)
|
||||
assert len(cond_data.conditionings) == 1
|
||||
sd3_conditioning = cond_data.conditionings[0]
|
||||
assert isinstance(sd3_conditioning, SD3ConditioningInfo)
|
||||
sd3_conditioning = sd3_conditioning.to(dtype=dtype, device=device)
|
||||
|
||||
t5_embeds = sd3_conditioning.t5_embeds
|
||||
if t5_embeds is None:
|
||||
t5_embeds = torch.zeros(
|
||||
(1, SD3_T5_MAX_SEQ_LEN, joint_attention_dim),
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
)
|
||||
|
||||
clip_prompt_embeds = torch.cat([sd3_conditioning.clip_l_embeds, sd3_conditioning.clip_g_embeds], dim=-1)
|
||||
clip_prompt_embeds = torch.nn.functional.pad(
|
||||
clip_prompt_embeds, (0, t5_embeds.shape[-1] - clip_prompt_embeds.shape[-1])
|
||||
)
|
||||
|
||||
prompt_embeds = torch.cat([clip_prompt_embeds, t5_embeds], dim=-2)
|
||||
pooled_prompt_embeds = torch.cat(
|
||||
[sd3_conditioning.clip_l_pooled_embeds, sd3_conditioning.clip_g_pooled_embeds], dim=-1
|
||||
)
|
||||
|
||||
return prompt_embeds, pooled_prompt_embeds
|
||||
|
||||
def _get_noise(
|
||||
self,
|
||||
num_samples: int,
|
||||
num_channels_latents: int,
|
||||
height: int,
|
||||
width: int,
|
||||
dtype: torch.dtype,
|
||||
device: torch.device,
|
||||
seed: int,
|
||||
) -> torch.Tensor:
|
||||
# We always generate noise on the same device and dtype then cast to ensure consistency across devices/dtypes.
|
||||
rand_device = "cpu"
|
||||
rand_dtype = torch.float16
|
||||
|
||||
return torch.randn(
|
||||
num_samples,
|
||||
num_channels_latents,
|
||||
int(height) // LATENT_SCALE_FACTOR,
|
||||
int(width) // LATENT_SCALE_FACTOR,
|
||||
device=rand_device,
|
||||
dtype=rand_dtype,
|
||||
generator=torch.Generator(device=rand_device).manual_seed(seed),
|
||||
).to(device=device, dtype=dtype)
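The comment above explains that noise is always sampled on a fixed device (CPU) and dtype (float16) and only then cast, so a given seed produces identical noise regardless of the target hardware. A standalone sketch of that idea (the tensor shape and the bfloat16 target are illustrative, not taken from the diff):

import torch

def make_noise(seed: int, device: torch.device) -> torch.Tensor:
    # Always sample on CPU in float16, then cast to the target device/dtype.
    generator = torch.Generator(device="cpu").manual_seed(seed)
    noise = torch.randn(1, 16, 128, 128, device="cpu", dtype=torch.float16, generator=generator)
    return noise.to(device=device, dtype=torch.bfloat16)

assert torch.equal(make_noise(0, torch.device("cpu")), make_noise(0, torch.device("cpu")))  # reproducible per seed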
|
||||
|
||||
def _prepare_cfg_scale(self, num_timesteps: int) -> list[float]:
|
||||
"""Prepare the CFG scale list.
|
||||
|
||||
Args:
|
||||
num_timesteps (int): The number of timesteps in the scheduler. Could be different from num_steps depending
|
||||
on the scheduler used (e.g. higher order schedulers).
|
||||
|
||||
Returns:
|
||||
list[float]: The CFG scale to apply at each timestep.
|
||||
"""
|
||||
if isinstance(self.cfg_scale, float):
|
||||
cfg_scale = [self.cfg_scale] * num_timesteps
|
||||
elif isinstance(self.cfg_scale, list):
|
||||
assert len(self.cfg_scale) == num_timesteps
|
||||
cfg_scale = self.cfg_scale
|
||||
else:
|
||||
raise ValueError(f"Invalid CFG scale type: {type(self.cfg_scale)}")
|
||||
|
||||
return cfg_scale
|
||||
|
||||
def _run_diffusion(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
):
|
||||
inference_dtype = TorchDevice.choose_torch_dtype()
|
||||
device = TorchDevice.choose_torch_device()
|
||||
|
||||
transformer_info = context.models.load(self.transformer.transformer)
|
||||
|
||||
# Load/process the conditioning data.
|
||||
# TODO(ryand): Make CFG optional.
|
||||
do_classifier_free_guidance = True
|
||||
pos_prompt_embeds, pos_pooled_prompt_embeds = self._load_text_conditioning(
|
||||
context=context,
|
||||
conditioning_name=self.positive_conditioning.conditioning_name,
|
||||
joint_attention_dim=transformer_info.model.config.joint_attention_dim,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
)
|
||||
neg_prompt_embeds, neg_pooled_prompt_embeds = self._load_text_conditioning(
|
||||
context=context,
|
||||
conditioning_name=self.negative_conditioning.conditioning_name,
|
||||
joint_attention_dim=transformer_info.model.config.joint_attention_dim,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
)
|
||||
# TODO(ryand): Support both sequential and batched CFG inference.
|
||||
prompt_embeds = torch.cat([neg_prompt_embeds, pos_prompt_embeds], dim=0)
|
||||
pooled_prompt_embeds = torch.cat([neg_pooled_prompt_embeds, pos_pooled_prompt_embeds], dim=0)
|
||||
|
||||
# Prepare the timestep schedule.
|
||||
# We add an extra step to the end to account for the final timestep of 0.0.
|
||||
timesteps: list[float] = torch.linspace(1, 0, self.steps + 1).tolist()
|
||||
# Clip the timesteps schedule based on denoising_start and denoising_end.
|
||||
timesteps = clip_timestep_schedule_fractional(timesteps, self.denoising_start, self.denoising_end)
|
||||
total_steps = len(timesteps) - 1
|
||||
|
||||
# Prepare the CFG scale list.
|
||||
cfg_scale = self._prepare_cfg_scale(total_steps)
|
||||
|
||||
# Load the input latents, if provided.
|
||||
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
|
||||
if init_latents is not None:
|
||||
init_latents = init_latents.to(device=device, dtype=inference_dtype)
|
||||
|
||||
# Generate initial latent noise.
|
||||
num_channels_latents = transformer_info.model.config.in_channels
|
||||
assert isinstance(num_channels_latents, int)
|
||||
noise = self._get_noise(
|
||||
num_samples=1,
|
||||
num_channels_latents=num_channels_latents,
|
||||
height=self.height,
|
||||
width=self.width,
|
||||
dtype=inference_dtype,
|
||||
device=device,
|
||||
seed=self.seed,
|
||||
)
|
||||
|
||||
# Prepare input latent image.
|
||||
if init_latents is not None:
|
||||
# Noise the init_latents by the appropriate amount for the first timestep.
|
||||
t_0 = timesteps[0]
|
||||
latents = t_0 * noise + (1.0 - t_0) * init_latents
|
||||
else:
|
||||
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
|
||||
if self.denoising_start > 1e-5:
|
||||
raise ValueError("denoising_start should be 0 when initial latents are not provided.")
|
||||
latents = noise
|
||||
|
||||
# If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
|
||||
# denoising steps.
|
||||
if len(timesteps) <= 1:
|
||||
return latents
|
||||
|
||||
# Prepare inpaint extension.
|
||||
inpaint_mask = self._prep_inpaint_mask(context, latents)
|
||||
inpaint_extension: InpaintExtension | None = None
|
||||
if inpaint_mask is not None:
|
||||
assert init_latents is not None
|
||||
inpaint_extension = InpaintExtension(
|
||||
init_latents=init_latents,
|
||||
inpaint_mask=inpaint_mask,
|
||||
noise=noise,
|
||||
)
|
||||
|
||||
step_callback = self._build_step_callback(context)
|
||||
|
||||
step_callback(
|
||||
PipelineIntermediateState(
|
||||
step=0,
|
||||
order=1,
|
||||
total_steps=total_steps,
|
||||
timestep=int(timesteps[0]),
|
||||
latents=latents,
|
||||
),
|
||||
)
|
||||
|
||||
with transformer_info.model_on_device() as (cached_weights, transformer):
|
||||
assert isinstance(transformer, SD3Transformer2DModel)
|
||||
|
||||
# 6. Denoising loop
|
||||
for step_idx, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
|
||||
# Expand the latents if we are doing CFG.
|
||||
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
||||
# Expand the timestep to match the latent model input.
|
||||
# Multiply by 1000 to match the default FlowMatchEulerDiscreteScheduler num_train_timesteps.
|
||||
timestep = torch.tensor([t_curr * 1000], device=device).expand(latent_model_input.shape[0])
|
||||
|
||||
noise_pred = transformer(
|
||||
hidden_states=latent_model_input,
|
||||
timestep=timestep,
|
||||
encoder_hidden_states=prompt_embeds,
|
||||
pooled_projections=pooled_prompt_embeds,
|
||||
joint_attention_kwargs=None,
|
||||
return_dict=False,
|
||||
)[0]
|
||||
|
||||
# Apply CFG.
|
||||
if do_classifier_free_guidance:
|
||||
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
|
||||
noise_pred = noise_pred_uncond + cfg_scale[step_idx] * (noise_pred_cond - noise_pred_uncond)
|
||||
|
||||
# Compute the previous noisy sample x_t -> x_t-1.
|
||||
latents_dtype = latents.dtype
|
||||
latents = latents.to(dtype=torch.float32)
|
||||
latents = latents + (t_prev - t_curr) * noise_pred
|
||||
latents = latents.to(dtype=latents_dtype)
|
||||
|
||||
if inpaint_extension is not None:
|
||||
latents = inpaint_extension.merge_intermediate_latents_with_init_latents(latents, t_prev)
|
||||
|
||||
step_callback(
|
||||
PipelineIntermediateState(
|
||||
step=step_idx + 1,
|
||||
order=1,
|
||||
total_steps=total_steps,
|
||||
timestep=int(t_curr),
|
||||
latents=latents,
|
||||
),
|
||||
)
|
||||
|
||||
return latents
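The inner update above, latents = latents + (t_prev - t_curr) * noise_pred, is one explicit Euler step of the rectified-flow ODE, with the timestep decreasing from 1 (pure noise) toward 0 (clean latents). A standalone sketch with made-up shapes and values:

import torch

x = torch.randn(1, 16, 64, 64)      # latents at time t_curr (illustrative shape)
v = torch.randn(1, 16, 64, 64)      # model-predicted velocity after CFG
t_curr, t_prev = 0.8, 0.7           # consecutive timesteps from the schedule

x_next = x + (t_prev - t_curr) * v  # dt = -0.1, i.e. a small step toward t = 0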
|
||||
|
||||
def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
|
||||
def step_callback(state: PipelineIntermediateState) -> None:
|
||||
context.util.sd_step_callback(state, BaseModelType.StableDiffusion3)
|
||||
|
||||
return step_callback
|
||||
invokeai/app/invocations/sd3_image_to_latents.py (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
import einops
|
||||
import torch
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import VAEField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.load.load_base import LoadedModel
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
|
||||
|
||||
|
||||
@invocation(
|
||||
"sd3_i2l",
|
||||
title="SD3 Image to Latents",
|
||||
tags=["image", "latents", "vae", "i2l", "sd3"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Generates latents from an image."""
|
||||
|
||||
image: ImageField = InputField(description="The image to encode")
|
||||
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)
|
||||
|
||||
@staticmethod
|
||||
def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
|
||||
with vae_info as vae:
|
||||
assert isinstance(vae, AutoencoderKL)
|
||||
|
||||
vae.disable_tiling()
|
||||
|
||||
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
|
||||
with torch.inference_mode():
|
||||
image_tensor_dist = vae.encode(image_tensor).latent_dist
|
||||
# TODO: Use seed to make sampling reproducible.
|
||||
latents: torch.Tensor = image_tensor_dist.sample().to(dtype=vae.dtype)
|
||||
|
||||
latents = vae.config.scaling_factor * latents
|
||||
|
||||
return latents
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
|
||||
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
||||
if image_tensor.dim() == 3:
|
||||
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
|
||||
|
||||
latents = latents.to("cpu")
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
74
invokeai/app/invocations/sd3_latents_to_image.py
Normal file
@@ -0,0 +1,74 @@
|
||||
from contextlib import nullcontext
|
||||
|
||||
import torch
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from einops import rearrange
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import VAEField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
@invocation(
|
||||
"sd3_l2i",
|
||||
title="SD3 Latents to Image",
|
||||
tags=["latents", "image", "vae", "l2i", "sd3"],
|
||||
category="latents",
|
||||
version="1.3.0",
|
||||
)
|
||||
class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Generates an image from latents."""
|
||||
|
||||
latents: LatentsField = InputField(
|
||||
description=FieldDescriptions.latents,
|
||||
input=Input.Connection,
|
||||
)
|
||||
vae: VAEField = InputField(
|
||||
description=FieldDescriptions.vae,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
latents = context.tensors.load(self.latents.latents_name)
|
||||
|
||||
vae_info = context.models.load(self.vae.vae)
|
||||
assert isinstance(vae_info.model, (AutoencoderKL))
|
||||
with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
|
||||
context.util.signal_progress("Running VAE")
|
||||
assert isinstance(vae, (AutoencoderKL))
|
||||
latents = latents.to(vae.device)
|
||||
|
||||
vae.disable_tiling()
|
||||
|
||||
tiling_context = nullcontext()
|
||||
|
||||
# Clear memory, as VAE decode can request a lot of it.
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
with torch.inference_mode(), tiling_context:
|
||||
# copied from diffusers pipeline
|
||||
latents = latents / vae.config.scaling_factor
|
||||
img = vae.decode(latents, return_dict=False)[0]
|
||||
|
||||
img = img.clamp(-1, 1)
|
||||
img = rearrange(img[0], "c h w -> h w c") # noqa: F821
|
||||
img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())
|
||||
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
image_dto = context.images.save(image=img_pil)
|
||||
|
||||
return ImageOutput.build(image_dto)
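The decode path above divides the latents by the VAE scaling factor, decodes, clamps to [-1, 1], and maps the result to 8-bit pixels. A small hedged sketch of that final tensor-to-PIL conversion (names are illustrative):

import torch
from einops import rearrange
from PIL import Image

def decoded_tensor_to_pil(img: torch.Tensor) -> Image.Image:
    # img: decoded batch in [-1, 1] with shape (1, C, H, W).
    img = img.clamp(-1, 1)
    img = rearrange(img[0], "c h w -> h w c")
    # Map [-1, 1] -> [0, 255] and cast to uint8 for PIL.
    return Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())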
|
||||
108
invokeai/app/invocations/sd3_model_loader.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from typing import Optional
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.config import SubModelType
|
||||
|
||||
|
||||
@invocation_output("sd3_model_loader_output")
|
||||
class Sd3ModelLoaderOutput(BaseInvocationOutput):
|
||||
"""SD3 base model loader output."""
|
||||
|
||||
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
|
||||
clip_l: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP L")
|
||||
clip_g: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP G")
|
||||
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
|
||||
|
||||
@invocation(
|
||||
"sd3_model_loader",
|
||||
title="SD3 Main Model",
|
||||
tags=["model", "sd3"],
|
||||
category="model",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class Sd3ModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a SD3 base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.sd3_model,
|
||||
ui_type=UIType.SD3MainModel,
|
||||
input=Input.Direct,
|
||||
)
|
||||
|
||||
t5_encoder_model: Optional[ModelIdentifierField] = InputField(
|
||||
description=FieldDescriptions.t5_encoder,
|
||||
ui_type=UIType.T5EncoderModel,
|
||||
input=Input.Direct,
|
||||
title="T5 Encoder",
|
||||
default=None,
|
||||
)
|
||||
|
||||
clip_l_model: Optional[ModelIdentifierField] = InputField(
|
||||
description=FieldDescriptions.clip_embed_model,
|
||||
ui_type=UIType.CLIPLEmbedModel,
|
||||
input=Input.Direct,
|
||||
title="CLIP L Encoder",
|
||||
default=None,
|
||||
)
|
||||
|
||||
clip_g_model: Optional[ModelIdentifierField] = InputField(
|
||||
description=FieldDescriptions.clip_g_model,
|
||||
ui_type=UIType.CLIPGEmbedModel,
|
||||
input=Input.Direct,
|
||||
title="CLIP G Encoder",
|
||||
default=None,
|
||||
)
|
||||
|
||||
vae_model: Optional[ModelIdentifierField] = InputField(
|
||||
description=FieldDescriptions.vae_model, ui_type=UIType.VAEModel, title="VAE", default=None
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> Sd3ModelLoaderOutput:
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = (
|
||||
self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
if self.vae_model
|
||||
else self.model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
)
|
||||
tokenizer_l = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
clip_encoder_l = (
|
||||
self.clip_l_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
if self.clip_l_model
|
||||
else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
)
|
||||
tokenizer_g = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
|
||||
clip_encoder_g = (
|
||||
self.clip_g_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
|
||||
if self.clip_g_model
|
||||
else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
|
||||
)
|
||||
tokenizer_t5 = (
|
||||
self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
|
||||
if self.t5_encoder_model
|
||||
else self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
|
||||
)
|
||||
t5_encoder = (
|
||||
self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
|
||||
if self.t5_encoder_model
|
||||
else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
|
||||
)
|
||||
|
||||
return Sd3ModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
|
||||
clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
|
||||
vae=VAEField(vae=vae),
|
||||
)
|
||||
201
invokeai/app/invocations/sd3_text_encoder.py
Normal file
@@ -0,0 +1,201 @@
|
||||
from contextlib import ExitStack
|
||||
from typing import Iterator, Tuple
|
||||
|
||||
import torch
|
||||
from transformers import (
|
||||
CLIPTextModel,
|
||||
CLIPTextModelWithProjection,
|
||||
CLIPTokenizer,
|
||||
T5EncoderModel,
|
||||
T5Tokenizer,
|
||||
T5TokenizerFast,
|
||||
)
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
|
||||
from invokeai.app.invocations.model import CLIPField, T5EncoderField
|
||||
from invokeai.app.invocations.primitives import SD3ConditioningOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
|
||||
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
|
||||
from invokeai.backend.lora.lora_patcher import LoRAPatcher
|
||||
from invokeai.backend.model_manager.config import ModelFormat
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, SD3ConditioningInfo
|
||||
|
||||
# The SD3 T5 max sequence length is set based on the default in diffusers.
|
||||
SD3_T5_MAX_SEQ_LEN = 256
|
||||
|
||||
|
||||
@invocation(
|
||||
"sd3_text_encoder",
|
||||
title="SD3 Text Encoding",
|
||||
tags=["prompt", "conditioning", "sd3"],
|
||||
category="conditioning",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class Sd3TextEncoderInvocation(BaseInvocation):
|
||||
"""Encodes and preps a prompt for a SD3 image."""
|
||||
|
||||
clip_l: CLIPField = InputField(
|
||||
title="CLIP L",
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
)
|
||||
clip_g: CLIPField = InputField(
|
||||
title="CLIP G",
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
# The SD3 models were trained with text encoder dropout, so the T5 encoder can be omitted to save time/memory.
|
||||
t5_encoder: T5EncoderField | None = InputField(
|
||||
title="T5Encoder",
|
||||
default=None,
|
||||
description=FieldDescriptions.t5_encoder,
|
||||
input=Input.Connection,
|
||||
)
|
||||
prompt: str = InputField(description="Text prompt to encode.")
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> SD3ConditioningOutput:
|
||||
# Note: The text encoding models are run in separate functions to ensure that all model references are locally
|
||||
# scoped. This ensures that earlier models can be freed and gc'd before loading later models (if necessary).
|
||||
|
||||
clip_l_embeddings, clip_l_pooled_embeddings = self._clip_encode(context, self.clip_l)
|
||||
clip_g_embeddings, clip_g_pooled_embeddings = self._clip_encode(context, self.clip_g)
|
||||
|
||||
t5_embeddings: torch.Tensor | None = None
|
||||
if self.t5_encoder is not None:
|
||||
t5_embeddings = self._t5_encode(context, SD3_T5_MAX_SEQ_LEN)
|
||||
|
||||
conditioning_data = ConditioningFieldData(
|
||||
conditionings=[
|
||||
SD3ConditioningInfo(
|
||||
clip_l_embeds=clip_l_embeddings,
|
||||
clip_l_pooled_embeds=clip_l_pooled_embeddings,
|
||||
clip_g_embeds=clip_g_embeddings,
|
||||
clip_g_pooled_embeds=clip_g_pooled_embeddings,
|
||||
t5_embeds=t5_embeddings,
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
conditioning_name = context.conditioning.save(conditioning_data)
|
||||
return SD3ConditioningOutput.build(conditioning_name)
|
||||
|
||||
def _t5_encode(self, context: InvocationContext, max_seq_len: int) -> torch.Tensor:
|
||||
assert self.t5_encoder is not None
|
||||
t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
|
||||
t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)
|
||||
|
||||
prompt = [self.prompt]
|
||||
|
||||
with (
|
||||
t5_text_encoder_info as t5_text_encoder,
|
||||
t5_tokenizer_info as t5_tokenizer,
|
||||
):
|
||||
context.util.signal_progress("Running T5 encoder")
|
||||
assert isinstance(t5_text_encoder, T5EncoderModel)
|
||||
assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))
|
||||
|
||||
text_inputs = t5_tokenizer(
|
||||
prompt,
|
||||
padding="max_length",
|
||||
max_length=max_seq_len,
|
||||
truncation=True,
|
||||
add_special_tokens=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
text_input_ids = text_inputs.input_ids
|
||||
untruncated_ids = t5_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
||||
assert isinstance(text_input_ids, torch.Tensor)
|
||||
assert isinstance(untruncated_ids, torch.Tensor)
|
||||
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
||||
text_input_ids, untruncated_ids
|
||||
):
|
||||
removed_text = t5_tokenizer.batch_decode(untruncated_ids[:, max_seq_len - 1 : -1])
|
||||
context.logger.warning(
|
||||
"The following part of your input was truncated because `max_sequence_length` is set to "
|
||||
f" {max_seq_len} tokens: {removed_text}"
|
||||
)
|
||||
|
||||
prompt_embeds = t5_text_encoder(text_input_ids.to(t5_text_encoder.device))[0]
|
||||
|
||||
assert isinstance(prompt_embeds, torch.Tensor)
|
||||
return prompt_embeds
|
||||
|
||||
def _clip_encode(
|
||||
self, context: InvocationContext, clip_model: CLIPField, tokenizer_max_length: int = 77
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
clip_tokenizer_info = context.models.load(clip_model.tokenizer)
|
||||
clip_text_encoder_info = context.models.load(clip_model.text_encoder)
|
||||
|
||||
prompt = [self.prompt]
|
||||
|
||||
with (
|
||||
clip_text_encoder_info.model_on_device() as (cached_weights, clip_text_encoder),
|
||||
clip_tokenizer_info as clip_tokenizer,
|
||||
ExitStack() as exit_stack,
|
||||
):
|
||||
context.util.signal_progress("Running CLIP encoder")
|
||||
assert isinstance(clip_text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
|
||||
assert isinstance(clip_tokenizer, CLIPTokenizer)
|
||||
|
||||
clip_text_encoder_config = clip_text_encoder_info.config
|
||||
assert clip_text_encoder_config is not None
|
||||
|
||||
# Apply LoRA models to the CLIP encoder.
|
||||
# Note: We apply the LoRA after the text encoder has been moved to its target device for faster patching.
|
||||
if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
|
||||
# The model is non-quantized, so we can apply the LoRA weights directly into the model.
|
||||
exit_stack.enter_context(
|
||||
LoRAPatcher.apply_lora_patches(
|
||||
model=clip_text_encoder,
|
||||
patches=self._clip_lora_iterator(context, clip_model),
|
||||
prefix=FLUX_LORA_CLIP_PREFIX,
|
||||
cached_weights=cached_weights,
|
||||
)
|
||||
)
|
||||
else:
|
||||
# There are currently no supported CLIP quantized models. Add support here if needed.
|
||||
raise ValueError(f"Unsupported model format: {clip_text_encoder_config.format}")
|
||||
|
||||
clip_text_encoder = clip_text_encoder.eval().requires_grad_(False)
|
||||
|
||||
text_inputs = clip_tokenizer(
|
||||
prompt,
|
||||
padding="max_length",
|
||||
max_length=tokenizer_max_length,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
text_input_ids = text_inputs.input_ids
|
||||
untruncated_ids = clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
||||
assert isinstance(text_input_ids, torch.Tensor)
|
||||
assert isinstance(untruncated_ids, torch.Tensor)
|
||||
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
||||
text_input_ids, untruncated_ids
|
||||
):
|
||||
removed_text = clip_tokenizer.batch_decode(untruncated_ids[:, tokenizer_max_length - 1 : -1])
|
||||
context.logger.warning(
|
||||
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
||||
f" {tokenizer_max_length} tokens: {removed_text}"
|
||||
)
|
||||
prompt_embeds = clip_text_encoder(
|
||||
input_ids=text_input_ids.to(clip_text_encoder.device), output_hidden_states=True
|
||||
)
|
||||
pooled_prompt_embeds = prompt_embeds[0]
|
||||
prompt_embeds = prompt_embeds.hidden_states[-2]
|
||||
|
||||
return prompt_embeds, pooled_prompt_embeds
|
||||
|
||||
def _clip_lora_iterator(
|
||||
self, context: InvocationContext, clip_model: CLIPField
|
||||
) -> Iterator[Tuple[LoRAModelRaw, float]]:
|
||||
for lora in clip_model.loras:
|
||||
lora_info = context.models.load(lora.lora)
|
||||
assert isinstance(lora_info.model, LoRAModelRaw)
|
||||
yield (lora_info.model, lora.weight)
|
||||
del lora_info
|
||||
@@ -1,9 +1,11 @@
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field
|
||||
from transformers import AutoModelForMaskGeneration, AutoProcessor
|
||||
from transformers.models.sam import SamModel
|
||||
from transformers.models.sam.processing_sam import SamProcessor
|
||||
@@ -23,12 +25,31 @@ SEGMENT_ANYTHING_MODEL_IDS: dict[SegmentAnythingModelKey, str] = {
|
||||
}
|
||||
|
||||
|
||||
class SAMPointLabel(Enum):
|
||||
negative = -1
|
||||
neutral = 0
|
||||
positive = 1
|
||||
|
||||
|
||||
class SAMPoint(BaseModel):
|
||||
x: int = Field(..., description="The x-coordinate of the point")
|
||||
y: int = Field(..., description="The y-coordinate of the point")
|
||||
label: SAMPointLabel = Field(..., description="The label of the point")
|
||||
|
||||
|
||||
class SAMPointsField(BaseModel):
|
||||
points: list[SAMPoint] = Field(..., description="The points of the object")
|
||||
|
||||
def to_list(self) -> list[list[int]]:
|
||||
return [[point.x, point.y, point.label.value] for point in self.points]
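For illustration, a hypothetical point prompt built with the fields above; each entry of to_list() is [x, y, label], with label +1 for foreground, -1 for background, and 0 for neutral points.

points = SAMPointsField(
    points=[
        SAMPoint(x=120, y=80, label=SAMPointLabel.positive),
        SAMPoint(x=40, y=200, label=SAMPointLabel.negative),
    ]
)
assert points.to_list() == [[120, 80, 1], [40, 200, -1]]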
|
||||
|
||||
|
||||
@invocation(
|
||||
"segment_anything",
|
||||
title="Segment Anything",
|
||||
tags=["prompt", "segmentation"],
|
||||
category="segmentation",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
)
|
||||
class SegmentAnythingInvocation(BaseInvocation):
|
||||
"""Runs a Segment Anything Model."""
|
||||
@@ -40,7 +61,13 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
|
||||
model: SegmentAnythingModelKey = InputField(description="The Segment Anything model to use.")
|
||||
image: ImageField = InputField(description="The image to segment.")
|
||||
bounding_boxes: list[BoundingBoxField] = InputField(description="The bounding boxes to prompt the SAM model with.")
|
||||
bounding_boxes: list[BoundingBoxField] | None = InputField(
|
||||
default=None, description="The bounding boxes to prompt the SAM model with."
|
||||
)
|
||||
point_lists: list[SAMPointsField] | None = InputField(
|
||||
default=None,
|
||||
description="The list of point lists to prompt the SAM model with. Each list of points represents a single object.",
|
||||
)
|
||||
apply_polygon_refinement: bool = InputField(
|
||||
description="Whether to apply polygon refinement to the masks. This will smooth the edges of the masks slightly and ensure that each mask consists of a single closed polygon (before merging).",
|
||||
default=True,
|
||||
@@ -55,7 +82,12 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
# The models expect a 3-channel RGB image.
|
||||
image_pil = context.images.get_pil(self.image.image_name, mode="RGB")
|
||||
|
||||
if len(self.bounding_boxes) == 0:
|
||||
if self.point_lists is not None and self.bounding_boxes is not None:
|
||||
raise ValueError("Only one of point_lists or bounding_box can be provided.")
|
||||
|
||||
if (not self.bounding_boxes or len(self.bounding_boxes) == 0) and (
|
||||
not self.point_lists or len(self.point_lists) == 0
|
||||
):
|
||||
combined_mask = torch.zeros(image_pil.size[::-1], dtype=torch.bool)
|
||||
else:
|
||||
masks = self._segment(context=context, image=image_pil)
|
||||
@@ -83,14 +115,13 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
assert isinstance(sam_processor, SamProcessor)
|
||||
return SegmentAnythingPipeline(sam_model=sam_model, sam_processor=sam_processor)
|
||||
|
||||
def _segment(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
image: Image.Image,
|
||||
) -> list[torch.Tensor]:
|
||||
def _segment(self, context: InvocationContext, image: Image.Image) -> list[torch.Tensor]:
|
||||
"""Use Segment Anything (SAM) to generate masks given an image + a set of bounding boxes."""
|
||||
# Convert the bounding boxes to the SAM input format.
|
||||
sam_bounding_boxes = [[bb.x_min, bb.y_min, bb.x_max, bb.y_max] for bb in self.bounding_boxes]
|
||||
sam_bounding_boxes = (
|
||||
[[bb.x_min, bb.y_min, bb.x_max, bb.y_max] for bb in self.bounding_boxes] if self.bounding_boxes else None
|
||||
)
|
||||
sam_points = [p.to_list() for p in self.point_lists] if self.point_lists else None
|
||||
|
||||
with (
|
||||
context.models.load_remote_model(
|
||||
@@ -98,7 +129,7 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
) as sam_pipeline,
|
||||
):
|
||||
assert isinstance(sam_pipeline, SegmentAnythingPipeline)
|
||||
masks = sam_pipeline.segment(image=image, bounding_boxes=sam_bounding_boxes)
|
||||
masks = sam_pipeline.segment(image=image, bounding_boxes=sam_bounding_boxes, point_lists=sam_points)
|
||||
|
||||
masks = self._process_masks(masks)
|
||||
if self.apply_polygon_refinement:
|
||||
@@ -141,9 +172,10 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
|
||||
return masks
|
||||
|
||||
def _filter_masks(self, masks: list[torch.Tensor], bounding_boxes: list[BoundingBoxField]) -> list[torch.Tensor]:
|
||||
def _filter_masks(
|
||||
self, masks: list[torch.Tensor], bounding_boxes: list[BoundingBoxField] | None
|
||||
) -> list[torch.Tensor]:
|
||||
"""Filter the detected masks based on the specified mask filter."""
|
||||
assert len(masks) == len(bounding_boxes)
|
||||
|
||||
if self.mask_filter == "all":
|
||||
return masks
|
||||
@@ -151,6 +183,10 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
# Find the largest mask.
|
||||
return [max(masks, key=lambda x: float(x.sum()))]
|
||||
elif self.mask_filter == "highest_box_score":
|
||||
assert (
|
||||
bounding_boxes is not None
|
||||
), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
|
||||
assert len(masks) == len(bounding_boxes)
|
||||
# Find the index of the bounding box with the highest score.
|
||||
# Note that we fall back to -1.0 if the score is None. This is mainly to satisfy the type checker. In most
|
||||
# cases the scores should all be non-None when using this filtering mode. That being said, -1.0 is a
|
||||
|
||||
@@ -110,15 +110,26 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
except Exception as e:
|
||||
raise ImageFileDeleteException from e
|
||||
|
||||
# TODO: make this a bit more flexible for e.g. cloud storage
|
||||
def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
|
||||
path = self.__output_folder / image_name
|
||||
base_folder = self.__thumbnails_folder if thumbnail else self.__output_folder
|
||||
filename = get_thumbnail_name(image_name) if thumbnail else image_name
|
||||
|
||||
if thumbnail:
|
||||
thumbnail_name = get_thumbnail_name(image_name)
|
||||
path = self.__thumbnails_folder / thumbnail_name
|
||||
# Strip any path information from the filename
|
||||
basename = Path(filename).name
|
||||
|
||||
return path
|
||||
if basename != filename:
|
||||
raise ValueError("Invalid image name, potential directory traversal detected")
|
||||
|
||||
image_path = base_folder / basename
|
||||
|
||||
# Ensure the image path is within the base folder to prevent directory traversal
|
||||
resolved_base = base_folder.resolve()
|
||||
resolved_image_path = image_path.resolve()
|
||||
|
||||
if not resolved_image_path.is_relative_to(resolved_base):
|
||||
raise ValueError("Image path outside outputs folder, potential directory traversal detected")
|
||||
|
||||
return resolved_image_path
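The guard above combines two checks: reject names that carry directory components, then confirm the resolved path still sits under the base folder. A standalone sketch of the same pattern (hypothetical helper, not the InvokeAI API):

from pathlib import Path

def safe_join(base_folder: Path, filename: str) -> Path:
    # Reject names that include path components (e.g. "../../etc/passwd").
    basename = Path(filename).name
    if basename != filename:
        raise ValueError("Invalid name, potential directory traversal detected")
    resolved_base = base_folder.resolve()
    resolved_path = (base_folder / basename).resolve()
    # Belt and suspenders: confirm the resolved path is still inside the base folder.
    if not resolved_path.is_relative_to(resolved_base):
        raise ValueError("Path escapes the base folder")
    return resolved_path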
|
||||
|
||||
def validate_path(self, path: Union[str, Path]) -> bool:
|
||||
"""Validates the path given for an image or thumbnail."""
|
||||
|
||||
@@ -15,6 +15,7 @@ from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ClipVariantType,
|
||||
ControlAdapterDefaultSettings,
|
||||
MainModelDefaultSettings,
|
||||
ModelFormat,
|
||||
@@ -85,7 +86,7 @@ class ModelRecordChanges(BaseModelExcludeNull):
|
||||
|
||||
# Checkpoint-specific changes
|
||||
# TODO(MM2): Should we expose these? Feels footgun-y...
|
||||
variant: Optional[ModelVariantType] = Field(description="The variant of the model.", default=None)
|
||||
variant: Optional[ModelVariantType | ClipVariantType] = Field(description="The variant of the model.", default=None)
|
||||
prediction_type: Optional[SchedulerPredictionType] = Field(
|
||||
description="The prediction type of the model.", default=None
|
||||
)
|
||||
|
||||
@@ -16,6 +16,7 @@ from pydantic import (
|
||||
from pydantic_core import to_jsonable_python
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
from invokeai.app.invocations.fields import ImageField
|
||||
from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import (
|
||||
WorkflowWithoutID,
|
||||
@@ -51,11 +52,7 @@ class SessionQueueItemNotFoundError(ValueError):
|
||||
|
||||
# region Batch
|
||||
|
||||
BatchDataType = Union[
|
||||
StrictStr,
|
||||
float,
|
||||
int,
|
||||
]
|
||||
BatchDataType = Union[StrictStr, float, int, ImageField]
|
||||
|
||||
|
||||
class NodeFieldValue(BaseModel):
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Callable, Optional, Union
|
||||
@@ -159,6 +160,10 @@ class LoggerInterface(InvocationContextInterface):
|
||||
|
||||
|
||||
class ImagesInterface(InvocationContextInterface):
|
||||
def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
|
||||
super().__init__(services, data)
|
||||
self._util = util
|
||||
|
||||
def save(
|
||||
self,
|
||||
image: Image,
|
||||
@@ -185,6 +190,8 @@ class ImagesInterface(InvocationContextInterface):
|
||||
The saved image DTO.
|
||||
"""
|
||||
|
||||
self._util.signal_progress("Saving image")
|
||||
|
||||
# If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
|
||||
metadata_ = None
|
||||
if metadata:
|
||||
@@ -221,7 +228,7 @@ class ImagesInterface(InvocationContextInterface):
|
||||
)
|
||||
|
||||
def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image:
|
||||
"""Gets an image as a PIL Image object.
|
||||
"""Gets an image as a PIL Image object. This method returns a copy of the image.
|
||||
|
||||
Args:
|
||||
image_name: The name of the image to get.
|
||||
@@ -233,11 +240,15 @@ class ImagesInterface(InvocationContextInterface):
|
||||
image = self._services.images.get_pil_image(image_name)
|
||||
if mode and mode != image.mode:
|
||||
try:
|
||||
# convert makes a copy!
|
||||
image = image.convert(mode)
|
||||
except ValueError:
|
||||
self._services.logger.warning(
|
||||
f"Could not convert image from {image.mode} to {mode}. Using original mode instead."
|
||||
)
|
||||
else:
|
||||
# copy the image to prevent the user from modifying the original
|
||||
image = image.copy()
|
||||
return image
|
||||
|
||||
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
|
||||
@@ -290,15 +301,15 @@ class TensorsInterface(InvocationContextInterface):
|
||||
return name
|
||||
|
||||
def load(self, name: str) -> Tensor:
|
||||
"""Loads a tensor by name.
|
||||
"""Loads a tensor by name. This method returns a copy of the tensor.
|
||||
|
||||
Args:
|
||||
name: The name of the tensor to load.
|
||||
|
||||
Returns:
|
||||
The loaded tensor.
|
||||
The tensor.
|
||||
"""
|
||||
return self._services.tensors.load(name)
|
||||
return self._services.tensors.load(name).clone()
|
||||
|
||||
|
||||
class ConditioningInterface(InvocationContextInterface):
|
||||
@@ -316,21 +327,25 @@ class ConditioningInterface(InvocationContextInterface):
|
||||
return name
|
||||
|
||||
def load(self, name: str) -> ConditioningFieldData:
|
||||
"""Loads conditioning data by name.
|
||||
"""Loads conditioning data by name. This method returns a copy of the conditioning data.
|
||||
|
||||
Args:
|
||||
name: The name of the conditioning data to load.
|
||||
|
||||
Returns:
|
||||
The loaded conditioning data.
|
||||
The conditioning data.
|
||||
"""
|
||||
|
||||
return self._services.conditioning.load(name)
|
||||
return deepcopy(self._services.conditioning.load(name))
|
||||
|
||||
|
||||
class ModelsInterface(InvocationContextInterface):
|
||||
"""Common API for loading, downloading and managing models."""
|
||||
|
||||
def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
|
||||
super().__init__(services, data)
|
||||
self._util = util
|
||||
|
||||
def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
|
||||
"""Check if a model exists.
|
||||
|
||||
@@ -363,11 +378,15 @@ class ModelsInterface(InvocationContextInterface):
|
||||
|
||||
if isinstance(identifier, str):
|
||||
model = self._services.model_manager.store.get_model(identifier)
|
||||
return self._services.model_manager.load.load_model(model, submodel_type)
|
||||
else:
|
||||
_submodel_type = submodel_type or identifier.submodel_type
|
||||
submodel_type = submodel_type or identifier.submodel_type
|
||||
model = self._services.model_manager.store.get_model(identifier.key)
|
||||
return self._services.model_manager.load.load_model(model, _submodel_type)
|
||||
|
||||
message = f"Loading model {model.name}"
|
||||
if submodel_type:
|
||||
message += f" ({submodel_type.value})"
|
||||
self._util.signal_progress(message)
|
||||
return self._services.model_manager.load.load_model(model, submodel_type)
|
||||
|
||||
def load_by_attrs(
|
||||
self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None
|
||||
@@ -392,6 +411,10 @@ class ModelsInterface(InvocationContextInterface):
|
||||
if len(configs) > 1:
|
||||
raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")
|
||||
|
||||
message = f"Loading model {name}"
|
||||
if submodel_type:
|
||||
message += f" ({submodel_type.value})"
|
||||
self._util.signal_progress(message)
|
||||
return self._services.model_manager.load.load_model(configs[0], submodel_type)
|
||||
|
||||
def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:
|
||||
@@ -462,6 +485,7 @@ class ModelsInterface(InvocationContextInterface):
|
||||
Returns:
|
||||
Path to the downloaded model
|
||||
"""
|
||||
self._util.signal_progress(f"Downloading model {source}")
|
||||
return self._services.model_manager.install.download_and_cache_model(source=source)
|
||||
|
||||
def load_local_model(
|
||||
@@ -484,6 +508,8 @@ class ModelsInterface(InvocationContextInterface):
|
||||
Returns:
|
||||
A LoadedModelWithoutConfig object.
|
||||
"""
|
||||
|
||||
self._util.signal_progress(f"Loading model {model_path.name}")
|
||||
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
|
||||
|
||||
def load_remote_model(
|
||||
@@ -509,6 +535,8 @@ class ModelsInterface(InvocationContextInterface):
|
||||
A LoadedModelWithoutConfig object.
|
||||
"""
|
||||
model_path = self._services.model_manager.install.download_and_cache_model(source=str(source))
|
||||
|
||||
self._util.signal_progress(f"Loading model {source}")
|
||||
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
|
||||
|
||||
|
||||
@@ -702,12 +730,12 @@ def build_invocation_context(
|
||||
"""
|
||||
|
||||
logger = LoggerInterface(services=services, data=data)
|
||||
images = ImagesInterface(services=services, data=data)
|
||||
tensors = TensorsInterface(services=services, data=data)
|
||||
models = ModelsInterface(services=services, data=data)
|
||||
config = ConfigInterface(services=services, data=data)
|
||||
util = UtilInterface(services=services, data=data, is_canceled=is_canceled)
|
||||
conditioning = ConditioningInterface(services=services, data=data)
|
||||
models = ModelsInterface(services=services, data=data, util=util)
|
||||
images = ImagesInterface(services=services, data=data, util=util)
|
||||
boards = BoardsInterface(services=services, data=data)
|
||||
|
||||
ctx = InvocationContext(
|
||||
|
||||
@@ -0,0 +1,382 @@
|
||||
{
|
||||
"name": "SD3.5 Text to Image",
|
||||
"author": "InvokeAI",
|
||||
"description": "Sample text to image workflow for Stable Diffusion 3.5",
|
||||
"version": "1.0.0",
|
||||
"contact": "invoke@invoke.ai",
|
||||
"tags": "text2image, SD3.5, default",
|
||||
"notes": "",
|
||||
"exposedFields": [
|
||||
{
|
||||
"nodeId": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"fieldName": "model"
|
||||
},
|
||||
{
|
||||
"nodeId": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"fieldName": "prompt"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"version": "3.0.0",
|
||||
"category": "default"
|
||||
},
|
||||
"id": "e3a51d6b-8208-4d6d-b187-fcfe8b32934c",
|
||||
"nodes": [
|
||||
{
|
||||
"id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"type": "sd3_model_loader",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"model": {
|
||||
"name": "model",
|
||||
"label": "",
|
||||
"value": {
|
||||
"key": "f7b20be9-92a8-4cfb-bca4-6c3b5535c10b",
|
||||
"hash": "placeholder",
|
||||
"name": "stable-diffusion-3.5-medium",
|
||||
"base": "sd-3",
|
||||
"type": "main"
|
||||
}
|
||||
},
|
||||
"t5_encoder_model": {
|
||||
"name": "t5_encoder_model",
|
||||
"label": ""
|
||||
},
|
||||
"clip_l_model": {
|
||||
"name": "clip_l_model",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g_model": {
|
||||
"name": "clip_g_model",
|
||||
"label": ""
|
||||
},
|
||||
"vae_model": {
|
||||
"name": "vae_model",
|
||||
"label": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": -55.58689609637031,
|
||||
"y": -111.53602444662268
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"type": "rand_int",
|
||||
"version": "1.0.1",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"low": {
|
||||
"name": "low",
|
||||
"label": "",
|
||||
"value": 0
|
||||
},
|
||||
"high": {
|
||||
"name": "high",
|
||||
"label": "",
|
||||
"value": 2147483647
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 470.45870147220353,
|
||||
"y": 350.3141781644303
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"type": "sd3_l2i",
|
||||
"version": "1.3.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": false,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"board": {
|
||||
"name": "board",
|
||||
"label": ""
|
||||
},
|
||||
"metadata": {
|
||||
"name": "metadata",
|
||||
"label": ""
|
||||
},
|
||||
"latents": {
|
||||
"name": "latents",
|
||||
"label": ""
|
||||
},
|
||||
"vae": {
|
||||
"name": "vae",
|
||||
"label": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 1192.3097009334897,
|
||||
"y": -366.0994675072209
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"type": "sd3_text_encoder",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"clip_l": {
|
||||
"name": "clip_l",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g": {
|
||||
"name": "clip_g",
|
||||
"label": ""
|
||||
},
|
||||
"t5_encoder": {
|
||||
"name": "t5_encoder",
|
||||
"label": ""
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"label": "",
|
||||
"value": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 408.16054647924784,
|
||||
"y": 65.06415352118786
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"type": "sd3_text_encoder",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"clip_l": {
|
||||
"name": "clip_l",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g": {
|
||||
"name": "clip_g",
|
||||
"label": ""
|
||||
},
|
||||
"t5_encoder": {
|
||||
"name": "t5_encoder",
|
||||
"label": ""
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"label": "",
|
||||
"value": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 378.9283412440941,
|
||||
"y": -302.65777497352553
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"type": "sd3_denoise",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"board": {
|
||||
"name": "board",
|
||||
"label": ""
|
||||
},
|
||||
"metadata": {
|
||||
"name": "metadata",
|
||||
"label": ""
|
||||
},
|
||||
"transformer": {
|
||||
"name": "transformer",
|
||||
"label": ""
|
||||
},
|
||||
"positive_conditioning": {
|
||||
"name": "positive_conditioning",
|
||||
"label": ""
|
||||
},
|
||||
"negative_conditioning": {
|
||||
"name": "negative_conditioning",
|
||||
"label": ""
|
||||
},
|
||||
"cfg_scale": {
|
||||
"name": "cfg_scale",
|
||||
"label": "",
|
||||
"value": 3.5
|
||||
},
|
||||
"width": {
|
||||
"name": "width",
|
||||
"label": "",
|
||||
"value": 1024
|
||||
},
|
||||
"height": {
|
||||
"name": "height",
|
||||
"label": "",
|
||||
"value": 1024
|
||||
},
|
||||
"steps": {
|
||||
"name": "steps",
|
||||
"label": "",
|
||||
"value": 30
|
||||
},
|
||||
"seed": {
|
||||
"name": "seed",
|
||||
"label": "",
|
||||
"value": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 813.7814762740603,
|
||||
"y": -142.20529727605867
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cvae-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48bvae",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-3b4f7f27-cfc0-4373-a009-99c5290d0cd6t5_encoder",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "t5_encoder",
|
||||
"targetHandle": "t5_encoder"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-e17d34e7-6ed1-493c-9a85-4fcd291cb084t5_encoder",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "t5_encoder",
|
||||
"targetHandle": "t5_encoder"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_g",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "clip_g",
|
||||
"targetHandle": "clip_g"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_g",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "clip_g",
|
||||
"targetHandle": "clip_g"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_l",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "clip_l",
|
||||
"targetHandle": "clip_l"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_l",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "clip_l",
|
||||
"targetHandle": "clip_l"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ctransformer-c7539f7b-7ac5-49b9-93eb-87ede611409ftransformer",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "transformer",
|
||||
"targetHandle": "transformer"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-f7e394ac-6394-4096-abcb-de0d346506b3value-c7539f7b-7ac5-49b9-93eb-87ede611409fseed",
|
||||
"type": "default",
|
||||
"source": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c7539f7b-7ac5-49b9-93eb-87ede611409flatents-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48blatents",
|
||||
"type": "default",
|
||||
"source": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-e17d34e7-6ed1-493c-9a85-4fcd291cb084conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fpositive_conditioning",
|
||||
"type": "default",
|
||||
"source": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3b4f7f27-cfc0-4373-a009-99c5290d0cd6conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fnegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -34,6 +34,25 @@ SD1_5_LATENT_RGB_FACTORS = [
|
||||
[-0.1307, -0.1874, -0.7445], # L4
|
||||
]
|
||||
|
||||
SD3_5_LATENT_RGB_FACTORS = [
|
||||
[-0.05240681, 0.03251581, 0.0749016],
|
||||
[-0.0580572, 0.00759826, 0.05729818],
|
||||
[0.16144888, 0.01270368, -0.03768577],
|
||||
[0.14418615, 0.08460266, 0.15941818],
|
||||
[0.04894035, 0.0056485, -0.06686988],
|
||||
[0.05187166, 0.19222395, 0.06261094],
|
||||
[0.1539433, 0.04818359, 0.07103094],
|
||||
[-0.08601796, 0.09013458, 0.10893912],
|
||||
[-0.12398469, -0.06766567, 0.0033688],
|
||||
[-0.0439737, 0.07825329, 0.02258823],
|
||||
[0.03101129, 0.06382551, 0.07753657],
|
||||
[-0.01315361, 0.08554491, -0.08772475],
|
||||
[0.06464487, 0.05914605, 0.13262741],
|
||||
[-0.07863674, -0.02261737, -0.12761454],
|
||||
[-0.09923835, -0.08010759, -0.06264447],
|
||||
[-0.03392309, -0.0804029, -0.06078822],
|
||||
]
|
||||
|
||||
FLUX_LATENT_RGB_FACTORS = [
|
||||
[-0.0412, 0.0149, 0.0521],
|
||||
[0.0056, 0.0291, 0.0768],
|
||||
@@ -110,6 +129,9 @@ def stable_diffusion_step_callback(
|
||||
sdxl_latent_rgb_factors = torch.tensor(SDXL_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
|
||||
sdxl_smooth_matrix = torch.tensor(SDXL_SMOOTH_MATRIX, dtype=sample.dtype, device=sample.device)
|
||||
image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix)
|
||||
elif base_model == BaseModelType.StableDiffusion3:
|
||||
sd3_latent_rgb_factors = torch.tensor(SD3_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
|
||||
image = sample_to_lowres_estimated_image(sample, sd3_latent_rgb_factors)
|
||||
else:
|
||||
v1_5_latent_rgb_factors = torch.tensor(SD1_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
|
||||
image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors)
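The SD3 branch above multiplies each of the 16 latent channels by a fixed RGB triplet to build a cheap low-resolution preview. A hedged sketch of that projection (a plain linear map; not the exact sample_to_lowres_estimated_image implementation):

import torch

def latents_to_rgb_preview(sample: torch.Tensor, rgb_factors: list[list[float]]) -> torch.Tensor:
    # sample: (C, H, W) latents; rgb_factors: C rows of [R, G, B] weights.
    factors = torch.tensor(rgb_factors, dtype=sample.dtype, device=sample.device)  # (C, 3)
    rgb = torch.einsum("chw,cr->rhw", sample, factors)  # weighted sum over latent channels
    # Normalize to [0, 255] for display.
    rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min() + 1e-8) * 255.0
    return rgb.byte()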
|
||||
|
||||
83
invokeai/backend/flux/custom_block_processor.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import einops
|
||||
import torch
|
||||
|
||||
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
|
||||
from invokeai.backend.flux.math import attention
|
||||
from invokeai.backend.flux.modules.layers import DoubleStreamBlock
|
||||
|
||||
|
||||
class CustomDoubleStreamBlockProcessor:
|
||||
"""A class containing a custom implementation of DoubleStreamBlock.forward() with additional features
|
||||
(IP-Adapter, etc.).
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def _double_stream_block_forward(
|
||||
block: DoubleStreamBlock, img: torch.Tensor, txt: torch.Tensor, vec: torch.Tensor, pe: torch.Tensor
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""This function is a direct copy of DoubleStreamBlock.forward(), but it returns some of the intermediate
|
||||
values.
|
||||
"""
|
||||
img_mod1, img_mod2 = block.img_mod(vec)
|
||||
txt_mod1, txt_mod2 = block.txt_mod(vec)
|
||||
|
||||
# prepare image for attention
|
||||
img_modulated = block.img_norm1(img)
|
||||
img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
|
||||
img_qkv = block.img_attn.qkv(img_modulated)
|
||||
img_q, img_k, img_v = einops.rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
|
||||
img_q, img_k = block.img_attn.norm(img_q, img_k, img_v)
|
||||
|
||||
# prepare txt for attention
|
||||
txt_modulated = block.txt_norm1(txt)
|
||||
txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
|
||||
txt_qkv = block.txt_attn.qkv(txt_modulated)
|
||||
txt_q, txt_k, txt_v = einops.rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
|
||||
txt_q, txt_k = block.txt_attn.norm(txt_q, txt_k, txt_v)
|
||||
|
||||
# run actual attention
|
||||
q = torch.cat((txt_q, img_q), dim=2)
|
||||
k = torch.cat((txt_k, img_k), dim=2)
|
||||
v = torch.cat((txt_v, img_v), dim=2)
|
||||
|
||||
attn = attention(q, k, v, pe=pe)
|
||||
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
|
||||
|
||||
# calculate the img blocks
|
||||
img = img + img_mod1.gate * block.img_attn.proj(img_attn)
|
||||
img = img + img_mod2.gate * block.img_mlp((1 + img_mod2.scale) * block.img_norm2(img) + img_mod2.shift)
|
||||
|
||||
# calculate the txt blocks
|
||||
txt = txt + txt_mod1.gate * block.txt_attn.proj(txt_attn)
|
||||
txt = txt + txt_mod2.gate * block.txt_mlp((1 + txt_mod2.scale) * block.txt_norm2(txt) + txt_mod2.shift)
|
||||
return img, txt, img_q
|
||||
|
||||
@staticmethod
|
||||
def custom_double_block_forward(
|
||||
timestep_index: int,
|
||||
total_num_timesteps: int,
|
||||
block_index: int,
|
||||
block: DoubleStreamBlock,
|
||||
img: torch.Tensor,
|
||||
txt: torch.Tensor,
|
||||
vec: torch.Tensor,
|
||||
pe: torch.Tensor,
|
||||
ip_adapter_extensions: list[XLabsIPAdapterExtension],
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""A custom implementation of DoubleStreamBlock.forward() with additional features:
|
||||
- IP-Adapter support
|
||||
"""
|
||||
img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(block, img, txt, vec, pe)
|
||||
|
||||
# Apply IP-Adapter conditioning.
|
||||
for ip_adapter_extension in ip_adapter_extensions:
|
||||
img = ip_adapter_extension.run_ip_adapter(
|
||||
timestep_index=timestep_index,
|
||||
total_num_timesteps=total_num_timesteps,
|
||||
block_index=block_index,
|
||||
block=block,
|
||||
img_q=img_q,
|
||||
img=img,
|
||||
)
|
||||
|
||||
return img, txt
|
||||
@@ -1,3 +1,4 @@
|
||||
import math
|
||||
from typing import Callable
|
||||
|
||||
import torch
|
||||
@@ -7,6 +8,7 @@ from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFl
|
||||
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
|
||||
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
|
||||
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
|
||||
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
|
||||
from invokeai.backend.flux.model import Flux
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
|
||||
@@ -16,15 +18,23 @@ def denoise(
|
||||
# model input
|
||||
img: torch.Tensor,
|
||||
img_ids: torch.Tensor,
|
||||
# positive text conditioning
|
||||
txt: torch.Tensor,
|
||||
txt_ids: torch.Tensor,
|
||||
vec: torch.Tensor,
|
||||
# negative text conditioning
|
||||
neg_txt: torch.Tensor | None,
|
||||
neg_txt_ids: torch.Tensor | None,
|
||||
neg_vec: torch.Tensor | None,
|
||||
# sampling parameters
|
||||
timesteps: list[float],
|
||||
step_callback: Callable[[PipelineIntermediateState], None],
|
||||
guidance: float,
|
||||
cfg_scale: list[float],
|
||||
inpaint_extension: InpaintExtension | None,
|
||||
controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension],
|
||||
pos_ip_adapter_extensions: list[XLabsIPAdapterExtension],
|
||||
neg_ip_adapter_extensions: list[XLabsIPAdapterExtension],
|
||||
):
|
||||
# step 0 is the initial state
|
||||
total_steps = len(timesteps) - 1
|
||||
@@ -37,10 +47,9 @@ def denoise(
|
||||
latents=img,
|
||||
),
|
||||
)
|
||||
step = 1
|
||||
# guidance_vec is ignored for schnell.
|
||||
guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
|
||||
for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
|
||||
for step_index, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
|
||||
t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
|
||||
|
||||
# Run ControlNet models.
|
||||
@@ -48,7 +57,7 @@ def denoise(
|
||||
for controlnet_extension in controlnet_extensions:
|
||||
controlnet_residuals.append(
|
||||
controlnet_extension.run_controlnet(
|
||||
timestep_index=step - 1,
|
||||
timestep_index=step_index,
|
||||
total_num_timesteps=total_steps,
|
||||
img=img,
|
||||
img_ids=img_ids,
|
||||
@@ -61,7 +70,7 @@ def denoise(
|
||||
)
|
||||
|
||||
# Merge the ControlNet residuals from multiple ControlNets.
|
||||
# TODO(ryand): We may want to alculate the sum just-in-time to keep peak memory low. Keep in mind, that the
|
||||
# TODO(ryand): We may want to calculate the sum just-in-time to keep peak memory low. Keep in mind, that the
|
||||
# controlnet_residuals datastructure is efficient in that it likely contains multiple references to the same
|
||||
# tensors. Calculating the sum materializes each tensor into its own instance.
|
||||
merged_controlnet_residuals = sum_controlnet_flux_outputs(controlnet_residuals)
|
||||
@@ -74,10 +83,39 @@ def denoise(
|
||||
y=vec,
|
||||
timesteps=t_vec,
|
||||
guidance=guidance_vec,
|
||||
timestep_index=step_index,
|
||||
total_num_timesteps=total_steps,
|
||||
controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
|
||||
controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
|
||||
ip_adapter_extensions=pos_ip_adapter_extensions,
|
||||
)
|
||||
|
||||
step_cfg_scale = cfg_scale[step_index]
|
||||
|
||||
# If step_cfg_scale is 1.0, we don't need to run the negative prediction.
|
||||
if not math.isclose(step_cfg_scale, 1.0):
|
||||
# TODO(ryand): Add option to run positive and negative predictions in a single batch for better performance
|
||||
# on systems with sufficient VRAM.
|
||||
|
||||
if neg_txt is None or neg_txt_ids is None or neg_vec is None:
|
||||
raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
|
||||
|
||||
neg_pred = model(
|
||||
img=img,
|
||||
img_ids=img_ids,
|
||||
txt=neg_txt,
|
||||
txt_ids=neg_txt_ids,
|
||||
y=neg_vec,
|
||||
timesteps=t_vec,
|
||||
guidance=guidance_vec,
|
||||
timestep_index=step_index,
|
||||
total_num_timesteps=total_steps,
|
||||
controlnet_double_block_residuals=None,
|
||||
controlnet_single_block_residuals=None,
|
||||
ip_adapter_extensions=neg_ip_adapter_extensions,
|
||||
)
|
||||
pred = neg_pred + step_cfg_scale * (pred - neg_pred)
|
||||
|
||||
preview_img = img - t_curr * pred
|
||||
img = img + (t_prev - t_curr) * pred
|
||||
|
||||
@@ -87,13 +125,12 @@ def denoise(
|
||||
|
||||
step_callback(
|
||||
PipelineIntermediateState(
|
||||
step=step,
|
||||
step=step_index + 1,
|
||||
order=1,
|
||||
total_steps=total_steps,
|
||||
timestep=int(t_curr),
|
||||
latents=preview_img,
|
||||
),
|
||||
)
|
||||
step += 1
|
||||
|
||||
return img
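Two quantities in the loop above are worth spelling out. Assuming the rectified-flow parameterization x_t = (1 - t) * x0 + t * noise, with the model predicting the velocity v, the preview image is the fully denoised estimate x0 = x_t - t * v, and the latent update is a first-order Euler step toward t_prev. A minimal sketch under that assumption:

def x0_estimate(x_t, v, t_curr):
    # Fully denoised estimate used for previews: x0 = x_t - t * v.
    return x_t - t_curr * v

def euler_step(x_t, v, t_curr, t_prev):
    # Timesteps decrease from 1 toward 0, so (t_prev - t_curr) is negative.
    return x_t + (t_prev - t_curr) * v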
|
||||
|
||||
@@ -0,0 +1,89 @@
import math
from typing import List, Union

import einops
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
from invokeai.backend.flux.modules.layers import DoubleStreamBlock


class XLabsIPAdapterExtension:
    def __init__(
        self,
        model: XlabsIpAdapterFlux,
        image_prompt_clip_embed: torch.Tensor,
        weight: Union[float, List[float]],
        begin_step_percent: float,
        end_step_percent: float,
    ):
        self._model = model
        self._image_prompt_clip_embed = image_prompt_clip_embed
        self._weight = weight
        self._begin_step_percent = begin_step_percent
        self._end_step_percent = end_step_percent

        self._image_proj: torch.Tensor | None = None

    def _get_weight(self, timestep_index: int, total_num_timesteps: int) -> float:
        first_step = math.floor(self._begin_step_percent * total_num_timesteps)
        last_step = math.ceil(self._end_step_percent * total_num_timesteps)

        if timestep_index < first_step or timestep_index > last_step:
            return 0.0

        if isinstance(self._weight, list):
            return self._weight[timestep_index]

        return self._weight

    @staticmethod
    def run_clip_image_encoder(
        pil_image: List[Image.Image], image_encoder: CLIPVisionModelWithProjection
    ) -> torch.Tensor:
        clip_image_processor = CLIPImageProcessor()
        clip_image: torch.Tensor = clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
        clip_image = clip_image.to(device=image_encoder.device, dtype=image_encoder.dtype)
        clip_image_embeds = image_encoder(clip_image).image_embeds
        return clip_image_embeds

    def run_image_proj(self, dtype: torch.dtype):
        image_prompt_clip_embed = self._image_prompt_clip_embed.to(dtype=dtype)
        self._image_proj = self._model.image_proj(image_prompt_clip_embed)

    def run_ip_adapter(
        self,
        timestep_index: int,
        total_num_timesteps: int,
        block_index: int,
        block: DoubleStreamBlock,
        img_q: torch.Tensor,
        img: torch.Tensor,
    ) -> torch.Tensor:
        """The logic in this function is based on:
        https://github.com/XLabs-AI/x-flux/blob/47495425dbed499be1e8e5a6e52628b07349cba2/src/flux/modules/layers.py#L245-L301
        """
        weight = self._get_weight(timestep_index=timestep_index, total_num_timesteps=total_num_timesteps)
        if weight < 1e-6:
            return img

        ip_adapter_block = self._model.ip_adapter_double_blocks.double_blocks[block_index]

        ip_key = ip_adapter_block.ip_adapter_double_stream_k_proj(self._image_proj)
        ip_value = ip_adapter_block.ip_adapter_double_stream_v_proj(self._image_proj)

        # Reshape projections for multi-head attention.
        ip_key = einops.rearrange(ip_key, "B L (H D) -> B H L D", H=block.num_heads)
        ip_value = einops.rearrange(ip_value, "B L (H D) -> B H L D", H=block.num_heads)

        # Compute attention between IP projections and the latent query.
        ip_attn = torch.nn.functional.scaled_dot_product_attention(
            img_q, ip_key, ip_value, dropout_p=0.0, is_causal=False
        )
        ip_attn = einops.rearrange(ip_attn, "B H L D -> B L (H D)", H=block.num_heads)

        img = img + weight * ip_attn

        return img
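The begin/end step gating in _get_weight can be exercised with a small standalone sketch (illustrative only; the numbers below are made up):

import math


def ip_adapter_weight(
    timestep_index: int, total_num_timesteps: int, weight: float, begin_step_percent: float, end_step_percent: float
) -> float:
    # Mirrors the gating logic above: outside [first_step, last_step] the
    # IP-Adapter contribution is disabled by returning a weight of 0.0.
    first_step = math.floor(begin_step_percent * total_num_timesteps)
    last_step = math.ceil(end_step_percent * total_num_timesteps)
    if timestep_index < first_step or timestep_index > last_step:
        return 0.0
    return weight


# With 30 steps and a 0.2-0.8 window, the adapter is active roughly for steps 6..24.
schedule = [ip_adapter_weight(i, 30, 0.6, 0.2, 0.8) for i in range(30)]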
invokeai/backend/flux/ip_adapter/__init__.py (new file, 0 lines)
@@ -0,0 +1,93 @@
# This file is based on:
# https://github.com/XLabs-AI/x-flux/blob/47495425dbed499be1e8e5a6e52628b07349cba2/src/flux/modules/layers.py#L221
import einops
import torch

from invokeai.backend.flux.math import attention
from invokeai.backend.flux.modules.layers import DoubleStreamBlock


class IPDoubleStreamBlockProcessor(torch.nn.Module):
    """Attention processor for handling IP-adapter with double stream block."""

    def __init__(self, context_dim: int, hidden_dim: int):
        super().__init__()

        # Ensure context_dim matches the dimension of image_proj
        self.context_dim = context_dim
        self.hidden_dim = hidden_dim

        # Initialize projections for IP-adapter
        self.ip_adapter_double_stream_k_proj = torch.nn.Linear(context_dim, hidden_dim, bias=True)
        self.ip_adapter_double_stream_v_proj = torch.nn.Linear(context_dim, hidden_dim, bias=True)

        torch.nn.init.zeros_(self.ip_adapter_double_stream_k_proj.weight)
        torch.nn.init.zeros_(self.ip_adapter_double_stream_k_proj.bias)

        torch.nn.init.zeros_(self.ip_adapter_double_stream_v_proj.weight)
        torch.nn.init.zeros_(self.ip_adapter_double_stream_v_proj.bias)

    def __call__(
        self,
        attn: DoubleStreamBlock,
        img: torch.Tensor,
        txt: torch.Tensor,
        vec: torch.Tensor,
        pe: torch.Tensor,
        image_proj: torch.Tensor,
        ip_scale: float = 1.0,
    ):
        # Prepare image for attention
        img_mod1, img_mod2 = attn.img_mod(vec)
        txt_mod1, txt_mod2 = attn.txt_mod(vec)

        img_modulated = attn.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = attn.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = einops.rearrange(
            img_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads, D=attn.head_dim
        )
        img_q, img_k = attn.img_attn.norm(img_q, img_k, img_v)

        txt_modulated = attn.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = attn.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = einops.rearrange(
            txt_qkv, "B L (K H D) -> K B H L D", K=3, H=attn.num_heads, D=attn.head_dim
        )
        txt_q, txt_k = attn.txt_attn.norm(txt_q, txt_k, txt_v)

        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        attn1 = attention(q, k, v, pe=pe)
        txt_attn, img_attn = attn1[:, : txt.shape[1]], attn1[:, txt.shape[1] :]

        # print(f"txt_attn shape: {txt_attn.size()}")
        # print(f"img_attn shape: {img_attn.size()}")

        img = img + img_mod1.gate * attn.img_attn.proj(img_attn)
        img = img + img_mod2.gate * attn.img_mlp((1 + img_mod2.scale) * attn.img_norm2(img) + img_mod2.shift)

        txt = txt + txt_mod1.gate * attn.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * attn.txt_mlp((1 + txt_mod2.scale) * attn.txt_norm2(txt) + txt_mod2.shift)

        # IP-adapter processing
        ip_query = img_q  # latent sample query
        ip_key = self.ip_adapter_double_stream_k_proj(image_proj)
        ip_value = self.ip_adapter_double_stream_v_proj(image_proj)

        # Reshape projections for multi-head attention
        ip_key = einops.rearrange(ip_key, "B L (H D) -> B H L D", H=attn.num_heads, D=attn.head_dim)
        ip_value = einops.rearrange(ip_value, "B L (H D) -> B H L D", H=attn.num_heads, D=attn.head_dim)

        # Compute attention between IP projections and the latent query
        ip_attention = torch.nn.functional.scaled_dot_product_attention(
            ip_query, ip_key, ip_value, dropout_p=0.0, is_causal=False
        )
        ip_attention = einops.rearrange(ip_attention, "B H L D -> B L (H D)", H=attn.num_heads, D=attn.head_dim)

        img = img + ip_scale * ip_attention

        return img, txt
invokeai/backend/flux/ip_adapter/state_dict_utils.py (new file, 52 lines)
@@ -0,0 +1,52 @@
from typing import Any, Dict

import torch

from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterParams


def is_state_dict_xlabs_ip_adapter(sd: Dict[str, Any]) -> bool:
    """Is the state dict for an XLabs FLUX IP-Adapter model?

    This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision.
    """
    # If all of the expected keys are present, then this is very likely an XLabs IP-Adapter model.
    expected_keys = {
        "double_blocks.0.processor.ip_adapter_double_stream_k_proj.bias",
        "double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight",
        "double_blocks.0.processor.ip_adapter_double_stream_v_proj.bias",
        "double_blocks.0.processor.ip_adapter_double_stream_v_proj.weight",
        "ip_adapter_proj_model.norm.bias",
        "ip_adapter_proj_model.norm.weight",
        "ip_adapter_proj_model.proj.bias",
        "ip_adapter_proj_model.proj.weight",
    }

    if expected_keys.issubset(sd.keys()):
        return True
    return False


def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str, torch.Tensor]) -> XlabsIpAdapterParams:
    num_double_blocks = 0
    context_dim = 0
    hidden_dim = 0

    # Count the number of double blocks.
    double_block_index = 0
    while f"double_blocks.{double_block_index}.processor.ip_adapter_double_stream_k_proj.weight" in state_dict:
        double_block_index += 1
    num_double_blocks = double_block_index

    hidden_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[0]
    context_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[1]
    clip_embeddings_dim = state_dict["ip_adapter_proj_model.proj.weight"].shape[1]
    clip_extra_context_tokens = state_dict["ip_adapter_proj_model.proj.weight"].shape[0] // context_dim

    return XlabsIpAdapterParams(
        num_double_blocks=num_double_blocks,
        context_dim=context_dim,
        hidden_dim=hidden_dim,
        clip_embeddings_dim=clip_embeddings_dim,
        clip_extra_context_tokens=clip_extra_context_tokens,
    )
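To illustrate the shape arithmetic used by infer_xlabs_ip_adapter_params_from_state_dict, here is how it would behave on a tiny synthetic state dict (made-up dimensions, for illustration only):

import torch

# Hypothetical toy dimensions: 2 double blocks, context_dim=8, hidden_dim=16,
# clip_embeddings_dim=32, clip_extra_context_tokens=4.
sd = {}
for i in range(2):
    sd[f"double_blocks.{i}.processor.ip_adapter_double_stream_k_proj.weight"] = torch.zeros(16, 8)
    sd[f"double_blocks.{i}.processor.ip_adapter_double_stream_k_proj.bias"] = torch.zeros(16)
    sd[f"double_blocks.{i}.processor.ip_adapter_double_stream_v_proj.weight"] = torch.zeros(16, 8)
    sd[f"double_blocks.{i}.processor.ip_adapter_double_stream_v_proj.bias"] = torch.zeros(16)
sd["ip_adapter_proj_model.norm.weight"] = torch.zeros(8)
sd["ip_adapter_proj_model.norm.bias"] = torch.zeros(8)
# proj maps the CLIP embedding to clip_extra_context_tokens * context_dim features.
sd["ip_adapter_proj_model.proj.weight"] = torch.zeros(4 * 8, 32)
sd["ip_adapter_proj_model.proj.bias"] = torch.zeros(4 * 8)

hidden_dim = sd["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[0]   # 16
context_dim = sd["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[1]  # 8
clip_embeddings_dim = sd["ip_adapter_proj_model.proj.weight"].shape[1]                         # 32
clip_extra_context_tokens = sd["ip_adapter_proj_model.proj.weight"].shape[0] // context_dim    # 4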
invokeai/backend/flux/ip_adapter/xlabs_ip_adapter_flux.py (new file, 70 lines)
@@ -0,0 +1,70 @@
from dataclasses import dataclass

import torch

from invokeai.backend.ip_adapter.ip_adapter import ImageProjModel


class IPDoubleStreamBlock(torch.nn.Module):
    def __init__(self, context_dim: int, hidden_dim: int):
        super().__init__()

        self.context_dim = context_dim
        self.hidden_dim = hidden_dim

        self.ip_adapter_double_stream_k_proj = torch.nn.Linear(context_dim, hidden_dim, bias=True)
        self.ip_adapter_double_stream_v_proj = torch.nn.Linear(context_dim, hidden_dim, bias=True)


class IPAdapterDoubleBlocks(torch.nn.Module):
    def __init__(self, num_double_blocks: int, context_dim: int, hidden_dim: int):
        super().__init__()
        self.double_blocks = torch.nn.ModuleList(
            [IPDoubleStreamBlock(context_dim, hidden_dim) for _ in range(num_double_blocks)]
        )


@dataclass
class XlabsIpAdapterParams:
    num_double_blocks: int
    context_dim: int
    hidden_dim: int

    clip_embeddings_dim: int
    clip_extra_context_tokens: int


class XlabsIpAdapterFlux(torch.nn.Module):
    def __init__(self, params: XlabsIpAdapterParams):
        super().__init__()
        self.image_proj = ImageProjModel(
            cross_attention_dim=params.context_dim,
            clip_embeddings_dim=params.clip_embeddings_dim,
            clip_extra_context_tokens=params.clip_extra_context_tokens,
        )
        self.ip_adapter_double_blocks = IPAdapterDoubleBlocks(
            num_double_blocks=params.num_double_blocks, context_dim=params.context_dim, hidden_dim=params.hidden_dim
        )

    def load_xlabs_state_dict(self, state_dict: dict[str, torch.Tensor], assign: bool = False):
        """We need this custom function to load state dicts rather than using .load_state_dict(...) because the model
        structure does not match the state_dict structure.
        """
        # Split the state_dict into the image projection model and the double blocks.
        image_proj_sd: dict[str, torch.Tensor] = {}
        double_blocks_sd: dict[str, torch.Tensor] = {}
        for k, v in state_dict.items():
            if k.startswith("ip_adapter_proj_model."):
                image_proj_sd[k] = v
            elif k.startswith("double_blocks."):
                double_blocks_sd[k] = v
            else:
                raise ValueError(f"Unexpected key: {k}")

        # Initialize the image projection model.
        image_proj_sd = {k.replace("ip_adapter_proj_model.", ""): v for k, v in image_proj_sd.items()}
        self.image_proj.load_state_dict(image_proj_sd, assign=assign)

        # Initialize the double blocks.
        double_blocks_sd = {k.replace("processor.", ""): v for k, v in double_blocks_sd.items()}
        self.ip_adapter_double_blocks.load_state_dict(double_blocks_sd, assign=assign)
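Putting the pieces of this new module together, a FLUX IP-Adapter checkpoint would be detected, sized, and loaded roughly as follows. This is a sketch only; the same flow appears in the model loader later in this diff, and "path/to/ip_adapter.safetensors" is a placeholder path:

import accelerate
from safetensors.torch import load_file

from invokeai.backend.flux.ip_adapter.state_dict_utils import (
    infer_xlabs_ip_adapter_params_from_state_dict,
    is_state_dict_xlabs_ip_adapter,
)
from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux

sd = load_file("path/to/ip_adapter.safetensors")
if not is_state_dict_xlabs_ip_adapter(sd):
    raise ValueError("Not an XLabs FLUX IP-Adapter state dict.")

params = infer_xlabs_ip_adapter_params_from_state_dict(sd)
# Build the module skeleton without allocating weights, then attach the
# checkpoint tensors directly via the custom loader.
with accelerate.init_empty_weights():
    model = XlabsIpAdapterFlux(params=params)
model.load_xlabs_state_dict(sd, assign=True)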
@@ -5,6 +5,8 @@ from dataclasses import dataclass
import torch
from torch import Tensor, nn

from invokeai.backend.flux.custom_block_processor import CustomDoubleStreamBlockProcessor
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.modules.layers import (
    DoubleStreamBlock,
    EmbedND,

@@ -88,8 +90,11 @@ class Flux(nn.Module):
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None,
        timestep_index: int,
        total_num_timesteps: int,
        controlnet_double_block_residuals: list[Tensor] | None,
        controlnet_single_block_residuals: list[Tensor] | None,
        ip_adapter_extensions: list[XLabsIPAdapterExtension],
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

@@ -111,7 +116,19 @@ class Flux(nn.Module):
        if controlnet_double_block_residuals is not None:
            assert len(controlnet_double_block_residuals) == len(self.double_blocks)
        for block_index, block in enumerate(self.double_blocks):
            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
            assert isinstance(block, DoubleStreamBlock)

            img, txt = CustomDoubleStreamBlockProcessor.custom_double_block_forward(
                timestep_index=timestep_index,
                total_num_timesteps=total_num_timesteps,
                block_index=block_index,
                block=block,
                img=img,
                txt=txt,
                vec=vec,
                pe=pe,
                ip_adapter_extensions=ip_adapter_extensions,
            )

            if controlnet_double_block_residuals is not None:
                img += controlnet_double_block_residuals[block_index]

@@ -168,8 +168,17 @@ def generate_img_ids(h: int, w: int, batch_size: int, device: torch.device, dtyp
    Returns:
        torch.Tensor: Image position ids.
    """

    if device.type == "mps":
        orig_dtype = dtype
        dtype = torch.float16

    img_ids = torch.zeros(h // 2, w // 2, 3, device=device, dtype=dtype)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=device, dtype=dtype)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=device, dtype=dtype)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)

    if device.type == "mps":
        img_ids = img_ids.to(orig_dtype)

    return img_ids
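For reference, the position ids built by generate_img_ids are just an (h/2 x w/2) grid of (0, row, col) triples flattened per batch item. A tiny standalone sketch with h = w = 4 (illustrative only, computed on CPU):

import torch
from einops import repeat

h, w, batch_size = 4, 4, 1
img_ids = torch.zeros(h // 2, w // 2, 3)
img_ids[..., 1] = torch.arange(h // 2)[:, None]  # row index
img_ids[..., 2] = torch.arange(w // 2)[None, :]  # column index
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)
# img_ids[0] == [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]]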
@@ -1,4 +1,4 @@
from typing import Optional
from typing import Optional, TypeAlias

import torch
from PIL import Image

@@ -7,6 +7,14 @@ from transformers.models.sam.processing_sam import SamProcessor

from invokeai.backend.raw_model import RawModel

# Type aliases for the inputs to the SAM model.
ListOfBoundingBoxes: TypeAlias = list[list[int]]
"""A list of bounding boxes. Each bounding box is in the format [xmin, ymin, xmax, ymax]."""
ListOfPoints: TypeAlias = list[list[int]]
"""A list of points. Each point is in the format [x, y]."""
ListOfPointLabels: TypeAlias = list[int]
"""A list of SAM point labels. Each label is an integer where -1 is background, 0 is neutral, and 1 is foreground."""


class SegmentAnythingPipeline(RawModel):
    """A wrapper class for the transformers SAM model and processor that makes it compatible with the model manager."""

@@ -27,20 +35,53 @@ class SegmentAnythingPipeline(RawModel):

        return calc_module_size(self._sam_model)

    def segment(self, image: Image.Image, bounding_boxes: list[list[int]]) -> torch.Tensor:
    def segment(
        self,
        image: Image.Image,
        bounding_boxes: list[list[int]] | None = None,
        point_lists: list[list[list[int]]] | None = None,
    ) -> torch.Tensor:
        """Run the SAM model.

        Either bounding_boxes or point_lists must be provided. If both are provided, bounding_boxes will be used and
        point_lists will be ignored.

        Args:
            image (Image.Image): The image to segment.
            bounding_boxes (list[list[int]]): The bounding box prompts. Each bounding box is in the format
                [xmin, ymin, xmax, ymax].
            point_lists (list[list[list[int]]]): The points prompts. Each point is in the format [x, y, label].
                `label` is an integer where -1 is background, 0 is neutral, and 1 is foreground.

        Returns:
            torch.Tensor: The segmentation masks. dtype: torch.bool. shape: [num_masks, channels, height, width].
        """
        # Add batch dimension of 1 to the bounding boxes.
        boxes = [bounding_boxes]
        inputs = self._sam_processor(images=image, input_boxes=boxes, return_tensors="pt").to(self._sam_model.device)

        # Prep the inputs:
        # - Create a list of bounding boxes or points and labels.
        # - Add a batch dimension of 1 to the inputs.
        if bounding_boxes:
            input_boxes: list[ListOfBoundingBoxes] | None = [bounding_boxes]
            input_points: list[ListOfPoints] | None = None
            input_labels: list[ListOfPointLabels] | None = None
        elif point_lists:
            input_boxes: list[ListOfBoundingBoxes] | None = None
            input_points: list[ListOfPoints] | None = []
            input_labels: list[ListOfPointLabels] | None = []
            for point_list in point_lists:
                input_points.append([[p[0], p[1]] for p in point_list])
                input_labels.append([p[2] for p in point_list])

        else:
            raise ValueError("Either bounding_boxes or points and labels must be provided.")

        inputs = self._sam_processor(
            images=image,
            input_boxes=input_boxes,
            input_points=input_points,
            input_labels=input_labels,
            return_tensors="pt",
        ).to(self._sam_model.device)
        outputs = self._sam_model(**inputs)
        masks = self._sam_processor.post_process_masks(
            masks=outputs.pred_masks,
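A hedged usage sketch of the extended segment() API. The pipeline construction is elided; sam_pipeline is assumed to be an already-loaded SegmentAnythingPipeline, and the coordinates below are made up:

from PIL import Image

image = Image.new("RGB", (512, 512))

# Box prompts: one [xmin, ymin, xmax, ymax] box per desired mask.
masks_from_boxes = sam_pipeline.segment(image=image, bounding_boxes=[[64, 64, 256, 256]])

# Point prompts: one list of [x, y, label] points per desired mask, where
# label is 1 (foreground), 0 (neutral), or -1 (background).
masks_from_points = sam_pipeline.segment(
    image=image,
    point_lists=[[[128, 128, 1], [300, 300, -1]]],
)
# Both calls return a bool tensor of shape [num_masks, channels, height, width].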
@@ -45,8 +45,9 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
    # Constants for FLUX.1
    num_double_layers = 19
    num_single_layers = 38
    # inner_dim = 3072
    # mlp_ratio = 4.0
    hidden_size = 3072
    mlp_ratio = 4.0
    mlp_hidden_dim = int(hidden_size * mlp_ratio)

    layers: dict[str, AnyLoRALayer] = {}

@@ -62,30 +63,43 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
        layers[dst_key] = LoRALayer.from_state_dict_values(values=value)
        assert len(src_layer_dict) == 0

    def add_qkv_lora_layer_if_present(src_keys: list[str], dst_qkv_key: str) -> None:
    def add_qkv_lora_layer_if_present(
        src_keys: list[str],
        src_weight_shapes: list[tuple[int, int]],
        dst_qkv_key: str,
        allow_missing_keys: bool = False,
    ) -> None:
        """Handle the Q, K, V matrices for a transformer block. We need special handling because the diffusers format
        stores them in separate matrices, whereas the BFL format used internally by InvokeAI concatenates them.
        """
        # We expect that either all src keys are present or none of them are. Verify this.
        keys_present = [key in grouped_state_dict for key in src_keys]
        assert all(keys_present) or not any(keys_present)

        # If none of the keys are present, return early.
        keys_present = [key in grouped_state_dict for key in src_keys]
        if not any(keys_present):
            return

        src_layer_dicts = [grouped_state_dict.pop(key) for key in src_keys]
        sub_layers: list[LoRALayer] = []
        for src_layer_dict in src_layer_dicts:
            values = {
                "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
            }
            if alpha is not None:
                values["alpha"] = torch.tensor(alpha)
            sub_layers.append(LoRALayer.from_state_dict_values(values=values))
            assert len(src_layer_dict) == 0
        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers, concat_axis=0)
        for src_key, src_weight_shape in zip(src_keys, src_weight_shapes, strict=True):
            src_layer_dict = grouped_state_dict.pop(src_key, None)
            if src_layer_dict is not None:
                values = {
                    "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                    "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
                }
                if alpha is not None:
                    values["alpha"] = torch.tensor(alpha)
                assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
                assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
                assert len(src_layer_dict) == 0
            else:
                if not allow_missing_keys:
                    raise ValueError(f"Missing LoRA layer: '{src_key}'.")
                values = {
                    "lora_up.weight": torch.zeros((src_weight_shape[0], 1)),
                    "lora_down.weight": torch.zeros((1, src_weight_shape[1])),
                }
                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers)

    # time_text_embed.timestep_embedder -> time_in.
    add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")

@@ -118,6 +132,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"transformer_blocks.{i}.attn.to_k",
                f"transformer_blocks.{i}.attn.to_v",
            ],
            [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
            f"double_blocks.{i}.img_attn.qkv",
        )
        add_qkv_lora_layer_if_present(

@@ -126,6 +141,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"transformer_blocks.{i}.attn.add_k_proj",
                f"transformer_blocks.{i}.attn.add_v_proj",
            ],
            [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
            f"double_blocks.{i}.txt_attn.qkv",
        )

@@ -175,7 +191,14 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"single_transformer_blocks.{i}.attn.to_v",
                f"single_transformer_blocks.{i}.proj_mlp",
            ],
            [
                (hidden_size, hidden_size),
                (hidden_size, hidden_size),
                (hidden_size, hidden_size),
                (mlp_hidden_dim, hidden_size),
            ],
            f"single_blocks.{i}.linear1",
            allow_missing_keys=True,
        )

    # Output projections.
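The reason separate Q/K/V LoRA layers can be concatenated for the fused BFL qkv matrix is that the fused weight is itself a concatenation along the output dimension, so the per-projection deltas can simply be stacked. A plain-torch illustration with toy sizes (not InvokeAI's actual LoRA classes):

import torch

hidden = 8
rank = 2

# Separate diffusers-style LoRA factors for Q, K, V (delta_W = up @ down).
downs = [torch.randn(rank, hidden) for _ in range(3)]
ups = [torch.randn(hidden, rank) for _ in range(3)]

# Delta for the fused qkv weight: stack the three per-projection deltas along
# the output dimension, matching W_qkv = cat([W_q, W_k, W_v], dim=0).
delta_qkv = torch.cat([up @ down for up, down in zip(ups, downs)], dim=0)
assert delta_qkv.shape == (3 * hidden, hidden)

# A missing projection can be represented by a rank-1 all-zero LoRA, which is
# what the allow_missing_keys branch above does with torch.zeros factors.
zero_up, zero_down = torch.zeros(hidden, 1), torch.zeros(1, hidden)
assert torch.all(zero_up @ zero_down == 0)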
@@ -53,6 +53,7 @@ class BaseModelType(str, Enum):
    Any = "any"
    StableDiffusion1 = "sd-1"
    StableDiffusion2 = "sd-2"
    StableDiffusion3 = "sd-3"
    StableDiffusionXL = "sdxl"
    StableDiffusionXLRefiner = "sdxl-refiner"
    Flux = "flux"

@@ -83,8 +84,10 @@ class SubModelType(str, Enum):
    Transformer = "transformer"
    TextEncoder = "text_encoder"
    TextEncoder2 = "text_encoder_2"
    TextEncoder3 = "text_encoder_3"
    Tokenizer = "tokenizer"
    Tokenizer2 = "tokenizer_2"
    Tokenizer3 = "tokenizer_3"
    VAE = "vae"
    VAEDecoder = "vae_decoder"
    VAEEncoder = "vae_encoder"

@@ -92,6 +95,13 @@ class SubModelType(str, Enum):
    SafetyChecker = "safety_checker"


class ClipVariantType(str, Enum):
    """Variant type."""

    L = "large"
    G = "gigantic"


class ModelVariantType(str, Enum):
    """Variant type."""

@@ -147,6 +157,17 @@ class ModelSourceType(str, Enum):
DEFAULTS_PRECISION = Literal["fp16", "fp32"]


AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]


class SubmodelDefinition(BaseModel):
    path_or_prefix: str
    model_type: ModelType
    variant: AnyVariant = None

    model_config = ConfigDict(protected_namespaces=())


class MainModelDefaultSettings(BaseModel):
    vae: str | None = Field(default=None, description="Default VAE for this model (model key)")
    vae_precision: DEFAULTS_PRECISION | None = Field(default=None, description="Default VAE precision for this model")

@@ -193,6 +214,9 @@ class ModelConfigBase(BaseModel):
            schema["required"].extend(["key", "type", "format"])

    model_config = ConfigDict(validate_assignment=True, json_schema_extra=json_schema_extra)
    submodels: Optional[Dict[SubModelType, SubmodelDefinition]] = Field(
        description="Loadable submodels in this model", default=None
    )


class CheckpointConfigBase(ModelConfigBase):

@@ -335,7 +359,7 @@ class MainConfigBase(ModelConfigBase):
    default_settings: Optional[MainModelDefaultSettings] = Field(
        description="Default settings for this model", default=None
    )
    variant: ModelVariantType = ModelVariantType.Normal
    variant: AnyVariant = ModelVariantType.Normal


class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):

@@ -394,6 +418,8 @@ class IPAdapterBaseConfig(ModelConfigBase):
class IPAdapterInvokeAIConfig(IPAdapterBaseConfig):
    """Model config for IP Adapter diffusers format models."""

    # TODO(ryand): Should we deprecate this field? From what I can tell, it hasn't been probed correctly for a long
    # time. Need to go through the history to make sure I'm understanding this fully.
    image_encoder_model_id: str
    format: Literal[ModelFormat.InvokeAI]

@@ -417,12 +443,33 @@ class CLIPEmbedDiffusersConfig(DiffusersConfigBase):

    type: Literal[ModelType.CLIPEmbed] = ModelType.CLIPEmbed
    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
    variant: ClipVariantType = ClipVariantType.L

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}")


class CLIPGEmbedDiffusersConfig(CLIPEmbedDiffusersConfig):
    """Model config for CLIP-G Embeddings."""

    variant: ClipVariantType = ClipVariantType.G

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.G}")


class CLIPLEmbedDiffusersConfig(CLIPEmbedDiffusersConfig):
    """Model config for CLIP-L Embeddings."""

    variant: ClipVariantType = ClipVariantType.L

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.L}")


class CLIPVisionDiffusersConfig(DiffusersConfigBase):
    """Model config for CLIPVision."""

@@ -499,6 +546,8 @@ AnyModelConfig = Annotated[
        Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()],
        Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()],
        Annotated[CLIPEmbedDiffusersConfig, CLIPEmbedDiffusersConfig.get_tag()],
        Annotated[CLIPLEmbedDiffusersConfig, CLIPLEmbedDiffusersConfig.get_tag()],
        Annotated[CLIPGEmbedDiffusersConfig, CLIPGEmbedDiffusersConfig.get_tag()],
    ],
    Discriminator(get_model_discriminator_value),
]
@@ -35,6 +35,7 @@ class ModelLoader(ModelLoaderBase):
        self._logger = logger
        self._ram_cache = ram_cache
        self._torch_dtype = TorchDevice.choose_torch_dtype()
        self._torch_device = TorchDevice.choose_torch_device()

    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
        """
@@ -0,0 +1,41 @@
from pathlib import Path
from typing import Optional

from transformers import CLIPVisionModelWithProjection

from invokeai.backend.model_manager.config import (
    AnyModel,
    AnyModelConfig,
    BaseModelType,
    DiffusersConfigBase,
    ModelFormat,
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)
class ClipVisionLoader(ModelLoader):
    """Class to load CLIPVision models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, DiffusersConfigBase):
            raise ValueError("Only DiffusersConfigBase models are currently supported here.")

        if submodel_type is not None:
            raise Exception("There are no submodels in CLIP Vision models.")

        model_path = Path(config.path)

        model = CLIPVisionModelWithProjection.from_pretrained(
            model_path, torch_dtype=self._torch_dtype, local_files_only=True
        )
        assert isinstance(model, CLIPVisionModelWithProjection)

        return model
@@ -19,6 +19,10 @@ from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_xlabs_controlnet,
)
from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux
from invokeai.backend.flux.ip_adapter.state_dict_utils import infer_xlabs_ip_adapter_params_from_state_dict
from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import (
    XlabsIpAdapterFlux,
)
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.util import ae_params, params

@@ -35,6 +39,7 @@ from invokeai.backend.model_manager.config import (
    CLIPEmbedDiffusersConfig,
    ControlNetCheckpointConfig,
    ControlNetDiffusersConfig,
    IPAdapterCheckpointConfig,
    MainBnbQuantized4bCheckpointConfig,
    MainCheckpointConfig,
    MainGGUFCheckpointConfig,

@@ -79,7 +84,15 @@ class FluxVAELoader(ModelLoader):
        model = AutoEncoder(ae_params[config.config_path])
        sd = load_file(model_path)
        model.load_state_dict(sd, assign=True)
        model.to(dtype=self._torch_dtype)
        # VAE is broken in float16, which mps defaults to
        if self._torch_dtype == torch.float16:
            try:
                vae_dtype = torch.tensor([1.0], dtype=torch.bfloat16, device=self._torch_device).dtype
            except TypeError:
                vae_dtype = torch.float32
        else:
            vae_dtype = self._torch_dtype
        model.to(vae_dtype)

        return model

@@ -123,9 +136,9 @@ class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
                "The bnb modules are not available. Please install bitsandbytes if available on your platform."
            )
        match submodel_type:
            case SubModelType.Tokenizer2:
            case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
            case SubModelType.TextEncoder2:
            case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                te2_model_path = Path(config.path) / "text_encoder_2"
                model_config = AutoConfig.from_pretrained(te2_model_path)
                with accelerate.init_empty_weights():

@@ -167,10 +180,10 @@ class T5EncoderCheckpointModel(ModelLoader):
            raise ValueError("Only T5EncoderConfig models are currently supported here.")

        match submodel_type:
            case SubModelType.Tokenizer2:
            case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
            case SubModelType.TextEncoder2:
                return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2")
            case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2", torch_dtype="auto")

        raise ValueError(
            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"

@@ -352,3 +365,26 @@ class FluxControlnetModel(ModelLoader):

        model.load_state_dict(sd, assign=True)
        return model


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.IPAdapter, format=ModelFormat.Checkpoint)
class FluxIpAdapterModel(ModelLoader):
    """Class to load FLUX IP-Adapter models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, IPAdapterCheckpointConfig):
            raise ValueError(f"Unexpected model config type: {type(config)}.")

        sd = load_file(Path(config.path))

        params = infer_xlabs_ip_adapter_params_from_state_dict(sd)

        with accelerate.init_empty_weights():
            model = XlabsIpAdapterFlux(params=params)

        model.load_xlabs_state_dict(sd, assign=True)
        return model
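The try/except in FluxVAELoader probes whether the active device can allocate a bfloat16 tensor before falling back to float32. The same check can be written as a small helper; this is a sketch under the assumption (matching the loader above) that an unsupported dtype raises TypeError:

import torch


def pick_vae_dtype(requested: torch.dtype, device: torch.device) -> torch.dtype:
    # The FLUX VAE is unreliable in float16, so try bfloat16 first and fall
    # back to float32 if the device cannot create bfloat16 tensors.
    if requested != torch.float16:
        return requested
    try:
        return torch.tensor([1.0], dtype=torch.bfloat16, device=device).dtype
    except TypeError:
        return torch.float32


dtype = pick_vae_dtype(torch.float16, torch.device("cpu"))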
@@ -22,7 +22,6 @@ from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T2IAdapter, format=ModelFormat.Diffusers)
class GenericDiffusersLoader(ModelLoader):
    """Class to load simple diffusers models."""
@@ -42,6 +42,7 @@ VARIANT_TO_IN_CHANNEL_MAP = {
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion3, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Checkpoint)

@@ -51,13 +52,6 @@ VARIANT_TO_IN_CHANNEL_MAP = {
class StableDiffusionDiffusersModel(GenericDiffusersLoader):
    """Class to load main models."""

    model_base_to_model_type = {
        BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",
        BaseModelType.StableDiffusion2: "FrozenOpenCLIPEmbedder",
        BaseModelType.StableDiffusionXL: "SDXL",
        BaseModelType.StableDiffusionXLRefiner: "SDXL-Refiner",
    }

    def _load_model(
        self,
        config: AnyModelConfig,

@@ -117,8 +111,6 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
            load_class = load_classes[config.base][config.variant]
        except KeyError as e:
            raise Exception(f"No diffusers pipeline known for base={config.base}, variant={config.variant}") from e
        prediction_type = config.prediction_type.value
        upcast_attention = config.upcast_attention

        # Without SilenceWarnings we get log messages like this:
        # site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.

@@ -129,13 +121,7 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
        # ['text_model.embeddings.position_ids']

        with SilenceWarnings():
            pipeline = load_class.from_single_file(
                config.path,
                torch_dtype=self._torch_dtype,
                prediction_type=prediction_type,
                upcast_attention=upcast_attention,
                load_safety_checker=False,
            )
            pipeline = load_class.from_single_file(config.path, torch_dtype=self._torch_dtype)

        if not submodel_type:
            return pipeline
@@ -20,7 +20,7 @@ from typing import Optional

import requests
from huggingface_hub import HfApi, configure_http_backend, hf_hub_url
from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError
from huggingface_hub.errors import RepositoryNotFoundError, RevisionNotFoundError
from pydantic.networks import AnyHttpUrl
from requests.sessions import Session

@@ -1,7 +1,7 @@
import json
import re
from pathlib import Path
from typing import Any, Dict, Literal, Optional, Union
from typing import Any, Callable, Dict, Literal, Optional, Union

import safetensors.torch
import spandrel

@@ -14,6 +14,7 @@ from invokeai.backend.flux.controlnet.state_dict_utils import (
    is_state_dict_instantx_controlnet,
    is_state_dict_xlabs_controlnet,
)
from invokeai.backend.flux.ip_adapter.state_dict_utils import is_state_dict_xlabs_ip_adapter
from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
    is_state_dict_likely_in_flux_diffusers_format,
)

@@ -21,6 +22,7 @@ from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import i
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    AnyVariant,
    BaseModelType,
    ControlAdapterDefaultSettings,
    InvalidModelConfigException,

@@ -32,8 +34,15 @@ from invokeai.backend.model_manager.config import (
    ModelType,
    ModelVariantType,
    SchedulerPredictionType,
    SubmodelDefinition,
    SubModelType,
)
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import ConfigLoader
from invokeai.backend.model_manager.util.model_util import (
    get_clip_variant_type,
    lora_token_vector_length,
    read_checkpoint_meta,
)
from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel

@@ -111,6 +120,7 @@ class ModelProbe(object):
        "StableDiffusionXLPipeline": ModelType.Main,
        "StableDiffusionXLImg2ImgPipeline": ModelType.Main,
        "StableDiffusionXLInpaintPipeline": ModelType.Main,
        "StableDiffusion3Pipeline": ModelType.Main,
        "LatentConsistencyModelPipeline": ModelType.Main,
        "AutoencoderKL": ModelType.VAE,
        "AutoencoderTiny": ModelType.VAE,

@@ -121,8 +131,12 @@ class ModelProbe(object):
        "CLIPTextModel": ModelType.CLIPEmbed,
        "T5EncoderModel": ModelType.T5Encoder,
        "FluxControlNetModel": ModelType.ControlNet,
        "SD3Transformer2DModel": ModelType.Main,
        "CLIPTextModelWithProjection": ModelType.CLIPEmbed,
    }

    TYPE2VARIANT: Dict[ModelType, Callable[[str], Optional[AnyVariant]]] = {ModelType.CLIPEmbed: get_clip_variant_type}

    @classmethod
    def register_probe(
        cls, format: Literal["diffusers", "checkpoint", "onnx"], model_type: ModelType, probe_class: type[ProbeBase]

@@ -169,7 +183,10 @@ class ModelProbe(object):
        fields["path"] = model_path.as_posix()
        fields["type"] = fields.get("type") or model_type
        fields["base"] = fields.get("base") or probe.get_base_type()
        fields["variant"] = fields.get("variant") or probe.get_variant_type()
        variant_func = cls.TYPE2VARIANT.get(fields["type"], None)
        fields["variant"] = (
            fields.get("variant") or (variant_func and variant_func(model_path.as_posix())) or probe.get_variant_type()
        )
        fields["prediction_type"] = fields.get("prediction_type") or probe.get_scheduler_prediction_type()
        fields["image_encoder_model_id"] = fields.get("image_encoder_model_id") or probe.get_image_encoder_model_id()
        fields["name"] = fields.get("name") or cls.get_model_name(model_path)

@@ -216,6 +233,10 @@ class ModelProbe(object):
            and fields["prediction_type"] == SchedulerPredictionType.VPrediction
        )

        get_submodels = getattr(probe, "get_submodels", None)
        if fields["base"] == BaseModelType.StableDiffusion3 and callable(get_submodels):
            fields["submodels"] = get_submodels()

        model_info = ModelConfigFactory.make_config(fields)  # , key=fields.get("key", None))
        return model_info

@@ -243,8 +264,6 @@ class ModelProbe(object):
                "cond_stage_model.",
                "first_stage_model.",
                "model.diffusion_model.",
                # FLUX models in the official BFL format contain keys with the "double_blocks." prefix.
                "double_blocks.",
                # Some FLUX checkpoint files contain transformer keys prefixed with "model.diffusion_model".
                # This prefix is typically used to distinguish between multiple models bundled in a single file.
                "model.diffusion_model.double_blocks.",

@@ -252,6 +271,10 @@ class ModelProbe(object):
        ):
            # Keys starting with double_blocks are associated with Flux models
            return ModelType.Main
        # FLUX models in the official BFL format contain keys with the "double_blocks." prefix, but we must be
        # careful to avoid false positives on XLabs FLUX IP-Adapter models.
        elif key.startswith("double_blocks.") and "ip_adapter" not in key:
            return ModelType.Main
        elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
            return ModelType.VAE
        elif key.startswith(("lora_te_", "lora_unet_")):

@@ -274,7 +297,14 @@ class ModelProbe(object):
            )
        ):
            return ModelType.ControlNet
        elif key.startswith(("image_proj.", "ip_adapter.")):
        elif key.startswith(
            (
                "image_proj.",
                "ip_adapter.",
                # XLabs FLUX IP-Adapter models have keys starting with "ip_adapter_proj_model.".
                "ip_adapter_proj_model.",
            )
        ):
            return ModelType.IPAdapter
        elif key in {"emb_params", "string_to_param"}:
            return ModelType.TextualInversion
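The ordering concern described in the comments above can be summarized in a tiny illustration: an XLabs FLUX IP-Adapter checkpoint also contains "double_blocks." keys, so the probe must exclude keys containing "ip_adapter" before treating "double_blocks." as a FLUX main model. A simplified sketch (not the full probe):

def classify_key(key: str) -> str:
    # Simplified mirror of the prefix checks above.
    if key.startswith(("image_proj.", "ip_adapter.", "ip_adapter_proj_model.")):
        return "ip_adapter"
    if key.startswith("double_blocks.") and "ip_adapter" not in key:
        return "main"
    return "unknown"


assert classify_key("double_blocks.0.img_attn.qkv.weight") == "main"
assert classify_key("ip_adapter_proj_model.proj.weight") == "ip_adapter"
# An XLabs IP-Adapter key that begins with "double_blocks." must not be
# misclassified as a FLUX main model.
assert classify_key("double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight") != "main"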
@@ -452,8 +482,9 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "normal": "normalbae_image_processor",
    "sketch": "pidi_image_processor",
    "scribble": "lineart_image_processor",
    "lineart": "lineart_image_processor",
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",
@@ -672,6 +703,10 @@ class IPAdapterCheckpointProbe(CheckpointProbeBase):

    def get_base_type(self) -> BaseModelType:
        checkpoint = self.checkpoint

        if is_state_dict_xlabs_ip_adapter(checkpoint):
            return BaseModelType.Flux

        for key in checkpoint.keys():
            if not key.startswith(("image_proj.", "ip_adapter.")):
                continue
@@ -732,18 +767,33 @@ class FolderProbeBase(ProbeBase):

class PipelineFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        with open(self.model_path / "unet" / "config.json", "r") as file:
            unet_conf = json.load(file)
        if unet_conf["cross_attention_dim"] == 768:
            return BaseModelType.StableDiffusion1
        elif unet_conf["cross_attention_dim"] == 1024:
            return BaseModelType.StableDiffusion2
        elif unet_conf["cross_attention_dim"] == 1280:
            return BaseModelType.StableDiffusionXLRefiner
        elif unet_conf["cross_attention_dim"] == 2048:
            return BaseModelType.StableDiffusionXL
        else:
            raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
        # Handle pipelines with a UNet (i.e SD 1.x, SD2, SDXL).
        config_path = self.model_path / "unet" / "config.json"
        if config_path.exists():
            with open(config_path) as file:
                unet_conf = json.load(file)
            if unet_conf["cross_attention_dim"] == 768:
                return BaseModelType.StableDiffusion1
            elif unet_conf["cross_attention_dim"] == 1024:
                return BaseModelType.StableDiffusion2
            elif unet_conf["cross_attention_dim"] == 1280:
                return BaseModelType.StableDiffusionXLRefiner
            elif unet_conf["cross_attention_dim"] == 2048:
                return BaseModelType.StableDiffusionXL
            else:
                raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")

        # Handle pipelines with a transformer (i.e. SD3).
        config_path = self.model_path / "transformer" / "config.json"
        if config_path.exists():
            with open(config_path) as file:
                transformer_conf = json.load(file)
            if transformer_conf["_class_name"] == "SD3Transformer2DModel":
                return BaseModelType.StableDiffusion3
            else:
                raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")

        raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")

    def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
        with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:

@@ -755,6 +805,23 @@ class PipelineFolderProbe(FolderProbeBase):
        else:
            raise InvalidModelConfigException(f"Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")

    def get_submodels(self) -> Dict[SubModelType, SubmodelDefinition]:
        config = ConfigLoader.load_config(self.model_path, config_name="model_index.json")
        submodels: Dict[SubModelType, SubmodelDefinition] = {}
        for key, value in config.items():
            if key.startswith("_") or not (isinstance(value, list) and len(value) == 2):
                continue
            model_loader = str(value[1])
            if model_type := ModelProbe.CLASS2TYPE.get(model_loader):
                variant_func = ModelProbe.TYPE2VARIANT.get(model_type, None)
                submodels[SubModelType(key)] = SubmodelDefinition(
                    path_or_prefix=(self.model_path / key).resolve().as_posix(),
                    model_type=model_type,
                    variant=variant_func and variant_func((self.model_path / key).as_posix()),
                )

        return submodels

    def get_variant_type(self) -> ModelVariantType:
        # This only works for pipelines! Any kind of
        # exception results in our returning the
@@ -13,6 +13,9 @@ class StarterModelWithoutDependencies(BaseModel):
    type: ModelType
    format: Optional[ModelFormat] = None
    is_installed: bool = False
    # allows us to track what models a user has installed across name changes within starter models
    # if you update a starter model name, please add the old one to this list for that starter model
    previous_names: list[str] = []


class StarterModel(StarterModelWithoutDependencies):
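The new previous_names field lets the UI treat a renamed starter model as already installed. A minimal sketch of how such a match could be made (illustrative only; not the actual InvokeAI lookup code):

from dataclasses import dataclass, field


@dataclass
class StarterEntry:
    name: str
    previous_names: list[str] = field(default_factory=list)


def is_installed(entry: StarterEntry, installed_model_names: set[str]) -> bool:
    # A starter model counts as installed if any of its current or former
    # names matches an installed model.
    candidates = {entry.name, *entry.previous_names}
    return bool(candidates & installed_model_names)


entry = StarterEntry(name="Standard Reference (IP Adapter)", previous_names=["IP Adapter"])
assert is_installed(entry, {"IP Adapter"})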
@@ -25,22 +28,6 @@ class StarterModelBundles(BaseModel):
    models: list[StarterModel]


ip_adapter_sd_image_encoder = StarterModel(
    name="IP Adapter SD1.5 Image Encoder",
    base=BaseModelType.StableDiffusion1,
    source="InvokeAI/ip_adapter_sd_image_encoder",
    description="IP Adapter SD Image Encoder",
    type=ModelType.CLIPVision,
)

ip_adapter_sdxl_image_encoder = StarterModel(
    name="IP Adapter SDXL Image Encoder",
    base=BaseModelType.StableDiffusionXL,
    source="InvokeAI/ip_adapter_sdxl_image_encoder",
    description="IP Adapter SDXL Image Encoder",
    type=ModelType.CLIPVision,
)

cyberrealistic_negative = StarterModel(
    name="CyberRealistic Negative v3",
    base=BaseModelType.StableDiffusion1,

@@ -49,6 +36,32 @@ cyberrealistic_negative = StarterModel(
    type=ModelType.TextualInversion,
)

# region CLIP Image Encoders
ip_adapter_sd_image_encoder = StarterModel(
    name="IP Adapter SD1.5 Image Encoder",
    base=BaseModelType.StableDiffusion1,
    source="InvokeAI/ip_adapter_sd_image_encoder",
    description="IP Adapter SD Image Encoder",
    type=ModelType.CLIPVision,
)
ip_adapter_sdxl_image_encoder = StarterModel(
    name="IP Adapter SDXL Image Encoder",
    base=BaseModelType.StableDiffusionXL,
    source="InvokeAI/ip_adapter_sdxl_image_encoder",
    description="IP Adapter SDXL Image Encoder",
    type=ModelType.CLIPVision,
)
# Note: This model is installed from the same source as the CLIPEmbed model below. The model contains both the image
# encoder and the text encoder, but we need separate model entries so that they get loaded correctly.
clip_vit_l_image_encoder = StarterModel(
    name="clip-vit-large-patch14",
    base=BaseModelType.Any,
    source="InvokeAI/clip-vit-large-patch14",
    description="CLIP ViT-L Image Encoder",
    type=ModelType.CLIPVision,
)
# endregion

# region TextEncoders
t5_base_encoder = StarterModel(
    name="t5_base_encoder",

@@ -127,6 +140,22 @@ flux_dev = StarterModel(
    type=ModelType.Main,
    dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
)
sd35_medium = StarterModel(
    name="SD3.5 Medium",
    base=BaseModelType.StableDiffusion3,
    source="stabilityai/stable-diffusion-3.5-medium",
    description="Medium SD3.5 Model: ~15GB",
    type=ModelType.Main,
    dependencies=[],
)
sd35_large = StarterModel(
    name="SD3.5 Large",
    base=BaseModelType.StableDiffusion3,
    source="stabilityai/stable-diffusion-3.5-large",
    description="Large SD3.5 Model: ~19G",
    type=ModelType.Main,
    dependencies=[],
)
cyberrealistic_sd1 = StarterModel(
    name="CyberRealistic v4.1",
    base=BaseModelType.StableDiffusion1,

@@ -186,6 +215,16 @@ dreamshaper_sdxl = StarterModel(
    type=ModelType.Main,
    dependencies=[sdxl_fp16_vae_fix],
)

archvis_sdxl = StarterModel(
    name="Architecture (RealVisXL5)",
    base=BaseModelType.StableDiffusionXL,
    source="SG161222/RealVisXL_V5.0",
    description="A photorealistic model, with architecture among its many use cases",
    type=ModelType.Main,
    dependencies=[sdxl_fp16_vae_fix],
)

sdxl_refiner = StarterModel(
    name="SDXL Refiner",
    base=BaseModelType.StableDiffusionXLRefiner,
@@ -223,36 +262,48 @@ easy_neg_sd1 = StarterModel(
# endregion
# region IP Adapter
ip_adapter_sd1 = StarterModel(
    name="IP Adapter",
    name="Standard Reference (IP Adapter)",
    base=BaseModelType.StableDiffusion1,
    source="https://huggingface.co/InvokeAI/ip_adapter_sd15/resolve/main/ip-adapter_sd15.safetensors",
    description="IP-Adapter for SD 1.5 models",
    description="References images with a more generalized/looser degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[ip_adapter_sd_image_encoder],
    previous_names=["IP Adapter"],
)
ip_adapter_plus_sd1 = StarterModel(
    name="IP Adapter Plus",
    name="Precise Reference (IP Adapter Plus)",
    base=BaseModelType.StableDiffusion1,
    source="https://huggingface.co/InvokeAI/ip_adapter_plus_sd15/resolve/main/ip-adapter-plus_sd15.safetensors",
    description="Refined IP-Adapter for SD 1.5 models",
    description="References images with a higher degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[ip_adapter_sd_image_encoder],
    previous_names=["IP Adapter Plus"],
)
ip_adapter_plus_face_sd1 = StarterModel(
    name="IP Adapter Plus Face",
    name="Face Reference (IP Adapter Plus Face)",
    base=BaseModelType.StableDiffusion1,
    source="https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15/resolve/main/ip-adapter-plus-face_sd15.safetensors",
    description="Refined IP-Adapter for SD 1.5 models, adapted for faces",
    description="References images with a higher degree of precision, adapted for faces",
    type=ModelType.IPAdapter,
    dependencies=[ip_adapter_sd_image_encoder],
    previous_names=["IP Adapter Plus Face"],
)
ip_adapter_sdxl = StarterModel(
    name="IP Adapter SDXL",
    name="Standard Reference (IP Adapter ViT-H)",
    base=BaseModelType.StableDiffusionXL,
    source="https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h/resolve/main/ip-adapter_sdxl_vit-h.safetensors",
    description="IP-Adapter for SDXL models",
    description="References images with a higher degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[ip_adapter_sdxl_image_encoder],
    previous_names=["IP Adapter SDXL"],
)
ip_adapter_flux = StarterModel(
    name="Standard Reference (XLabs FLUX IP-Adapter v2)",
    base=BaseModelType.Flux,
    source="https://huggingface.co/XLabs-AI/flux-ip-adapter-v2/resolve/main/ip_adapter.safetensors",
    description="References images with a more generalized/looser degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[clip_vit_l_image_encoder],
)
# endregion
# region ControlNet
@@ -271,157 +322,162 @@ qr_code_cnet_sdxl = StarterModel(
    type=ModelType.ControlNet,
)
canny_sd1 = StarterModel(
    name="canny",
    name="Hard Edge Detection (canny)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_canny",
    description="ControlNet weights trained on sd-1.5 with canny conditioning.",
    description="Uses detected edges in the image to control composition.",
    type=ModelType.ControlNet,
    previous_names=["canny"],
)
inpaint_cnet_sd1 = StarterModel(
    name="inpaint",
    name="Inpainting",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_inpaint",
    description="ControlNet weights trained on sd-1.5 with canny conditioning, inpaint version",
    type=ModelType.ControlNet,
    previous_names=["inpaint"],
)
mlsd_sd1 = StarterModel(
    name="mlsd",
    name="Line Drawing (mlsd)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_mlsd",
    description="ControlNet weights trained on sd-1.5 with canny conditioning, MLSD version",
    description="Uses straight line detection for controlling the generation.",
    type=ModelType.ControlNet,
    previous_names=["mlsd"],
)
depth_sd1 = StarterModel(
    name="depth",
    name="Depth Map",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11f1p_sd15_depth",
    description="ControlNet weights trained on sd-1.5 with depth conditioning",
    description="Uses depth information in the image to control the depth in the generation.",
    type=ModelType.ControlNet,
    previous_names=["depth"],
)
normal_bae_sd1 = StarterModel(
    name="normal_bae",
    name="Lighting Detection (Normals)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_normalbae",
    description="ControlNet weights trained on sd-1.5 with normalbae image conditioning",
    description="Uses detected lighting information to guide the lighting of the composition.",
    type=ModelType.ControlNet,
    previous_names=["normal_bae"],
)
seg_sd1 = StarterModel(
    name="seg",
    name="Segmentation Map",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_seg",
    description="ControlNet weights trained on sd-1.5 with seg image conditioning",
    description="Uses segmentation maps to guide the structure of the composition.",
    type=ModelType.ControlNet,
    previous_names=["seg"],
)
lineart_sd1 = StarterModel(
    name="lineart",
    name="Lineart",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_lineart",
    description="ControlNet weights trained on sd-1.5 with lineart image conditioning",
    description="Uses lineart detection to guide the lighting of the composition.",
    type=ModelType.ControlNet,
    previous_names=["lineart"],
)
lineart_anime_sd1 = StarterModel(
    name="lineart_anime",
    name="Lineart Anime",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15s2_lineart_anime",
    description="ControlNet weights trained on sd-1.5 with anime image conditioning",
    description="Uses anime lineart detection to guide the lighting of the composition.",
    type=ModelType.ControlNet,
    previous_names=["lineart_anime"],
)
openpose_sd1 = StarterModel(
    name="openpose",
    name="Pose Detection (openpose)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_openpose",
    description="ControlNet weights trained on sd-1.5 with openpose image conditioning",
    description="Uses pose information to control the pose of human characters in the generation.",
    type=ModelType.ControlNet,
    previous_names=["openpose"],
)
scribble_sd1 = StarterModel(
    name="scribble",
    name="Contour Detection (scribble)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_scribble",
    description="ControlNet weights trained on sd-1.5 with scribble image conditioning",
    description="Uses edges, contours, or line art in the image to control composition.",
    type=ModelType.ControlNet,
    previous_names=["scribble"],
)
softedge_sd1 = StarterModel(
    name="softedge",
    name="Soft Edge Detection (softedge)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11p_sd15_softedge",
    description="ControlNet weights trained on sd-1.5 with soft edge conditioning",
    description="Uses a soft edge detection map to control composition.",
    type=ModelType.ControlNet,
    previous_names=["softedge"],
)
shuffle_sd1 = StarterModel(
    name="shuffle",
    name="Remix (shuffle)",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11e_sd15_shuffle",
    description="ControlNet weights trained on sd-1.5 with shuffle image conditioning",
    type=ModelType.ControlNet,
    previous_names=["shuffle"],
)
tile_sd1 = StarterModel(
    name="tile",
    name="Tile",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11f1e_sd15_tile",
    description="ControlNet weights trained on sd-1.5 with tiled image conditioning",
    type=ModelType.ControlNet,
)
ip2p_sd1 = StarterModel(
    name="ip2p",
    base=BaseModelType.StableDiffusion1,
    source="lllyasviel/control_v11e_sd15_ip2p",
    description="ControlNet weights trained on sd-1.5 with ip2p conditioning.",
    description="Uses image data to replicate exact colors/structure in the resulting generation.",
    type=ModelType.ControlNet,
    previous_names=["tile"],
)
canny_sdxl = StarterModel(
    name="canny-sdxl",
    name="Hard Edge Detection (canny)",
    base=BaseModelType.StableDiffusionXL,
    source="xinsir/controlNet-canny-sdxl-1.0",
    description="ControlNet weights trained on sdxl-1.0 with canny conditioning, by Xinsir.",
    description="Uses detected edges in the image to control composition.",
    type=ModelType.ControlNet,
    previous_names=["canny-sdxl"],
)
depth_sdxl = StarterModel(
    name="depth-sdxl",
    name="Depth Map",
    base=BaseModelType.StableDiffusionXL,
    source="diffusers/controlNet-depth-sdxl-1.0",
    description="ControlNet weights trained on sdxl-1.0 with depth conditioning.",
    description="Uses depth information in the image to control the depth in the generation.",
    type=ModelType.ControlNet,
    previous_names=["depth-sdxl"],
)
softedge_sdxl = StarterModel(
    name="softedge-dexined-sdxl",
    name="Soft Edge Detection (softedge)",
    base=BaseModelType.StableDiffusionXL,
    source="SargeZT/controlNet-sd-xl-1.0-softedge-dexined",
description="ControlNet weights trained on sdxl-1.0 with dexined soft edge preprocessing.",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
depth_zoe_16_sdxl = StarterModel(
|
||||
name="depth-16bit-zoe-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="SargeZT/controlNet-sd-xl-1.0-depth-16bit-zoe",
|
||||
description="ControlNet weights trained on sdxl-1.0 with Zoe's preprocessor (16 bits).",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
depth_zoe_32_sdxl = StarterModel(
|
||||
name="depth-zoe-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="diffusers/controlNet-zoe-depth-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with Zoe's preprocessor (32 bits).",
|
||||
description="Uses a soft edge detection map to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["softedge-dexined-sdxl"],
|
||||
)
|
||||
openpose_sdxl = StarterModel(
|
||||
name="openpose-sdxl",
|
||||
name="Pose Detection (openpose)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-openpose-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 compatible with the DWPose processor by Xinsir.",
|
||||
description="Uses pose information to control the pose of human characters in the generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["openpose-sdxl", "controlnet-openpose-sdxl"],
|
||||
)
|
||||
scribble_sdxl = StarterModel(
|
||||
name="scribble-sdxl",
|
||||
name="Contour Detection (scribble)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-scribble-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 compatible with various lineart processors and black/white sketches by Xinsir.",
|
||||
description="Uses edges, contours, or line art in the image to control composition.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["scribble-sdxl", "controlnet-scribble-sdxl"],
|
||||
)
|
||||
tile_sdxl = StarterModel(
|
||||
name="tile-sdxl",
|
||||
name="Tile",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="xinsir/controlNet-tile-sdxl-1.0",
|
||||
description="ControlNet weights trained on sdxl-1.0 with tiled image conditioning",
|
||||
description="Uses image data to replicate exact colors/structure in the resulting generation.",
|
||||
type=ModelType.ControlNet,
|
||||
previous_names=["tile-sdxl"],
|
||||
)
|
||||
union_cnet_sdxl = StarterModel(
|
||||
name="Multi-Guidance Detection (Union Pro)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="InvokeAI/Xinsir-SDXL_Controlnet_Union",
|
||||
description="A unified ControlNet for SDXL model that supports 10+ control types",
|
||||
type=ModelType.ControlNet,
|
||||
)
|
||||
union_cnet_flux = StarterModel(
|
||||
@@ -434,60 +490,52 @@ union_cnet_flux = StarterModel(
|
||||
# endregion
|
||||
# region T2I Adapter
|
||||
t2i_canny_sd1 = StarterModel(
|
||||
name="canny-sd15",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_canny_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with canny conditioning.",
|
||||
description="Uses detected edges in the image to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["canny-sd15"],
|
||||
)
|
||||
t2i_sketch_sd1 = StarterModel(
|
||||
name="sketch-sd15",
|
||||
name="Sketch",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_sketch_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with sketch conditioning.",
|
||||
description="Uses a sketch to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["sketch-sd15"],
|
||||
)
|
||||
t2i_depth_sd1 = StarterModel(
|
||||
name="depth-sd15",
|
||||
name="Depth Map",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_depth_sd15v2",
|
||||
description="T2I Adapter weights trained on sd-1.5 with depth conditioning.",
|
||||
type=ModelType.T2IAdapter,
|
||||
)
|
||||
t2i_zoe_depth_sd1 = StarterModel(
|
||||
name="zoedepth-sd15",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
source="TencentARC/t2iadapter_zoedepth_sd15v1",
|
||||
description="T2I Adapter weights trained on sd-1.5 with zoe depth conditioning.",
|
||||
description="Uses depth information in the image to control the depth in the generation.",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["depth-sd15"],
|
||||
)
|
||||
t2i_canny_sdxl = StarterModel(
|
||||
name="canny-sdxl",
|
||||
name="Hard Edge Detection (canny)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-canny-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with canny conditioning.",
|
||||
type=ModelType.T2IAdapter,
|
||||
)
|
||||
t2i_zoe_depth_sdxl = StarterModel(
|
||||
name="zoedepth-sdxl",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-depth-zoe-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with zoe depth conditioning.",
|
||||
description="Uses detected edges in the image to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["canny-sdxl"],
|
||||
)
|
||||
t2i_lineart_sdxl = StarterModel(
|
||||
name="lineart-sdxl",
|
||||
name="Lineart",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-lineart-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with lineart conditioning.",
|
||||
description="Uses lineart detection to guide the lighting of the composition.",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["lineart-sdxl"],
|
||||
)
|
||||
t2i_sketch_sdxl = StarterModel(
|
||||
name="sketch-sdxl",
|
||||
name="Sketch",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="TencentARC/t2i-adapter-sketch-sdxl-1.0",
|
||||
description="T2I Adapter weights trained on sdxl-1.0 with sketch conditioning.",
|
||||
description="Uses a sketch to control composition",
|
||||
type=ModelType.T2IAdapter,
|
||||
previous_names=["sketch-sdxl"],
|
||||
)
|
||||
# endregion
|
||||
# region SpandrelImageToImage
|
||||
@@ -537,6 +585,8 @@ STARTER_MODELS: list[StarterModel] = [
    flux_dev_quantized,
    flux_schnell,
    flux_dev,
    sd35_medium,
    sd35_large,
    cyberrealistic_sd1,
    rev_animated_sd1,
    dreamshaper_8_sd1,
@@ -545,6 +595,7 @@ STARTER_MODELS: list[StarterModel] = [
    deliberate_inpainting_sd1,
    juggernaut_sdxl,
    dreamshaper_sdxl,
    archvis_sdxl,
    sdxl_refiner,
    sdxl_fp16_vae_fix,
    flux_vae,
@@ -555,6 +606,7 @@ STARTER_MODELS: list[StarterModel] = [
    ip_adapter_plus_sd1,
    ip_adapter_plus_face_sd1,
    ip_adapter_sdxl,
    ip_adapter_flux,
    qr_code_cnet_sd1,
    qr_code_cnet_sdxl,
    canny_sd1,
@@ -570,22 +622,18 @@ STARTER_MODELS: list[StarterModel] = [
    softedge_sd1,
    shuffle_sd1,
    tile_sd1,
    ip2p_sd1,
    canny_sdxl,
    depth_sdxl,
    softedge_sdxl,
    depth_zoe_16_sdxl,
    depth_zoe_32_sdxl,
    openpose_sdxl,
    scribble_sdxl,
    tile_sdxl,
    union_cnet_sdxl,
    union_cnet_flux,
    t2i_canny_sd1,
    t2i_sketch_sd1,
    t2i_depth_sd1,
    t2i_zoe_depth_sd1,
    t2i_canny_sdxl,
    t2i_zoe_depth_sdxl,
    t2i_lineart_sdxl,
    t2i_sketch_sdxl,
    realesrgan_x4,
@@ -616,7 +664,6 @@ sd1_bundle: list[StarterModel] = [
    softedge_sd1,
    shuffle_sd1,
    tile_sd1,
    ip2p_sd1,
    swinir,
]

@@ -627,8 +674,6 @@ sdxl_bundle: list[StarterModel] = [
    canny_sdxl,
    depth_sdxl,
    softedge_sdxl,
    depth_zoe_16_sdxl,
    depth_zoe_32_sdxl,
    openpose_sdxl,
    scribble_sdxl,
    tile_sdxl,
@@ -642,6 +687,7 @@ flux_bundle: list[StarterModel] = [
    t5_8b_quantized_encoder,
    clip_l_encoder,
    union_cnet_flux,
    ip_adapter_flux,
]

STARTER_BUNDLES: dict[str, list[StarterModel]] = {
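The renamed entries above keep their old identifiers in previous_names. As a standalone illustration (plain Python, not code from this diff or from InvokeAI's installer), one way such a field can be used is to map legacy starter names back to the new display names:

from dataclasses import dataclass, field


@dataclass
class _Starter:
    # Hypothetical stand-in for StarterModel; only the fields used here.
    name: str
    previous_names: list[str] = field(default_factory=list)


def legacy_name_index(models: list[_Starter]) -> dict[str, str]:
    """Map every previously used name to the model's current display name."""
    index: dict[str, str] = {}
    for model in models:
        for old_name in model.previous_names:
            index[old_name] = model.name
    return index


# Example: the old starter name "canny" resolves to the new display name.
models = [_Starter(name="Hard Edge Detection (canny)", previous_names=["canny"])]
assert legacy_name_index(models)["canny"] == "Hard Edge Detection (canny)"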
@@ -8,6 +8,7 @@ import safetensors
import torch
from picklescan.scanner import scan_file_path

from invokeai.backend.model_manager.config import ClipVariantType
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader


@@ -165,3 +166,25 @@ def convert_bundle_to_flux_transformer_checkpoint(
            del transformer_state_dict[k]

    return original_state_dict


def get_clip_variant_type(location: str) -> Optional[ClipVariantType]:
    try:
        path = Path(location)
        config_path = path / "config.json"
        if not config_path.exists():
            config_path = path / "text_encoder" / "config.json"
        if not config_path.exists():
            return ClipVariantType.L
        with open(config_path) as file:
            clip_conf = json.load(file)
            hidden_size = clip_conf.get("hidden_size", -1)
            match hidden_size:
                case 1280:
                    return ClipVariantType.G
                case 768:
                    return ClipVariantType.L
                case _:
                    return ClipVariantType.L
    except Exception:
        return ClipVariantType.L
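For context, the helper above classifies a CLIP checkpoint by the hidden_size in its config.json (1280 is treated as CLIP-G, anything else falls back to CLIP-L). A small standalone sketch of the folder layout it expects; the directory and values here are made up for illustration, not taken from the diff:

import json
import tempfile
from pathlib import Path

# Build a throwaway diffusers-style folder: <root>/text_encoder/config.json
with tempfile.TemporaryDirectory() as tmp:
    encoder_dir = Path(tmp) / "text_encoder"
    encoder_dir.mkdir()
    (encoder_dir / "config.json").write_text(json.dumps({"hidden_size": 1280}))
    # Passing tmp to get_clip_variant_type would read this config and return
    # ClipVariantType.G; a missing config or hidden_size of 768 yields ClipVariantType.L.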
@@ -85,6 +85,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
    """Select the proper variant files from a list of HuggingFace repo_id paths."""
    result: set[Path] = set()
    subfolder_weights: dict[Path, list[SubfolderCandidate]] = {}
    safetensors_detected = False
    for path in files:
        if path.suffix in [".onnx", ".pb", ".onnx_data"]:
            if variant == ModelRepoVariant.ONNX:
@@ -119,19 +120,27 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
            # We prefer safetensors over other file formats and an exact variant match. We'll score each file based on
            # variant and format and select the best one.

            if safetensors_detected and path.suffix == ".bin":
                continue

            parent = path.parent
            score = 0

            if path.suffix == ".safetensors":
                safetensors_detected = True
                if parent in subfolder_weights:
                    subfolder_weights[parent] = [sfc for sfc in subfolder_weights[parent] if sfc.path.suffix != ".bin"]
                score += 1

            candidate_variant_label = path.suffixes[0] if len(path.suffixes) == 2 else None

            # Some special handling is needed here if there is not an exact match and if we cannot infer the variant
            # from the file name. In this case, we only give this file a point if the requested variant is FP32 or DEFAULT.
            if candidate_variant_label == f".{variant}" or (
                not candidate_variant_label and variant in [ModelRepoVariant.FP32, ModelRepoVariant.Default]
            ):
            if (
                variant is not ModelRepoVariant.Default
                and candidate_variant_label
                and candidate_variant_label.startswith(f".{variant.value}")
            ) or (not candidate_variant_label and variant in [ModelRepoVariant.FP32, ModelRepoVariant.Default]):
                score += 1

            if parent not in subfolder_weights:
@@ -146,7 +155,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
        # Check if at least one of the files has the explicit fp16 variant.
        at_least_one_fp16 = False
        for candidate in candidate_list:
            if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0] == ".fp16":
            if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0].startswith(".fp16"):
                at_least_one_fp16 = True
                break

@@ -162,7 +171,16 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
        # candidate.
        highest_score_candidate = max(candidate_list, key=lambda candidate: candidate.score)
        if highest_score_candidate:
            result.add(highest_score_candidate.path)
            pattern = r"^(.*?)-\d+-of-\d+(\.\w+)$"
            match = re.match(pattern, highest_score_candidate.path.as_posix())
            if match:
                for candidate in candidate_list:
                    if candidate.path.as_posix().startswith(match.group(1)) and candidate.path.as_posix().endswith(
                        match.group(2)
                    ):
                        result.add(candidate.path)
            else:
                result.add(highest_score_candidate.path)

    # If one of the architecture-related variants was specified and no files matched other than
    # config and text files then we return an empty list
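The new branch above pulls in every shard of a sharded checkpoint once one shard wins the scoring, so multi-file checkpoints are never downloaded with missing pieces. A standalone sketch of that regex behaviour on made-up file names (not the function's real inputs):

import re
from pathlib import Path

pattern = r"^(.*?)-\d+-of-\d+(\.\w+)$"

candidates = [
    Path("transformer/diffusion_pytorch_model-00001-of-00003.safetensors"),
    Path("transformer/diffusion_pytorch_model-00002-of-00003.safetensors"),
    Path("transformer/diffusion_pytorch_model-00003-of-00003.safetensors"),
]

best = candidates[0]  # pretend this was the highest-scoring candidate
match = re.match(pattern, best.as_posix())
assert match is not None

# Every sibling that shares the same stem and extension is selected alongside the winner.
selected = {
    c
    for c in candidates
    if c.as_posix().startswith(match.group(1)) and c.as_posix().endswith(match.group(2))
}
assert selected == set(candidates)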
@@ -54,6 +54,11 @@ GGML_TENSOR_OP_TABLE = {
    torch.ops.aten.mul.Tensor: dequantize_and_run,  # pyright: ignore
}

if torch.backends.mps.is_available():
    GGML_TENSOR_OP_TABLE.update(
        {torch.ops.aten.linear.default: dequantize_and_run}  # pyright: ignore
    )


class GGMLTensor(torch.Tensor):
    """A torch.Tensor sub-class holding a quantized GGML tensor.
invokeai/backend/sd3/__init__.py (new file, 0 lines)
invokeai/backend/sd3/extensions/__init__.py (new file, 0 lines)
invokeai/backend/sd3/extensions/inpaint_extension.py (new file, 58 lines)
@@ -0,0 +1,58 @@
import torch


class InpaintExtension:
    """A class for managing inpainting with SD3."""

    def __init__(self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor, noise: torch.Tensor):
        """Initialize InpaintExtension.

        Args:
            init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0).
            inpaint_mask (torch.Tensor): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1 will be
                re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
                inpainted region with the background.
            noise (torch.Tensor): The noise tensor used to noise the init_latents.
        """
        assert init_latents.dim() == inpaint_mask.dim() == noise.dim() == 4
        assert init_latents.shape[-2:] == inpaint_mask.shape[-2:] == noise.shape[-2:]

        self._init_latents = init_latents
        self._inpaint_mask = inpaint_mask
        self._noise = noise

    def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
        """Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
        # As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
        # 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
        # number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
        # threshold worked well.
        # We use a small epsilon to avoid any potential issues with floating point precision.
        eps = 1e-4
        mask_gradient_t_cutoff = 0.5
        if t_prev > mask_gradient_t_cutoff:
            # Early in the denoising process, use the inpaint mask as-is.
            return self._inpaint_mask
        else:
            # After the cut-off, promote all non-zero mask values to 1.0.
            mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)

            return mask

    def merge_intermediate_latents_with_init_latents(
        self, intermediate_latents: torch.Tensor, t_prev: float
    ) -> torch.Tensor:
        """Merge the intermediate latents with the initial latents for the current timestep using the inpaint mask. I.e.
        update the intermediate latents to keep the regions that are not being inpainted on the correct noise
        trajectory.

        This function should be called after each denoising step.
        """
        mask = self._apply_mask_gradient_adjustment(t_prev)

        # Noise the init latents for the current timestep.
        noised_init_latents = self._noise * t_prev + (1.0 - t_prev) * self._init_latents

        # Merge the intermediate latents with the noised_init_latents using the inpaint_mask.
        return intermediate_latents * mask + noised_init_latents * (1.0 - mask)
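For orientation, a hedged sketch of how an extension like this could sit inside a rectified-flow denoising loop; the step function and timestep schedule here are placeholders for illustration, not InvokeAI's actual SD3 pipeline code:

import torch


def denoise_with_inpaint(latents, timesteps, step_fn, inpaint_ext):
    """Placeholder loop: step_fn(latents, t_curr, t_prev) returns the next latents."""
    for t_curr, t_prev in zip(timesteps[:-1], timesteps[1:]):
        latents = step_fn(latents, t_curr, t_prev)
        # After every step, pull the un-masked regions back onto the noise
        # trajectory of the original image, as the docstring above describes.
        latents = inpaint_ext.merge_intermediate_latents_with_init_latents(latents, float(t_prev))
    return latents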
@@ -499,6 +499,22 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                for idx, value in enumerate(single_t2i_adapter_data.adapter_state):
                    accum_adapter_state[idx] += value * t2i_adapter_weight

            # Hack: force compatibility with irregular resolutions by padding the feature map with zeros
            for idx, tensor in enumerate(accum_adapter_state):
                # The tensor size is supposed to be some integer downscale factor of the latents size.
                # Internally, the unet will pad the latents before downscaling between levels when it is no longer divisible by its downscale factor.
                # If the latent size does not scale down evenly, we need to pad the tensor so that it matches the downscaled padded latents later on.
                scale_factor = latents.size()[-1] // tensor.size()[-1]
                required_padding_width = math.ceil(latents.size()[-1] / scale_factor) - tensor.size()[-1]
                required_padding_height = math.ceil(latents.size()[-2] / scale_factor) - tensor.size()[-2]
                tensor = torch.nn.functional.pad(
                    tensor,
                    (0, required_padding_width, 0, required_padding_height, 0, 0, 0, 0),
                    mode="constant",
                    value=0,
                )
                accum_adapter_state[idx] = tensor

            down_intrablock_additional_residuals = accum_adapter_state

        # Handle inpainting models.
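A worked example of the padding arithmetic above, using made-up sizes rather than values from the pipeline (latent width 77, adapter feature width 9):

import math

import torch

latent_w = 77  # a latent width that does not downscale evenly
feature = torch.zeros(1, 320, 10, 9)  # hypothetical T2I-Adapter feature map (N, C, H, W)

scale_factor = latent_w // feature.size(-1)  # 77 // 9 == 8
required_padding_width = math.ceil(latent_w / scale_factor) - feature.size(-1)  # ceil(9.625) - 9 == 1

# Zero-pad only the right edge of the width dimension so the feature map lines up
# with the padded-and-downscaled latents.
padded = torch.nn.functional.pad(feature, (0, required_padding_width), value=0)
assert padded.size(-1) == 10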
@@ -49,9 +49,32 @@ class FLUXConditioningInfo:
        return self


@dataclass
class SD3ConditioningInfo:
    clip_l_pooled_embeds: torch.Tensor
    clip_l_embeds: torch.Tensor
    clip_g_pooled_embeds: torch.Tensor
    clip_g_embeds: torch.Tensor
    t5_embeds: torch.Tensor | None

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        self.clip_l_pooled_embeds = self.clip_l_pooled_embeds.to(device=device, dtype=dtype)
        self.clip_l_embeds = self.clip_l_embeds.to(device=device, dtype=dtype)
        self.clip_g_pooled_embeds = self.clip_g_pooled_embeds.to(device=device, dtype=dtype)
        self.clip_g_embeds = self.clip_g_embeds.to(device=device, dtype=dtype)
        if self.t5_embeds is not None:
            self.t5_embeds = self.t5_embeds.to(device=device, dtype=dtype)
        return self


@dataclass
class ConditioningFieldData:
    conditionings: List[BasicConditioningInfo] | List[SDXLConditioningInfo] | List[FLUXConditioningInfo]
    conditionings: (
        List[BasicConditioningInfo]
        | List[SDXLConditioningInfo]
        | List[FLUXConditioningInfo]
        | List[SD3ConditioningInfo]
    )


@dataclass
@@ -33,7 +33,7 @@ class PreviewExt(ExtensionBase):
    def initial_preview(self, ctx: DenoiseContext):
        self.callback(
            PipelineIntermediateState(
                step=-1,
                step=0,
                order=ctx.scheduler.order,
                total_steps=len(ctx.inputs.timesteps),
                timestep=int(ctx.scheduler.config.num_train_timesteps),  # TODO: is there any code which uses it?
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import diffusers
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalControlNetMixin
from diffusers.loaders.single_file_model import FromOriginalModelMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
from diffusers.models.embeddings import (
@@ -32,7 +32,9 @@ from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger(__name__)


class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
# NOTE(ryand): I'm not the original author of this code, but for future reference, it appears that this class was copied
# from diffusers in order to add support for the encoder_attention_mask argument.
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    """
    A ControlNet model.
@@ -9,6 +9,7 @@ const config: KnipConfig = {
|
||||
'src/services/api/schema.ts',
|
||||
'src/features/nodes/types/v1/**',
|
||||
'src/features/nodes/types/v2/**',
|
||||
'src/features/parameters/types/parameterSchemas.ts',
|
||||
// TODO(psyche): maybe we can clean up these utils after canvas v2 release
|
||||
'src/features/controlLayers/konva/util.ts',
|
||||
// TODO(psyche): restore HRF functionality?
|
||||
|
||||
@@ -52,13 +52,13 @@
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.0.3",
|
||||
"@dagrejs/dagre": "^1.1.4",
|
||||
"@dagrejs/graphlib": "^2.2.4",
|
||||
"@dnd-kit/core": "^6.1.0",
|
||||
"@dnd-kit/sortable": "^8.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@fontsource-variable/inter": "^5.1.0",
|
||||
"@invoke-ai/ui-library": "^0.0.42",
|
||||
"@invoke-ai/ui-library": "^0.0.43",
|
||||
"@nanostores/react": "^0.7.3",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
@@ -114,8 +114,7 @@
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"ts-toolbelt": "^9.6.0"
|
||||
"react-dom": "^18.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
@@ -149,8 +148,8 @@
|
||||
"prettier": "^3.3.3",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"storybook": "^8.3.4",
|
||||
"ts-toolbelt": "^9.6.0",
|
||||
"tsafe": "^1.7.5",
|
||||
"type-fest": "^4.26.1",
|
||||
"typescript": "^5.6.2",
|
||||
"vite": "^5.4.8",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.2",
|
||||
|
||||
invokeai/frontend/web/pnpm-lock.yaml (generated, 115 lines changed)
@@ -5,27 +5,27 @@ settings:
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop':
|
||||
specifier: ^1.4.0
|
||||
version: 1.4.0
|
||||
'@atlaskit/pragmatic-drag-and-drop-auto-scroll':
|
||||
specifier: ^1.4.0
|
||||
version: 1.4.0
|
||||
'@atlaskit/pragmatic-drag-and-drop-hitbox':
|
||||
specifier: ^1.0.3
|
||||
version: 1.0.3
|
||||
'@dagrejs/dagre':
|
||||
specifier: ^1.1.4
|
||||
version: 1.1.4
|
||||
'@dagrejs/graphlib':
|
||||
specifier: ^2.2.4
|
||||
version: 2.2.4
|
||||
'@dnd-kit/core':
|
||||
specifier: ^6.1.0
|
||||
version: 6.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@dnd-kit/sortable':
|
||||
specifier: ^8.0.0
|
||||
version: 8.0.0(@dnd-kit/core@6.1.0)(react@18.3.1)
|
||||
'@dnd-kit/utilities':
|
||||
specifier: ^3.2.2
|
||||
version: 3.2.2(react@18.3.1)
|
||||
'@fontsource-variable/inter':
|
||||
specifier: ^5.1.0
|
||||
version: 5.1.0
|
||||
'@invoke-ai/ui-library':
|
||||
specifier: ^0.0.42
|
||||
version: 0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
specifier: ^0.0.43
|
||||
version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@nanostores/react':
|
||||
specifier: ^0.7.3
|
||||
version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
|
||||
@@ -277,12 +277,12 @@ devDependencies:
|
||||
storybook:
|
||||
specifier: ^8.3.4
|
||||
version: 8.3.4
|
||||
ts-toolbelt:
|
||||
specifier: ^9.6.0
|
||||
version: 9.6.0
|
||||
tsafe:
|
||||
specifier: ^1.7.5
|
||||
version: 1.7.5
|
||||
type-fest:
|
||||
specifier: ^4.26.1
|
||||
version: 4.26.1
|
||||
typescript:
|
||||
specifier: ^5.6.2
|
||||
version: 5.6.2
|
||||
@@ -319,6 +319,28 @@ packages:
|
||||
'@jridgewell/trace-mapping': 0.3.25
|
||||
dev: true
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop-auto-scroll@1.4.0:
|
||||
resolution: {integrity: sha512-5GoikoTSW13UX76F9TDeWB8x3jbbGlp/Y+3aRkHe1MOBMkrWkwNpJ42MIVhhX/6NSeaZiPumP0KbGJVs2tOWSQ==}
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop': 1.4.0
|
||||
'@babel/runtime': 7.25.7
|
||||
dev: false
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop-hitbox@1.0.3:
|
||||
resolution: {integrity: sha512-/Sbu/HqN2VGLYBhnsG7SbRNg98XKkbF6L7XDdBi+izRybfaK1FeMfodPpm/xnBHPJzwYMdkE0qtLyv6afhgMUA==}
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop': 1.4.0
|
||||
'@babel/runtime': 7.25.7
|
||||
dev: false
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop@1.4.0:
|
||||
resolution: {integrity: sha512-qRY3PTJIcxfl/QB8Gwswz+BRvlmgAC5pB+J2hL6dkIxgqAgVwOhAamMUKsrOcFU/axG2Q7RbNs1xfoLKDuhoPg==}
|
||||
dependencies:
|
||||
'@babel/runtime': 7.25.7
|
||||
bind-event-listener: 3.0.0
|
||||
raf-schd: 4.0.3
|
||||
dev: false
|
||||
|
||||
/@babel/code-frame@7.25.7:
|
||||
resolution: {integrity: sha512-0xZJFNE5XMpENsgfHYTw8FbX4kv53mFLn2i3XPoq69LyhYSCBJtitaHx9QnsVTrsogI4Z3+HtEfZ2/GFPOtf5g==}
|
||||
engines: {node: '>=6.9.0'}
|
||||
@@ -980,49 +1002,6 @@ packages:
|
||||
engines: {node: '>17.0.0'}
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/accessibility@3.1.0(react@18.3.1):
|
||||
resolution: {integrity: sha512-ea7IkhKvlJUv9iSHJOnxinBcoOI3ppGnnL+VDJ75O45Nss6HtZd8IdN8touXPDtASfeI2T2LImb8VOZcL47wjQ==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/core@6.1.0(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-J3cQBClB4TVxwGo3KEjssGEXNJqGVWx17aRTZ1ob0FliR5IjYgTxl5YJbKTzA6IzrtelotH19v6y7uoIRUZPSg==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
react-dom: '>=16.8.0'
|
||||
dependencies:
|
||||
'@dnd-kit/accessibility': 3.1.0(react@18.3.1)
|
||||
'@dnd-kit/utilities': 3.2.2(react@18.3.1)
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0)(react@18.3.1):
|
||||
resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==}
|
||||
peerDependencies:
|
||||
'@dnd-kit/core': ^6.1.0
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
'@dnd-kit/core': 6.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@dnd-kit/utilities': 3.2.2(react@18.3.1)
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/utilities@3.2.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@emotion/babel-plugin@11.12.0:
|
||||
resolution: {integrity: sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw==}
|
||||
dependencies:
|
||||
@@ -1696,20 +1675,20 @@ packages:
|
||||
prettier: 3.3.3
|
||||
dev: true
|
||||
|
||||
/@invoke-ai/ui-library@0.0.42(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-OuDXRipBO5mu+Nv4qN8cd8MiwiGBdq6h4PirVgPI9/ltbdcIzePgUJ0dJns26lflHSTRWW38I16wl4YTw3mNWA==}
|
||||
/@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
|
||||
peerDependencies:
|
||||
'@fontsource-variable/inter': ^5.0.16
|
||||
react: ^18.2.0
|
||||
react-dom: ^18.2.0
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.2.2
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
|
||||
'@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
|
||||
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.9.2
|
||||
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
|
||||
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
|
||||
'@fontsource-variable/inter': 5.1.0
|
||||
@@ -4313,6 +4292,10 @@ packages:
|
||||
open: 8.4.2
|
||||
dev: true
|
||||
|
||||
/bind-event-listener@3.0.0:
|
||||
resolution: {integrity: sha512-PJvH288AWQhKs2v9zyfYdPzlPqf5bXbGMmhmUIY9x4dAUGIWgomO771oBQNwJnMQSnUIXhKu6sgzpBRXTlvb8Q==}
|
||||
dev: false
|
||||
|
||||
/bl@4.1.0:
|
||||
resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
|
||||
dependencies:
|
||||
@@ -7557,6 +7540,10 @@ packages:
|
||||
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
|
||||
dev: true
|
||||
|
||||
/raf-schd@4.0.3:
|
||||
resolution: {integrity: sha512-tQkJl2GRWh83ui2DiPTJz9wEiMN20syf+5oKfB03yYP7ioZcJwsIK8FjrtLwH1m7C7e+Tt2yYBlrOpdT+dyeIQ==}
|
||||
dev: false
|
||||
|
||||
/raf-throttle@2.0.6:
|
||||
resolution: {integrity: sha512-C7W6hy78A+vMmk5a/B6C5szjBHrUzWJkVyakjKCK59Uy2CcA7KhO1JUvvH32IXYFIcyJ3FMKP3ZzCc2/71I6Vg==}
|
||||
dev: false
|
||||
@@ -8830,10 +8817,6 @@ packages:
|
||||
resolution: {integrity: sha512-tLJxacIQUM82IR7JO1UUkKlYuUTmoY9HBJAmNWFzheSlDS5SPMcNIepejHJa4BpPQLAcbRhRf3GDJzyj6rbKvA==}
|
||||
dev: false
|
||||
|
||||
/ts-toolbelt@9.6.0:
|
||||
resolution: {integrity: sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==}
|
||||
dev: true
|
||||
|
||||
/tsafe@1.7.5:
|
||||
resolution: {integrity: sha512-tbNyyBSbwfbilFfiuXkSOj82a6++ovgANwcoqBAcO9/REPoZMEQoE8kWPeO0dy5A2D/2Lajr8Ohue5T0ifIvLQ==}
|
||||
dev: true
|
||||
|
||||
Binary image file added (895 KiB); preview not shown.
@@ -93,7 +93,10 @@
|
||||
"placeholderSelectAModel": "Modell auswählen",
|
||||
"reset": "Zurücksetzen",
|
||||
"none": "Keine",
|
||||
"new": "Neu"
|
||||
"new": "Neu",
|
||||
"ok": "OK",
|
||||
"close": "Schließen",
|
||||
"clipboard": "Zwischenablage"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@@ -156,7 +159,11 @@
|
||||
"displayBoardSearch": "Board durchsuchen",
|
||||
"displaySearch": "Bild suchen",
|
||||
"go": "Los",
|
||||
"jump": "Springen"
|
||||
"jump": "Springen",
|
||||
"assetsTab": "Dateien, die Sie zur Verwendung in Ihren Projekten hochgeladen haben.",
|
||||
"imagesTab": "Bilder, die Sie in Invoke erstellt und gespeichert haben.",
|
||||
"boardsSettings": "Ordnereinstellungen",
|
||||
"imagesSettings": "Galeriebildereinstellungen"
|
||||
},
|
||||
"hotkeys": {
|
||||
"noHotkeysFound": "Kein Hotkey gefunden",
|
||||
@@ -267,6 +274,18 @@
|
||||
"applyFilter": {
|
||||
"title": "Filter anwenden",
|
||||
"desc": "Wende den ausstehenden Filter auf die ausgewählte Ebene an."
|
||||
},
|
||||
"cancelFilter": {
|
||||
"title": "Filter abbrechen",
|
||||
"desc": "Den ausstehenden Filter abbrechen."
|
||||
},
|
||||
"applyTransform": {
|
||||
"desc": "Die ausstehende Transformation auf die ausgewählte Ebene anwenden.",
|
||||
"title": "Transformation anwenden"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "Transformation abbrechen",
|
||||
"desc": "Die ausstehende Transformation abbrechen."
|
||||
}
|
||||
},
|
||||
"viewer": {
|
||||
@@ -517,14 +536,12 @@
|
||||
"addModels": "Model hinzufügen",
|
||||
"deleteModelImage": "Lösche Model Bild",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"hfToken": "HuggingFace Schlüssel",
|
||||
"huggingFacePlaceholder": "besitzer/model-name",
|
||||
"modelSettings": "Modelleinstellungen",
|
||||
"typePhraseHere": "Phrase hier eingeben",
|
||||
"spandrelImageToImage": "Bild zu Bild (Spandrel)",
|
||||
"starterModels": "Einstiegsmodelle",
|
||||
"t5Encoder": "T5-Kodierer",
|
||||
"useDefaultSettings": "Standardeinstellungen verwenden",
|
||||
"uploadImage": "Bild hochladen",
|
||||
"urlOrLocalPath": "URL oder lokaler Pfad",
|
||||
"install": "Installieren",
|
||||
@@ -563,7 +580,18 @@
|
||||
"scanResults": "Ergebnisse des Scans",
|
||||
"urlOrLocalPathHelper": "URLs sollten auf eine einzelne Datei deuten. Lokale Pfade können zusätzlich auch auf einen Ordner für ein einzelnes Diffusers-Modell hinweisen.",
|
||||
"inplaceInstallDesc": "Installieren Sie Modelle, ohne die Dateien zu kopieren. Wenn Sie das Modell verwenden, wird es direkt von seinem Speicherort geladen. Wenn deaktiviert, werden die Dateien während der Installation in das von Invoke verwaltete Modellverzeichnis kopiert.",
|
||||
"scanFolderHelper": "Der Ordner wird rekursiv nach Modellen durchsucht. Dies kann bei sehr großen Ordnern etwas dauern."
|
||||
"scanFolderHelper": "Der Ordner wird rekursiv nach Modellen durchsucht. Dies kann bei sehr großen Ordnern etwas dauern.",
|
||||
"includesNModels": "Enthält {{n}} Modelle und deren Abhängigkeiten",
|
||||
"starterBundles": "Starterpakete",
|
||||
"installingXModels_one": "{{count}} Modell wird installiert",
|
||||
"installingXModels_other": "{{count}} Modelle werden installiert",
|
||||
"skippingXDuplicates_one": ", überspringe {{count}} Duplikat",
|
||||
"skippingXDuplicates_other": ", überspringe {{count}} Duplikate",
|
||||
"installingModel": "Modell wird installiert",
|
||||
"loraTriggerPhrases": "LoRA-Auslösephrasen",
|
||||
"installingBundle": "Bündel wird installiert",
|
||||
"triggerPhrases": "Auslösephrasen",
|
||||
"mainModelTriggerPhrases": "Hauptmodell-Auslösephrasen"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
@@ -649,10 +677,41 @@
|
||||
"toast": {
|
||||
"uploadFailed": "Hochladen fehlgeschlagen",
|
||||
"imageCopied": "Bild kopiert",
|
||||
"parametersNotSet": "Parameter nicht festgelegt",
|
||||
"parametersNotSet": "Parameter nicht zurückgerufen",
|
||||
"addedToBoard": "Dem Board hinzugefügt",
|
||||
"loadedWithWarnings": "Workflow mit Warnungen geladen",
|
||||
"imageSaved": "Bild gespeichert"
|
||||
"imageSaved": "Bild gespeichert",
|
||||
"linkCopied": "Link kopiert",
|
||||
"problemCopyingLayer": "Ebene kann nicht kopiert werden",
|
||||
"problemSavingLayer": "Ebene kann nicht gespeichert werden",
|
||||
"parameterSetDesc": "{{parameter}} zurückgerufen",
|
||||
"imageUploaded": "Bild hochgeladen",
|
||||
"problemCopyingImage": "Bild kann nicht kopiert werden",
|
||||
"parameterNotSetDesc": "{{parameter}} kann nicht zurückgerufen werden",
|
||||
"prunedQueue": "Warteschlange bereinigt",
|
||||
"modelAddedSimple": "Modell zur Warteschlange hinzugefügt",
|
||||
"parametersSet": "Parameter zurückgerufen",
|
||||
"imageNotLoadedDesc": "Bild konnte nicht gefunden werden",
|
||||
"setControlImage": "Als Kontrollbild festlegen",
|
||||
"sentToUpscale": "An Vergrößerung gesendet",
|
||||
"parameterNotSetDescWithMessage": "{{parameter}} kann nicht zurückgerufen werden: {{message}}",
|
||||
"unableToLoadImageMetadata": "Bildmetadaten können nicht geladen werden",
|
||||
"unableToLoadImage": "Bild kann nicht geladen werden",
|
||||
"serverError": "Serverfehler",
|
||||
"parameterNotSet": "Parameter nicht zurückgerufen",
|
||||
"sessionRef": "Sitzung: {{sessionId}}",
|
||||
"problemDownloadingImage": "Bild kann nicht heruntergeladen werden",
|
||||
"parameters": "Parameter",
|
||||
"parameterSet": "Parameter zurückgerufen",
|
||||
"importFailed": "Import fehlgeschlagen",
|
||||
"importSuccessful": "Import erfolgreich",
|
||||
"setNodeField": "Als Knotenfeld festlegen",
|
||||
"somethingWentWrong": "Etwas ist schief gelaufen",
|
||||
"workflowLoaded": "Arbeitsablauf geladen",
|
||||
"workflowDeleted": "Arbeitsablauf gelöscht",
|
||||
"errorCopied": "Fehler kopiert",
|
||||
"layerCopiedToClipboard": "Ebene in die Zwischenablage kopiert",
|
||||
"sentToCanvas": "An Leinwand gesendet"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Bild hochladen",
|
||||
@@ -667,7 +726,8 @@
|
||||
"about": "Über",
|
||||
"submitSupportTicket": "Support-Ticket senden",
|
||||
"toggleRightPanel": "Rechtes Bedienfeld umschalten (G)",
|
||||
"toggleLeftPanel": "Linkes Bedienfeld umschalten (T)"
|
||||
"toggleLeftPanel": "Linkes Bedienfeld umschalten (T)",
|
||||
"uploadImages": "Bild(er) hochladen"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Board automatisch erstellen",
|
||||
@@ -702,13 +762,14 @@
|
||||
"shared": "Geteilte Ordner",
|
||||
"archiveBoard": "Ordner archivieren",
|
||||
"archived": "Archiviert",
|
||||
"noBoards": "Kein {boardType}} Ordner",
|
||||
"noBoards": "Kein {{boardType}} Ordner",
|
||||
"hideBoards": "Ordner verstecken",
|
||||
"viewBoards": "Ordner ansehen",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt.",
|
||||
"assetsWithCount_one": "{{count}} in der Sammlung",
|
||||
"assetsWithCount_other": "{{count}} in der Sammlung",
|
||||
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand."
|
||||
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
|
||||
"updateBoardError": "Fehler beim Aktualisieren des Ordners"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -795,7 +856,6 @@
|
||||
"width": "Breite",
|
||||
"createdBy": "Erstellt von",
|
||||
"steps": "Schritte",
|
||||
"seamless": "Nahtlos",
|
||||
"positivePrompt": "Positiver Prompt",
|
||||
"generationMode": "Generierungsmodus",
|
||||
"Threshold": "Rauschen-Schwelle",
|
||||
@@ -811,7 +871,10 @@
|
||||
"parameterSet": "Parameter {{parameter}} setzen",
|
||||
"recallParameter": "{{label}} Abrufen",
|
||||
"parsingFailed": "Parsing Fehlgeschlagen",
|
||||
"canvasV2Metadata": "Leinwand"
|
||||
"canvasV2Metadata": "Leinwand",
|
||||
"guidance": "Führung",
|
||||
"seamlessXAxis": "Nahtlose X Achse",
|
||||
"seamlessYAxis": "Nahtlose Y Achse"
|
||||
},
|
||||
"popovers": {
|
||||
"noiseUseCPU": {
|
||||
@@ -1137,7 +1200,21 @@
|
||||
"workflowNotes": "Notizen",
|
||||
"workflowTags": "Tags",
|
||||
"workflowVersion": "Version",
|
||||
"saveToGallery": "In Galerie speichern"
|
||||
"saveToGallery": "In Galerie speichern",
|
||||
"noWorkflows": "Keine Arbeitsabläufe",
|
||||
"noMatchingWorkflows": "Keine passenden Arbeitsabläufe",
|
||||
"unknownErrorValidatingWorkflow": "Unbekannter Fehler beim Validieren des Arbeitsablaufes",
|
||||
"inputFieldTypeParseError": "Typ des Eingabefelds {{node}}.{{field}} kann nicht analysiert werden ({{message}})",
|
||||
"workflowSettings": "Arbeitsablauf Editor Einstellungen",
|
||||
"unableToLoadWorkflow": "Arbeitsablauf kann nicht geladen werden",
|
||||
"viewMode": "In linearen Ansicht verwenden",
|
||||
"unableToValidateWorkflow": "Arbeitsablauf kann nicht validiert werden",
|
||||
"outputFieldTypeParseError": "Typ des Ausgabefelds {{node}}.{{field}} kann nicht analysiert werden ({{message}})",
|
||||
"unableToGetWorkflowVersion": "Version des Arbeitsablaufschemas kann nicht bestimmt werden",
|
||||
"unknownFieldType": "$t(nodes.unknownField) Typ: {{type}}",
|
||||
"unknownField": "Unbekanntes Feld",
|
||||
"unableToUpdateNodes_one": "{{count}} Knoten kann nicht aktualisiert werden",
|
||||
"unableToUpdateNodes_other": "{{count}} Knoten können nicht aktualisiert werden"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Korrektur für hohe Auflösungen",
|
||||
@@ -1267,15 +1344,7 @@
|
||||
"enableLogging": "Protokollierung aktivieren"
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "Was gibt's Neues",
|
||||
"canvasV2Announcement": {
|
||||
"fluxSupport": "Unterstützung für Flux-Modelle",
|
||||
"newCanvas": "Eine leistungsstarke neue Kontrollfläche",
|
||||
"newLayerTypes": "Neue Ebenentypen für noch mehr Kontrolle",
|
||||
"readReleaseNotes": "Anmerkungen zu dieser Version lesen",
|
||||
"watchReleaseVideo": "Video über diese Version anzeigen",
|
||||
"watchUiUpdatesOverview": "Interface-Updates Übersicht"
|
||||
}
|
||||
"whatsNewInInvoke": "Was gibt's Neues"
|
||||
},
|
||||
"stylePresets": {
|
||||
"name": "Name",
|
||||
@@ -1326,7 +1395,13 @@
|
||||
"pullBboxIntoLayerOk": "Bbox in die Ebene gezogen",
|
||||
"saveBboxToGallery": "Bbox in Galerie speichern",
|
||||
"tool": {
|
||||
"bbox": "Bbox"
|
||||
"bbox": "Bbox",
|
||||
"brush": "Pinsel",
|
||||
"eraser": "Radiergummi",
|
||||
"colorPicker": "Farbwähler",
|
||||
"view": "Ansicht",
|
||||
"rectangle": "Rechteck",
|
||||
"move": "Verschieben"
|
||||
},
|
||||
"transform": {
|
||||
"fitToBbox": "An Bbox anpassen",
|
||||
@@ -1445,7 +1520,30 @@
|
||||
"layer_one": "Ebene",
|
||||
"layer_other": "Ebenen",
|
||||
"layer_withCount_one": "Ebene ({{count}})",
|
||||
"layer_withCount_other": "Ebenen ({{count}})"
|
||||
"layer_withCount_other": "Ebenen ({{count}})",
|
||||
"fill": {
|
||||
"fillStyle": "Füllstil",
|
||||
"diagonal": "Diagonal",
|
||||
"vertical": "Vertikal",
|
||||
"fillColor": "Füllfarbe",
|
||||
"grid": "Raster",
|
||||
"solid": "Solide",
|
||||
"crosshatch": "Kreuzschraffur",
|
||||
"horizontal": "Horizontal"
|
||||
},
|
||||
"filter": {
|
||||
"apply": "Anwenden",
|
||||
"reset": "Zurücksetzen",
|
||||
"cancel": "Abbrechen",
|
||||
"spandrel_filter": {
|
||||
"label": "Bild-zu-Bild Modell",
|
||||
"description": "Ein Bild-zu-Bild Modell auf der ausgewählten Ebene ausführen.",
|
||||
"model": "Modell"
|
||||
},
|
||||
"filters": "Filter",
|
||||
"filterType": "Filtertyp",
|
||||
"filter": "Filter"
|
||||
}
|
||||
},
|
||||
"upsell": {
|
||||
"shareAccess": "Zugang teilen",
|
||||
|
||||
@@ -94,6 +94,7 @@
|
||||
"close": "Close",
|
||||
"copy": "Copy",
|
||||
"copyError": "$t(gallery.copy) Error",
|
||||
"clipboard": "Clipboard",
|
||||
"on": "On",
|
||||
"off": "Off",
|
||||
"or": "or",
|
||||
@@ -121,6 +122,7 @@
|
||||
"goTo": "Go to",
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"loadingImage": "Loading Image",
|
||||
"loadingModel": "Loading Model",
|
||||
"imageFailedToLoad": "Unable to Load Image",
|
||||
"img2img": "Image To Image",
|
||||
"inpaint": "inpaint",
|
||||
@@ -173,7 +175,8 @@
|
||||
"placeholderSelectAModel": "Select a model",
|
||||
"reset": "Reset",
|
||||
"none": "None",
|
||||
"new": "New"
|
||||
"new": "New",
|
||||
"generating": "Generating"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "High Resolution Fix",
|
||||
@@ -681,7 +684,8 @@
|
||||
"recallParameters": "Recall Parameters",
|
||||
"recallParameter": "Recall {{label}}",
|
||||
"scheduler": "Scheduler",
|
||||
"seamless": "Seamless",
|
||||
"seamlessXAxis": "Seamless X Axis",
|
||||
"seamlessYAxis": "Seamless Y Axis",
|
||||
"seed": "Seed",
|
||||
"steps": "Steps",
|
||||
"strength": "Image to image strength",
|
||||
@@ -702,6 +706,8 @@
|
||||
"baseModel": "Base Model",
|
||||
"cancel": "Cancel",
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"clipLEmbed": "CLIP-L Embed",
|
||||
"clipGEmbed": "CLIP-G Embed",
|
||||
"config": "Config",
|
||||
"convert": "Convert",
|
||||
"convertingModelBegin": "Converting Model. Please wait.",
|
||||
@@ -712,8 +718,12 @@
|
||||
"convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
|
||||
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
|
||||
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
|
||||
"noDefaultSettings": "No default settings configured for this model. Visit the Model Manager to add default settings.",
|
||||
"defaultSettings": "Default Settings",
|
||||
"defaultSettingsSaved": "Default Settings Saved",
|
||||
"defaultSettingsOutOfSync": "Some settings do not match the model's defaults:",
|
||||
"restoreDefaultSettings": "Click to use the model's default settings.",
|
||||
"usingDefaultSettings": "Using model's default settings",
|
||||
"delete": "Delete",
|
||||
"deleteConfig": "Delete Config",
|
||||
"deleteModel": "Delete Model",
|
||||
@@ -727,7 +737,17 @@
|
||||
"huggingFacePlaceholder": "owner/model-name",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"huggingFaceHelper": "If multiple models are found in this repo, you will be prompted to select one to install.",
|
||||
"hfToken": "HuggingFace Token",
|
||||
"hfTokenLabel": "HuggingFace Token (Required for some models)",
|
||||
"hfTokenHelperText": "A HF token is required to use some models. Click here to create or get your token.",
|
||||
"hfTokenInvalid": "Invalid or Missing HF Token",
|
||||
"hfForbidden": "You do not have access to this HF model",
|
||||
"hfForbiddenErrorMessage": "We recommend visiting the repo page on HuggingFace.com. The owner may require acceptance of terms in order to download.",
|
||||
"hfTokenInvalidErrorMessage": "Invalid or missing HuggingFace token.",
|
||||
"hfTokenRequired": "You are trying to download a model that requires a valid HuggingFace Token.",
|
||||
"hfTokenInvalidErrorMessage2": "Update it in the ",
|
||||
"hfTokenUnableToVerify": "Unable to Verify HF Token",
|
||||
"hfTokenUnableToVerifyErrorMessage": "Unable to verify HuggingFace token. This is likely due to a network error. Please try again later.",
|
||||
"hfTokenSaved": "HF Token Saved",
|
||||
"imageEncoderModelId": "Image Encoder Model ID",
|
||||
"includesNModels": "Includes {{n}} models and their dependencies",
|
||||
"installQueue": "Install Queue",
|
||||
@@ -798,7 +818,6 @@
|
||||
"uploadImage": "Upload Image",
|
||||
"urlOrLocalPath": "URL or Local Path",
|
||||
"urlOrLocalPathHelper": "URLs should point to a single file. Local paths can point to a single file or folder for a single diffusers model.",
|
||||
"useDefaultSettings": "Use Default Settings",
|
||||
"vae": "VAE",
|
||||
"vaePrecision": "VAE Precision",
|
||||
"variant": "Variant",
|
||||
@@ -982,6 +1001,7 @@
|
||||
"controlNetControlMode": "Control Mode",
|
||||
"copyImage": "Copy Image",
|
||||
"denoisingStrength": "Denoising Strength",
|
||||
"disabledNoRasterContent": "Disabled (No Raster Content)",
|
||||
"downloadImage": "Download Image",
|
||||
"general": "General",
|
||||
"guidance": "Guidance",
|
||||
@@ -995,8 +1015,11 @@
|
||||
"addingImagesTo": "Adding images to",
|
||||
"invoke": "Invoke",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: missing input",
|
||||
"missingNodeTemplate": "Missing node template",
|
||||
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} empty collection",
|
||||
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: too few items, minimum {{minItems}}",
|
||||
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: too many items, maximum {{maxItems}}",
|
||||
"noModelSelected": "No model selected",
|
||||
"noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
|
||||
"noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
|
||||
@@ -1005,10 +1028,11 @@
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox height is {{height}}",
|
||||
"canvasIsFiltering": "Canvas is filtering",
|
||||
"canvasIsTransforming": "Canvas is transforming",
|
||||
"canvasIsRasterizing": "Canvas is rasterizing",
|
||||
"canvasIsCompositing": "Canvas is compositing",
|
||||
"canvasIsFiltering": "Canvas is busy (filtering)",
|
||||
"canvasIsTransforming": "Canvas is busy (transforming)",
|
||||
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
|
||||
"canvasIsCompositing": "Canvas is busy (compositing)",
|
||||
"canvasIsSelectingObject": "Canvas is busy (selecting object)",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected",
|
||||
@@ -1032,6 +1056,7 @@
|
||||
"patchmatchDownScaleSize": "Downscale",
|
||||
"perlinNoise": "Perlin Noise",
|
||||
"positivePromptPlaceholder": "Positive Prompt",
|
||||
"recallMetadata": "Recall Metadata",
|
||||
"iterations": "Iterations",
|
||||
"scale": "Scale",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
@@ -1108,6 +1133,9 @@
|
||||
"enableInformationalPopovers": "Enable Informational Popovers",
|
||||
"informationalPopoversDisabled": "Informational Popovers Disabled",
|
||||
"informationalPopoversDisabledDesc": "Informational popovers have been disabled. Enable them in Settings.",
|
||||
"enableModelDescriptions": "Enable Model Descriptions in Dropdowns",
|
||||
"modelDescriptionsDisabled": "Model Descriptions in Dropdowns Disabled",
|
||||
"modelDescriptionsDisabledDesc": "Model descriptions in dropdowns have been disabled. Enable them in Settings.",
|
||||
"enableInvisibleWatermark": "Enable Invisible Watermark",
|
||||
"enableNSFWChecker": "Enable NSFW Checker",
|
||||
"general": "General",
|
||||
@@ -1117,6 +1145,7 @@
|
||||
"resetWebUI": "Reset Web UI",
|
||||
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
|
||||
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
|
||||
"showDetailedInvocationProgress": "Show Progress Details",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
"ui": "User Interface",
|
||||
"clearIntermediatesDisabled": "Queue must be empty to clear intermediates",
|
||||
@@ -1251,6 +1280,33 @@
|
||||
"heading": "Mask Adjustments",
|
||||
"paragraphs": ["Adjust the mask."]
|
||||
},
|
||||
"inpainting": {
|
||||
"heading": "Inpainting",
|
||||
"paragraphs": ["Controls which area is modified, guided by Denoising Strength."]
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "Raster Layer",
|
||||
"paragraphs": ["Pixel-based content of your canvas, used during image generation."]
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "Regional Guidance",
|
||||
"paragraphs": ["Brush to guide where elements from global prompts should appear."]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "Regional Guidance and Regional Reference Image",
|
||||
"paragraphs": [
|
||||
"For Regional Guidance, brush to guide where elements from global prompts should appear.",
|
||||
"For Regional Reference Image, brush to apply a reference image to specific areas."
|
||||
]
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "Global Reference Image",
|
||||
"paragraphs": ["Applies a reference image to influence the entire generation."]
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "Regional Reference Image",
|
||||
"paragraphs": ["Brush to apply a reference image to specific areas."]
|
||||
},
|
||||
"controlNet": {
|
||||
"heading": "ControlNet",
|
||||
"paragraphs": [
|
||||
@@ -1271,7 +1327,7 @@
|
||||
"controlNetProcessor": {
|
||||
"heading": "Processor",
|
||||
"paragraphs": [
|
||||
"Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
|
||||
"Method of processing the input image to guide the generation process. Different processors will provide different effects or styles in your generated images."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
@@ -1366,8 +1422,9 @@
|
||||
"paramDenoisingStrength": {
|
||||
"heading": "Denoising Strength",
|
||||
"paragraphs": [
|
||||
"How much noise is added to the input image.",
|
||||
"0 will result in an identical image, while 1 will result in a completely new image."
|
||||
"Controls how much the generated image varies from the raster layer(s).",
|
||||
"Lower strength stays closer to the combined visible raster layers. Higher strength relies more on the global prompt.",
|
||||
"When there are no raster layers with visible content, this setting is ignored."
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
@@ -1606,21 +1663,24 @@
|
||||
"newControlLayerError": "Problem Creating Control Layer",
|
||||
"newRasterLayerOk": "Created Raster Layer",
|
||||
"newRasterLayerError": "Problem Creating Raster Layer",
|
||||
"newFromImage": "New from Image",
|
||||
"pullBboxIntoLayerOk": "Bbox Pulled Into Layer",
|
||||
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
|
||||
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
|
||||
"pullBboxIntoReferenceImageError": "Problem Pulling BBox Into ReferenceImage",
|
||||
"regionIsEmpty": "Selected region is empty",
|
||||
"mergeVisible": "Merge Visible",
|
||||
"mergeVisibleOk": "Merged visible layers",
|
||||
"mergeVisibleError": "Error merging visible layers",
|
||||
"mergeDown": "Merge Down",
|
||||
"mergeVisibleOk": "Merged layers",
|
||||
"mergeVisibleError": "Error merging layers",
|
||||
"mergingLayers": "Merging layers",
|
||||
"clearHistory": "Clear History",
|
||||
"bboxOverlay": "Show Bbox Overlay",
|
||||
"resetCanvas": "Reset Canvas",
|
||||
"clearCaches": "Clear Caches",
|
||||
"recalculateRects": "Recalculate Rects",
|
||||
"clipToBbox": "Clip Strokes to Bbox",
|
||||
"outputOnlyMaskedRegions": "Output Only Masked Regions",
|
||||
"outputOnlyMaskedRegions": "Output Only Generated Regions",
|
||||
"addLayer": "Add Layer",
|
||||
"duplicate": "Duplicate",
|
||||
"moveToFront": "Move to Front",
|
||||
@@ -1648,6 +1708,8 @@
|
||||
"controlLayer": "Control Layer",
|
||||
"inpaintMask": "Inpaint Mask",
|
||||
"regionalGuidance": "Regional Guidance",
|
||||
"canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
|
||||
"canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
|
||||
"referenceImage": "Reference Image",
|
||||
"regionalReferenceImage": "Regional Reference Image",
|
||||
"globalReferenceImage": "Global Reference Image",
|
||||
@@ -1688,8 +1750,18 @@
|
||||
"layer_other": "Layers",
|
||||
"layer_withCount_one": "Layer ({{count}})",
|
||||
"layer_withCount_other": "Layers ({{count}})",
|
||||
"convertToControlLayer": "Convert to Control Layer",
|
||||
"convertToRasterLayer": "Convert to Raster Layer",
|
||||
"convertRasterLayerTo": "Convert $t(controlLayers.rasterLayer) To",
|
||||
"convertControlLayerTo": "Convert $t(controlLayers.controlLayer) To",
|
||||
"convertInpaintMaskTo": "Convert $t(controlLayers.inpaintMask) To",
|
||||
"convertRegionalGuidanceTo": "Convert $t(controlLayers.regionalGuidance) To",
|
||||
"copyRasterLayerTo": "Copy $t(controlLayers.rasterLayer) To",
|
||||
"copyControlLayerTo": "Copy $t(controlLayers.controlLayer) To",
|
||||
"copyInpaintMaskTo": "Copy $t(controlLayers.inpaintMask) To",
|
||||
"copyRegionalGuidanceTo": "Copy $t(controlLayers.regionalGuidance) To",
|
||||
"newRasterLayer": "New $t(controlLayers.rasterLayer)",
|
||||
"newControlLayer": "New $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "New $t(controlLayers.inpaintMask)",
|
||||
"newRegionalGuidance": "New $t(controlLayers.regionalGuidance)",
|
||||
"transparency": "Transparency",
|
||||
"enableTransparencyEffect": "Enable Transparency Effect",
|
||||
"disableTransparencyEffect": "Disable Transparency Effect",
|
||||
@@ -1713,16 +1785,18 @@
|
||||
"newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
|
||||
"newCanvasSession": "New Canvas Session",
|
||||
"newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
|
||||
"replaceCurrent": "Replace Current",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
|
||||
"controlMode": {
|
||||
"controlMode": "Control Mode",
|
||||
"balanced": "Balanced",
|
||||
"balanced": "Balanced (recommended)",
|
||||
"prompt": "Prompt",
|
||||
"control": "Control",
|
||||
"megaControl": "Mega Control"
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"ipAdapterMethod": "IP Adapter Method",
|
||||
"full": "Full",
|
||||
"full": "Style and Composition",
|
||||
"style": "Style Only",
|
||||
"composition": "Composition Only"
|
||||
},
|
||||
@@ -1754,6 +1828,9 @@
|
||||
"process": "Process",
|
||||
"apply": "Apply",
|
||||
"cancel": "Cancel",
|
||||
"advanced": "Advanced",
|
||||
"processingLayerWith": "Processing layer with the {{type}} filter.",
|
||||
"forMoreControl": "For more control, click Advanced below.",
|
||||
"spandrel_filter": {
|
||||
"label": "Image-to-Image Model",
|
||||
"description": "Run an image-to-image model on the selected layer.",
|
||||
@@ -1842,6 +1919,25 @@
|
||||
"apply": "Apply",
|
||||
"cancel": "Cancel"
|
||||
},
|
||||
"selectObject": {
|
||||
"selectObject": "Select Object",
|
||||
"pointType": "Point Type",
|
||||
"invertSelection": "Invert Selection",
|
||||
"include": "Include",
|
||||
"exclude": "Exclude",
|
||||
"neutral": "Neutral",
|
||||
"apply": "Apply",
|
||||
"reset": "Reset",
|
||||
"saveAs": "Save As",
|
||||
"cancel": "Cancel",
|
||||
"process": "Process",
|
||||
"help1": "Select a single target object. Add <Bold>Include</Bold> and <Bold>Exclude</Bold> points to indicate which parts of the layer are part of the target object.",
|
||||
"help2": "Start with one <Bold>Include</Bold> point within the target object. Add more points to refine the selection. Fewer points typically produce better results.",
|
||||
"help3": "Invert the selection to select everything except the target object.",
|
||||
"clickToAdd": "Click on the layer to add a point",
|
||||
"dragToMove": "Drag a point to move it",
|
||||
"clickToRemove": "Click on a point to remove it"
|
||||
},
|
||||
"settings": {
|
||||
"snapToGrid": {
|
||||
"label": "Snap to Grid",
|
||||
@@ -1852,10 +1948,10 @@
|
||||
"label": "Preserve Masked Region",
|
||||
"alert": "Preserving Masked Region"
|
||||
},
|
||||
"isolatedPreview": "Isolated Preview",
|
||||
"isolatedStagingPreview": "Isolated Staging Preview",
|
||||
"isolatedFilteringPreview": "Isolated Filtering Preview",
|
||||
"isolatedTransformingPreview": "Isolated Transforming Preview",
|
||||
"isolatedPreview": "Isolated Preview",
|
||||
"isolatedLayerPreview": "Isolated Layer Preview",
|
||||
"isolatedLayerPreviewDesc": "Whether to show only this layer when performing operations like filtering or transforming.",
|
||||
"invertBrushSizeScrollDirection": "Invert Scroll for Brush Size",
|
||||
"pressureSensitivity": "Pressure Sensitivity"
|
||||
},
|
||||
@@ -1881,6 +1977,8 @@
|
||||
"newRegionalReferenceImage": "New Regional Reference Image",
|
||||
"newControlLayer": "New Control Layer",
|
||||
"newRasterLayer": "New Raster Layer",
|
||||
"newInpaintMask": "New Inpaint Mask",
|
||||
"newRegionalGuidance": "New Regional Guidance",
|
||||
"cropCanvasToBbox": "Crop Canvas to Bbox"
|
||||
},
|
||||
"stagingArea": {
|
||||
@@ -1910,7 +2008,9 @@
|
||||
"upscaleModelDesc": "Upscale (image to image) model",
|
||||
"missingUpscaleInitialImage": "Missing initial image for upscaling",
|
||||
"missingUpscaleModel": "Missing upscale model",
|
||||
"missingTileControlNetModel": "No valid tile ControlNet models installed"
|
||||
"missingTileControlNetModel": "No valid tile ControlNet models installed",
|
||||
"incompatibleBaseModel": "Unsupported main model architecture for upscaling",
|
||||
"incompatibleBaseModelDesc": "Upscaling is supported for SD1.5 and SDXL architecture models only. Change the main model to enable upscaling."
|
||||
},
|
||||
"stylePresets": {
|
||||
"active": "Active",
|
||||
@@ -2013,13 +2113,12 @@
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "What's New in Invoke",
|
||||
"canvasV2Announcement": {
|
||||
"newCanvas": "A powerful new control canvas",
|
||||
"newLayerTypes": "New layer types for even more control",
|
||||
"fluxSupport": "Support for the Flux family of models",
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchReleaseVideo": "Watch Release Video",
|
||||
"watchUiUpdatesOverview": "Watch UI Updates Overview"
|
||||
}
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: Support for SD 3.5 Medium and Large.",
|
||||
"<StrongComponent>Canvas</StrongComponent>: Streamlined Control Layer processing and improved default Control settings."
|
||||
],
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchRecentReleaseVideos": "Watch Recent Release Videos",
|
||||
"watchUiUpdatesOverview": "Watch UI Updates Overview"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"discordLabel": "Discord",
|
||||
"back": "Atrás",
|
||||
"loading": "Cargando",
|
||||
"postprocessing": "Postprocesado",
|
||||
"postprocessing": "Postprocesamiento",
|
||||
"txt2img": "De texto a imagen",
|
||||
"accept": "Aceptar",
|
||||
"cancel": "Cancelar",
|
||||
@@ -64,7 +64,7 @@
|
||||
"prevPage": "Página Anterior",
|
||||
"red": "Rojo",
|
||||
"alpha": "Transparencia",
|
||||
"outputs": "Salidas",
|
||||
"outputs": "Resultados",
|
||||
"learnMore": "Aprende más",
|
||||
"enabled": "Activado",
|
||||
"disabled": "Desactivado",
|
||||
@@ -73,7 +73,32 @@
|
||||
"created": "Creado",
|
||||
"save": "Guardar",
|
||||
"unknownError": "Error Desconocido",
|
||||
"blue": "Azul"
|
||||
"blue": "Azul",
|
||||
"clipboard": "Portapapeles",
|
||||
"loadingImage": "Cargando la imagen",
|
||||
"inpaint": "inpaint",
|
||||
"ipAdapter": "Adaptador IP",
|
||||
"t2iAdapter": "Adaptador T2I",
|
||||
"apply": "Aplicar",
|
||||
"openInViewer": "Abrir en el visor",
|
||||
"off": "Apagar",
|
||||
"generating": "Generando",
|
||||
"ok": "De acuerdo",
|
||||
"placeholderSelectAModel": "Seleccionar un modelo",
|
||||
"reset": "Restablecer",
|
||||
"none": "Ninguno",
|
||||
"new": "Nuevo",
|
||||
"dontShowMeThese": "No mostrar estos",
|
||||
"loadingModel": "Cargando el modelo",
|
||||
"view": "Ver",
|
||||
"edit": "Editar",
|
||||
"safetensors": "Safetensors",
|
||||
"toResolve": "Para resolver",
|
||||
"localSystem": "Sistema local",
|
||||
"notInstalled": "No $t(common.installed)",
|
||||
"outpaint": "outpaint",
|
||||
"simple": "Sencillo",
|
||||
"close": "Cerrar"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@@ -85,7 +110,63 @@
|
||||
"deleteImage_other": "Eliminar {{count}} Imágenes",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"assets": "Activos",
|
||||
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
|
||||
"autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
|
||||
"gallery": "Galería",
|
||||
"noImageSelected": "Sin imágenes seleccionadas",
|
||||
"bulkDownloadRequestFailed": "Error al preparar la descarga",
|
||||
"oldestFirst": "La más antigua primero",
|
||||
"sideBySide": "conjuntamente",
|
||||
"selectForCompare": "Seleccionar para comparar",
|
||||
"alwaysShowImageSizeBadge": "Mostrar siempre las dimensiones de la imagen",
|
||||
"currentlyInUse": "Esta imagen se utiliza actualmente con las siguientes funciones:",
|
||||
"unableToLoad": "No se puede cargar la galería",
|
||||
"selectAllOnPage": "Seleccionar todo en la página",
|
||||
"selectAnImageToCompare": "Seleccione una imagen para comparar",
|
||||
"bulkDownloadFailed": "Error en la descarga",
|
||||
"compareHelp2": "Presione <Kbd> M </Kbd> para recorrer los modos de comparación.",
|
||||
"move": "Mover",
|
||||
"copy": "Copiar",
|
||||
"drop": "Gota",
|
||||
"displayBoardSearch": "Tablero de búsqueda",
|
||||
"deleteSelection": "Borrar selección",
|
||||
"downloadSelection": "Descargar selección",
|
||||
"openInViewer": "Abrir en el visor",
|
||||
"searchImages": "Búsqueda por metadatos",
|
||||
"swapImages": "Intercambiar imágenes",
|
||||
"sortDirection": "Orden de clasificación",
|
||||
"showStarredImagesFirst": "Mostrar imágenes destacadas primero",
|
||||
"go": "Ir",
|
||||
"bulkDownloadRequested": "Preparando la descarga",
|
||||
"image": "imagen",
|
||||
"compareHelp4": "Presione <Kbd> Z </Kbd> o <Kbd> Esc </Kbd> para salir.",
|
||||
"viewerImage": "Ver imagen",
|
||||
"dropOrUpload": "$t(gallery.drop) o cargar",
|
||||
"displaySearch": "Buscar imagen",
|
||||
"download": "Descargar",
|
||||
"exitBoardSearch": "Finalizar búsqueda",
|
||||
"exitSearch": "Salir de la búsqueda de imágenes",
|
||||
"featuresWillReset": "Si elimina esta imagen, dichas funciones se restablecerán inmediatamente.",
|
||||
"jump": "Omitir",
|
||||
"loading": "Cargando",
|
||||
"newestFirst": "La más nueva primero",
|
||||
"unstarImage": "Dejar de ser favorita",
|
||||
"bulkDownloadRequestedDesc": "Su solicitud de descarga se está preparando. Esto puede tardar unos minutos.",
|
||||
"hover": "Desplazar",
|
||||
"compareHelp1": "Mantenga presionada la tecla <Kbd> Alt </Kbd> mientras hace clic en una imagen de la galería o utiliza las teclas de flecha para cambiar la imagen de comparación.",
|
||||
"stretchToFit": "Estirar para encajar",
|
||||
"exitCompare": "Salir de la comparación",
|
||||
"starImage": "Imágenes favoritas",
|
||||
"dropToUpload": "$t(gallery.drop) para cargar",
|
||||
"slider": "Deslizador",
|
||||
"assetsTab": "Archivos que has cargado para utilizarlos en tus proyectos.",
|
||||
"imagesTab": "Imágenes que ha creado y guardado en Invoke.",
|
||||
"compareImage": "Comparar imagen",
|
||||
"boardsSettings": "Ajustes de los tableros",
|
||||
"imagesSettings": "Configuración de imágenes de la galería",
|
||||
"compareHelp3": "Presione <Kbd> C </Kbd> para intercambiar las imágenes comparadas.",
|
||||
"showArchivedBoards": "Mostrar paneles archivados",
|
||||
"closeViewer": "Cerrar visor",
|
||||
"openViewer": "Abrir visor"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Gestor de Modelos",
|
||||
@@ -131,7 +212,13 @@
|
||||
"modelDeleted": "Modelo eliminado",
|
||||
"modelDeleteFailed": "Error al borrar el modelo",
|
||||
"settings": "Ajustes",
|
||||
"syncModels": "Sincronizar las plantillas"
|
||||
"syncModels": "Sincronizar las plantillas",
|
||||
"clipEmbed": "Incrustar CLIP",
|
||||
"addModels": "Añadir modelos",
|
||||
"advanced": "Avanzado",
|
||||
"clipGEmbed": "Incrustar CLIP-G",
|
||||
"cancel": "Cancelar",
|
||||
"clipLEmbed": "Incrustar CLIP-L"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Imágenes",
|
||||
@@ -158,19 +245,19 @@
|
||||
"useSeed": "Usar Semilla",
|
||||
"useAll": "Usar Todo",
|
||||
"info": "Información",
|
||||
"showOptionsPanel": "Mostrar panel de opciones",
|
||||
"showOptionsPanel": "Mostrar panel lateral (O o T)",
|
||||
"symmetry": "Simetría",
|
||||
"copyImage": "Copiar la imagen",
|
||||
"general": "General",
|
||||
"denoisingStrength": "Intensidad de la eliminación del ruido",
|
||||
"seamlessXAxis": "Eje x",
|
||||
"seamlessYAxis": "Eje y",
|
||||
"seamlessXAxis": "Eje X sin juntas",
|
||||
"seamlessYAxis": "Eje Y sin juntas",
|
||||
"scheduler": "Programador",
|
||||
"positivePromptPlaceholder": "Prompt Positivo",
|
||||
"negativePromptPlaceholder": "Prompt Negativo",
|
||||
"controlNetControlMode": "Modo de control",
|
||||
"clipSkip": "Omitir el CLIP",
|
||||
"maskBlur": "Difuminar",
|
||||
"maskBlur": "Desenfoque de máscara",
|
||||
"patchmatchDownScaleSize": "Reducir a escala",
|
||||
"coherenceMode": "Modo"
|
||||
},
|
||||
@@ -202,16 +289,19 @@
|
||||
"serverError": "Error en el servidor",
|
||||
"canceled": "Procesando la cancelación",
|
||||
"connected": "Conectado al servidor",
|
||||
"uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
|
||||
"parameterSet": "Conjunto de parámetros",
|
||||
"parameterNotSet": "Parámetro no configurado",
|
||||
"uploadFailedInvalidUploadDesc": "Deben ser imágenes PNG o JPEG.",
|
||||
"parameterSet": "Parámetro recuperado",
|
||||
"parameterNotSet": "Parámetro no recuperado",
|
||||
"problemCopyingImage": "No se puede copiar la imagen",
|
||||
"errorCopied": "Error al copiar",
|
||||
"baseModelChanged": "Modelo base cambiado",
|
||||
"addedToBoard": "Añadido al tablero",
|
||||
"addedToBoard": "Se agregó a los activos del panel {{name}}",
|
||||
"baseModelChangedCleared_one": "Borrado o desactivado {{count}} submodelo incompatible",
|
||||
"baseModelChangedCleared_many": "Borrados o desactivados {{count}} submodelos incompatibles",
|
||||
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles"
|
||||
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles",
|
||||
"addedToUncategorized": "Añadido a los activos del tablero $t(boards.uncategorized)",
|
||||
"imagesWillBeAddedTo": "Las imágenes subidas se añadirán a los activos del panel {{boardName}}.",
|
||||
"layerCopiedToClipboard": "Capa copiada en el portapapeles"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Activar la barra de progreso",
|
||||
@@ -226,7 +316,8 @@
|
||||
"mode": "Modo",
|
||||
"submitSupportTicket": "Enviar Ticket de Soporte",
|
||||
"toggleRightPanel": "Activar o desactivar el panel derecho (G)",
|
||||
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)"
|
||||
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)",
|
||||
"uploadImages": "Cargar imagen(es)"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@@ -238,7 +329,8 @@
|
||||
"showMinimapnodes": "Mostrar el minimapa",
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON",
|
||||
"boardAccessError": "No se puede encontrar el panel {{board_id}}, se está restableciendo al valor predeterminado"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
@@ -255,7 +347,7 @@
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar solo el panel' transferirá las imágenes a un estado sin categorizar.",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
@@ -263,29 +355,33 @@
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan",
|
||||
"noMatching": "Sin paneles coincidentes",
|
||||
"imagesWithCount_one": "{{count}} imagen",
|
||||
"imagesWithCount_many": "{{count}} imágenes",
|
||||
"imagesWithCount_other": "{{count}} imágenes",
|
||||
"assetsWithCount_one": "{{count}} activo",
|
||||
"assetsWithCount_many": "{{count}} activos",
|
||||
"assetsWithCount_other": "{{count}} activos",
|
||||
"hideBoards": "Ocultar Paneles",
|
||||
"addPrivateBoard": "Agregar un tablero privado",
|
||||
"addSharedBoard": "Agregar Panel Compartido",
|
||||
"hideBoards": "Ocultar paneles",
|
||||
"addPrivateBoard": "Agregar un panel privado",
|
||||
"addSharedBoard": "Añadir panel compartido",
|
||||
"boards": "Paneles",
|
||||
"archiveBoard": "Archivar Panel",
|
||||
"archiveBoard": "Archivar panel",
|
||||
"archived": "Archivado",
|
||||
"selectedForAutoAdd": "Seleccionado para agregar automáticamente",
|
||||
"unarchiveBoard": "Desarchivar el tablero",
|
||||
"noBoards": "No hay tableros {{boardType}}",
|
||||
"shared": "Carpetas compartidas",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Los tableros eliminados no se pueden restaurar. Al elegir \"Eliminar solo tablero\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen."
|
||||
"unarchiveBoard": "Desarchivar el panel",
|
||||
"noBoards": "No hay paneles {{boardType}}",
|
||||
"shared": "Paneles compartidos",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al elegir \"Eliminar solo el panel\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen.",
|
||||
"viewBoards": "Ver paneles",
|
||||
"private": "Paneles privados",
|
||||
"updateBoardError": "No se pudo actualizar el panel"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
"infillTab": "Relleno",
|
||||
"coherenceTab": "Parámetros de la coherencia"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
@@ -309,7 +405,10 @@
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"upscaling": "Upscaling",
|
||||
"gallery": "Galería",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"queue": {
|
||||
@@ -317,12 +416,81 @@
|
||||
"front": "Delante",
|
||||
"batchQueuedDesc_one": "Se agregó {{count}} sesión a {{direction}} la cola",
|
||||
"batchQueuedDesc_many": "Se agregaron {{count}} sesiones a {{direction}} la cola",
|
||||
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola"
|
||||
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola",
|
||||
"clearQueueAlertDialog": "Al vaciar la cola se cancela inmediatamente cualquier elemento de procesamiento y se vaciará la cola por completo. Los filtros pendientes se cancelarán.",
|
||||
"time": "Tiempo",
|
||||
"clearFailed": "Error al vaciar la cola",
|
||||
"cancelFailed": "Error al cancelar el elemento",
|
||||
"resumeFailed": "Error al reanudar el proceso",
|
||||
"pause": "Pausar",
|
||||
"pauseTooltip": "Pausar el proceso",
|
||||
"cancelBatchSucceeded": "Lote cancelado",
|
||||
"pruneSucceeded": "Se purgaron {{item_count}} elementos completados de la cola",
|
||||
"pruneFailed": "Error al purgar la cola",
|
||||
"cancelBatchFailed": "Error al cancelar los lotes",
|
||||
"pauseFailed": "Error al pausar el proceso",
|
||||
"status": "Estado",
|
||||
"origin": "Origen",
|
||||
"destination": "Destino",
|
||||
"generations_one": "Generación",
|
||||
"generations_many": "Generaciones",
|
||||
"generations_other": "Generaciones",
|
||||
"resume": "Reanudar",
|
||||
"queueEmpty": "Cola vacía",
|
||||
"cancelItem": "Cancelar elemento",
|
||||
"cancelBatch": "Cancelar lote",
|
||||
"openQueue": "Abrir la cola",
|
||||
"completed": "Completado",
|
||||
"enqueueing": "Añadir lotes a la cola",
|
||||
"clear": "Limpiar",
|
||||
"pauseSucceeded": "Proceso pausado",
|
||||
"resumeSucceeded": "Proceso reanudado",
|
||||
"resumeTooltip": "Reanudar proceso",
|
||||
"cancel": "Cancelar",
|
||||
"cancelTooltip": "Cancelar artículo actual",
|
||||
"pruneTooltip": "Purgar {{item_count}} elementos completados",
|
||||
"batchQueued": "Lote en cola",
|
||||
"pending": "Pendiente",
|
||||
"item": "Elemento",
|
||||
"total": "Total",
|
||||
"in_progress": "En proceso",
|
||||
"failed": "Fallido",
|
||||
"completedIn": "Completado en",
|
||||
"upscaling": "Upscaling",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"workflows": "Flujo de trabajo",
|
||||
"other": "Otro",
|
||||
"queueFront": "Añadir al principio de la cola",
|
||||
"gallery": "Galería",
|
||||
"batchFieldValues": "Valores de procesamiento por lotes",
|
||||
"session": "Sesión",
|
||||
"notReady": "La cola aún no está lista",
|
||||
"graphQueued": "Gráfico en cola",
|
||||
"clearQueueAlertDialog2": "¿Estás seguro que deseas vaciar la cola?",
|
||||
"next": "Siguiente",
|
||||
"iterations_one": "Interacción",
|
||||
"iterations_many": "Interacciones",
|
||||
"iterations_other": "Interacciones",
|
||||
"current": "Actual",
|
||||
"queue": "Cola",
|
||||
"queueBack": "Añadir a la cola",
|
||||
"cancelSucceeded": "Elemento cancelado",
|
||||
"clearTooltip": "Cancelar y limpiar todos los elementos",
|
||||
"clearSucceeded": "Cola vaciada",
|
||||
"canceled": "Cancelado",
|
||||
"batch": "Lote",
|
||||
"graphFailedToQueue": "Error al poner el gráfico en cola",
|
||||
"batchFailedToQueue": "Error al poner en cola el lote",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_many": "Prompts",
|
||||
"prompts_other": "Prompts",
|
||||
"prune": "Eliminar"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invitar compañeros de equipo",
|
||||
"shareAccess": "Compartir acceso",
|
||||
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haz clic aquí o visita invoke.com/pricing para obtener más detalles."
|
||||
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haga clic aquí o visite invoke.com/pricing para obtener más detalles."
|
||||
},
|
||||
"controlLayers": {
|
||||
"layer_one": "Capa",
|
||||
@@ -330,6 +498,415 @@
|
||||
"layer_other": "Capas",
|
||||
"layer_withCount_one": "({{count}}) capa",
|
||||
"layer_withCount_many": "({{count}}) capas",
|
||||
"layer_withCount_other": "({{count}}) capas"
|
||||
"layer_withCount_other": "({{count}}) capas",
|
||||
"copyToClipboard": "Copiar al portapapeles"
|
||||
},
|
||||
"whatsNew": {
|
||||
"readReleaseNotes": "Leer las notas de la versión",
|
||||
"watchRecentReleaseVideos": "Ver videos de versiones recientes",
|
||||
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
|
||||
"whatsNewInInvoke": "Novedades en Invoke",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large.",
|
||||
"<StrongComponent>Lienzo</StrongComponent>: Se ha simplificado el procesamiento de la capa de control y se ha mejorado la configuración predeterminada del control."
|
||||
]
|
||||
},
|
||||
"invocationCache": {
|
||||
"enableFailed": "Error al activar la cache",
|
||||
"cacheSize": "Tamaño de la caché",
|
||||
"hits": "Accesos a la caché",
|
||||
"invocationCache": "Caché",
|
||||
"misses": "Errores de la caché",
|
||||
"clear": "Limpiar",
|
||||
"maxCacheSize": "Tamaño máximo de la caché",
|
||||
"enableSucceeded": "Cache activada",
|
||||
"clearFailed": "Error al borrar la cache",
|
||||
"enable": "Activar",
|
||||
"useCache": "Uso de la caché",
|
||||
"disableSucceeded": "Caché desactivada",
|
||||
"clearSucceeded": "Caché borrada",
|
||||
"disable": "Desactivar",
|
||||
"disableFailed": "Error al desactivar la caché"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "Solución de alta resolución",
|
||||
"enableHrf": "Activar corrección de alta resolución",
|
||||
"metadata": {
|
||||
"enabled": "Corrección de alta resolución activada",
|
||||
"strength": "Forzar la corrección de alta resolución",
|
||||
"method": "Método de corrección de alta resolución"
|
||||
},
|
||||
"upscaleMethod": "Método de expansión"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Añadir activador de los avisos",
|
||||
"compatibleEmbeddings": "Incrustaciones compatibles",
|
||||
"noMatchingTriggers": "No hay activadores coincidentes"
|
||||
},
|
||||
"hotkeys": {
|
||||
"hotkeys": "Atajo del teclado",
|
||||
"canvas": {
|
||||
"selectViewTool": {
|
||||
"desc": "Selecciona la herramienta de Visualización.",
|
||||
"title": "Visualización"
|
||||
},
|
||||
"cancelFilter": {
|
||||
"title": "Cancelar el filtro",
|
||||
"desc": "Cancelar el filtro pendiente."
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "Aplicar la transformación",
|
||||
"desc": "Aplicar la transformación pendiente a la capa seleccionada."
|
||||
},
|
||||
"applyFilter": {
|
||||
"desc": "Aplicar el filtro pendiente a la capa seleccionada.",
|
||||
"title": "Aplicar filtro"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "Pincel",
|
||||
"desc": "Selecciona la herramienta pincel."
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"desc": "Seleccionar la herramienta de selección del marco.",
|
||||
"title": "Selección del marco"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"desc": "Selecciona la herramienta Mover.",
|
||||
"title": "Mover"
|
||||
},
|
||||
"selectRectTool": {
|
||||
"title": "Rectángulo",
|
||||
"desc": "Selecciona la herramienta Rectángulo."
|
||||
},
|
||||
"decrementToolWidth": {
|
||||
"title": "Reducir el ancho de la herramienta",
|
||||
"desc": "Disminuye la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"incrementToolWidth": {
|
||||
"title": "Incrementar la anchura de la herramienta",
|
||||
"desc": "Aumenta la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "Ajustar bordes al lienzo",
|
||||
"desc": "Escala y posiciona la vista para ajustarla a los bodes."
|
||||
},
|
||||
"fitLayersToCanvas": {
|
||||
"title": "Ajustar capas al lienzo",
|
||||
"desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
|
||||
},
|
||||
"setFillToWhite": {
|
||||
"title": "Establecer color en blanco",
|
||||
"desc": "Establece el color actual de la herramienta en blanco."
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "Restablecer capa",
|
||||
"desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
|
||||
},
|
||||
"setZoomTo400Percent": {
|
||||
"desc": "Ajuste la aplicación del lienzo al 400%.",
|
||||
"title": "Ampliar al 400%"
|
||||
},
|
||||
"transformSelected": {
|
||||
"desc": "Transformar la capa seleccionada.",
|
||||
"title": "Transformar"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "Selector de color",
|
||||
"desc": "Seleccione la herramienta de selección de color."
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "Borrador",
|
||||
"desc": "Selecciona la herramienta Borrador."
|
||||
},
|
||||
"setZoomTo100Percent": {
|
||||
"title": "Ampliar al 100%",
|
||||
"desc": "Ajuste ampliar el lienzo al 100%."
|
||||
},
|
||||
"undo": {
|
||||
"title": "Deshacer",
|
||||
"desc": "Deshacer la última acción en el lienzo."
|
||||
},
|
||||
"nextEntity": {
|
||||
"desc": "Seleccione la siguiente capa de la lista.",
|
||||
"title": "Capa siguiente"
|
||||
},
|
||||
"redo": {
|
||||
"title": "Rehacer",
|
||||
"desc": "Rehacer la última acción en el lienzo."
|
||||
},
|
||||
"prevEntity": {
|
||||
"title": "Capa anterior",
|
||||
"desc": "Seleccione la capa anterior de la lista."
|
||||
},
|
||||
"title": "Lienzo",
|
||||
"setZoomTo200Percent": {
|
||||
"title": "Ampliar al 200%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 200%."
|
||||
},
|
||||
"setZoomTo800Percent": {
|
||||
"title": "Ampliar al 800%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 800%."
|
||||
},
|
||||
"filterSelected": {
|
||||
"desc": "Filtra la capa seleccionada. Solo se aplica a las capas Ráster y Control.",
|
||||
"title": "Filtrar"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "Cancelar transformación",
|
||||
"desc": "Cancelar la transformación pendiente."
|
||||
},
|
||||
"deleteSelected": {
|
||||
"title": "Borrar la capa",
|
||||
"desc": "Borrar la capa seleccionada."
|
||||
},
|
||||
"quickSwitch": {
|
||||
"desc": "Cambiar entre las dos últimas capas seleccionadas. Si una capa está seleccionada, cambia siempre entre ella y la última capa no seleccionada.",
|
||||
"title": "Cambio rápido de capa"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"selectModelsTab": {
|
||||
"title": "Seleccione la pestaña Modelos",
|
||||
"desc": "Selecciona la pestaña Modelos."
|
||||
},
|
||||
"focusPrompt": {
|
||||
"desc": "Mueve el foco del cursor a la indicación positiva.",
|
||||
"title": "Enfoque"
|
||||
},
|
||||
"toggleLeftPanel": {
|
||||
"title": "Alternar panel izquierdo",
|
||||
"desc": "Mostrar u ocultar el panel izquierdo."
|
||||
},
|
||||
"selectQueueTab": {
|
||||
"title": "Seleccione la pestaña Cola",
|
||||
"desc": "Seleccione la pestaña Cola."
|
||||
},
|
||||
"selectCanvasTab": {
|
||||
"title": "Seleccione la pestaña Lienzo",
|
||||
"desc": "Selecciona la pestaña Lienzo."
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "Vaciar cola",
|
||||
"desc": "Cancelar y variar todos los elementos de la cola."
|
||||
},
|
||||
"selectUpscalingTab": {
|
||||
"title": "Selecciona la pestaña Ampliar",
|
||||
"desc": "Selecciona la pestaña Aumento de escala."
|
||||
},
|
||||
"togglePanels": {
|
||||
"desc": "Muestra u oculta los paneles izquierdo y derecho a la vez.",
|
||||
"title": "Alternar paneles"
|
||||
},
|
||||
"toggleRightPanel": {
|
||||
"title": "Alternar panel derecho",
|
||||
"desc": "Mostrar u ocultar el panel derecho."
|
||||
},
|
||||
"invokeFront": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al principio de la cola.",
|
||||
"title": "Invocar (frente)"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "Cancelar",
|
||||
"desc": "Cancelar el elemento de la cola que se está procesando."
|
||||
},
|
||||
"invoke": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al final de la cola.",
|
||||
"title": "Invocar"
|
||||
},
|
||||
"title": "Aplicación",
|
||||
"selectWorkflowsTab": {
|
||||
"title": "Seleccione la pestaña Flujos de trabajo",
|
||||
"desc": "Selecciona la pestaña Flujos de trabajo."
|
||||
},
|
||||
"resetPanelLayout": {
|
||||
"title": "Reiniciar la posición del panel",
|
||||
"desc": "Restablece los paneles izquierdo y derecho a su tamaño y disposición por defecto."
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"addNode": {
|
||||
"title": "Añadir nodo",
|
||||
"desc": "Abrir añadir nodo."
|
||||
},
|
||||
"selectAll": {
|
||||
"title": "Seleccionar todo",
|
||||
"desc": "Seleccione todos los nodos y enlaces."
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todos los nodos y enlaces seleccionados.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"undo": {
|
||||
"desc": "Deshaga la última acción.",
|
||||
"title": "Deshacer"
|
||||
},
|
||||
"redo": {
|
||||
"desc": "Rehacer la última acción.",
|
||||
"title": "Rehacer"
|
||||
},
|
||||
"pasteSelection": {
|
||||
"desc": "Pegar nodos y bordes copiados.",
|
||||
"title": "Pegar"
|
||||
},
|
||||
"title": "Flujos de trabajo",
|
||||
"copySelection": {
|
||||
"desc": "Copiar nodos y bordes seleccionados.",
|
||||
"title": "Copiar"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
"desc": "Pega los nodos copiados, los enlaces y todos los enlaces conectados a los nodos copiados.",
|
||||
"title": "Pegar con enlaces"
|
||||
}
|
||||
},
|
||||
"viewer": {
|
||||
"useSize": {
|
||||
"title": "Usar dimensiones",
|
||||
"desc": "Utiliza las dimensiones de la imagen actual como el tamaño del borde."
|
||||
},
|
||||
"remix": {
|
||||
"title": "Remezcla",
|
||||
"desc": "Recupera todos los metadatos excepto la semilla de la imagen actual."
|
||||
},
|
||||
"loadWorkflow": {
|
||||
"desc": "Carga el flujo de trabajo guardado de la imagen actual (si tiene uno).",
|
||||
"title": "Cargar flujo de trabajo"
|
||||
},
|
||||
"recallAll": {
|
||||
"desc": "Recupera todos los metadatos de la imagen actual.",
|
||||
"title": "Recuperar todos los metadatos"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"desc": "Recuerde las indicaciones positivas y negativas de la imagen actual.",
|
||||
"title": "Recordatorios"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "Recuperar semilla",
|
||||
"desc": "Recupera la semilla de la imagen actual."
|
||||
},
|
||||
"runPostprocessing": {
|
||||
"title": "Ejecutar posprocesamiento",
|
||||
"desc": "Ejecutar el posprocesamiento seleccionado en la imagen actual."
|
||||
},
|
||||
"toggleMetadata": {
|
||||
"title": "Mostrar/ocultar los metadatos",
|
||||
"desc": "Mostrar u ocultar la superposición de metadatos de la imagen actual."
|
||||
},
|
||||
"nextComparisonMode": {
|
||||
"desc": "Desplácese por los modos de comparación.",
|
||||
"title": "Siguiente comparación"
|
||||
},
|
||||
"title": "Visor de imágenes",
|
||||
"toggleViewer": {
|
||||
"title": "Mostrar/Ocultar el visor de imágenes",
|
||||
"desc": "Mostrar u ocultar el visor de imágenes. Solo disponible en la pestaña Lienzo."
|
||||
},
|
||||
"swapImages": {
|
||||
"title": "Intercambiar imágenes en la comparación",
|
||||
"desc": "Intercambia las imágenes que se están comparando."
|
||||
}
|
||||
},
|
||||
"gallery": {
|
||||
"clearSelection": {
|
||||
"title": "Limpiar selección",
|
||||
"desc": "Borrar la selección actual, si hay alguna."
|
||||
},
|
||||
"galleryNavUp": {
|
||||
"title": "Subir",
|
||||
"desc": "Navega hacia arriba en la cuadrícula de la galería y selecciona esa imagen. Si estás en la parte superior de la página, ve a la página anterior."
|
||||
},
|
||||
"galleryNavLeft": {
|
||||
"title": "Izquierda",
|
||||
"desc": "Navegue hacia la izquierda en la rejilla de la galería, seleccionando esa imagen. Si está en la primera imagen de la fila, vaya a la fila anterior. Si está en la primera imagen de la página, vaya a la página anterior."
|
||||
},
|
||||
"galleryNavDown": {
|
||||
"title": "Bajar",
|
||||
"desc": "Navegue hacia abajo en la parrilla de la galería, seleccionando esa imagen. Si se encuentra al final de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavRight": {
|
||||
"title": "A la derecha",
|
||||
"desc": "Navegue hacia la derecha en la rejilla de la galería, seleccionando esa imagen. Si está en la última imagen de la fila, vaya a la fila siguiente. Si está en la última imagen de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavUpAlt": {
|
||||
"desc": "Igual que arriba, pero selecciona la imagen de comparación, abriendo el modo de comparación si no está ya abierto.",
|
||||
"title": "Arriba (Comparar imagen)"
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todas las imágenes seleccionadas. Por defecto, se le pedirá que confirme la eliminación. Si las imágenes están actualmente en uso en la aplicación, se te avisará.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"title": "Galería",
|
||||
"selectAllOnPage": {
|
||||
"title": "Seleccionar todo en la página",
|
||||
"desc": "Seleccionar todas las imágenes en la página actual."
|
||||
}
|
||||
},
|
||||
"searchHotkeys": "Buscar teclas de acceso rápido",
|
||||
"noHotkeysFound": "Sin teclas de acceso rápido",
|
||||
"clearSearch": "Limpiar la búsqueda"
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Orientación",
|
||||
"createdBy": "Creado por",
|
||||
"noImageDetails": "Sin detalles en la imagen",
|
||||
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
|
||||
"height": "Altura",
|
||||
"imageDimensions": "Dimensiones de la imagen",
|
||||
"seamlessXAxis": "Eje X sin juntas",
|
||||
"seamlessYAxis": "Eje Y sin juntas",
|
||||
"generationMode": "Modo de generación",
|
||||
"scheduler": "Programador",
|
||||
"width": "Ancho",
|
||||
"Threshold": "Umbral de ruido",
|
||||
"canvasV2Metadata": "Lienzo",
|
||||
"metadata": "Metadatos",
|
||||
"model": "Modelo",
|
||||
"allPrompts": "Todas las indicaciones",
|
||||
"cfgScale": "Escala CFG",
|
||||
"imageDetails": "Detalles de la imagen",
|
||||
"negativePrompt": "Indicación negativa",
|
||||
"noMetaData": "Sin metadatos",
|
||||
"parameterSet": "Parámetro {{parameter}} establecido",
|
||||
"vae": "Autocodificador",
|
||||
"workflow": "Flujo de trabajo",
|
||||
"seed": "Semilla",
|
||||
"strength": "Forzar imagen a imagen",
|
||||
"recallParameters": "Parámetros de recuperación",
|
||||
"recallParameter": "Recuperar {{label}}",
|
||||
"steps": "Pasos",
|
||||
"noRecallParameters": "Sin parámetros para recuperar",
|
||||
"parsingFailed": "Error al analizar"
|
||||
},
|
||||
"system": {
|
||||
"logLevel": {
|
||||
"debug": "Depurar",
|
||||
"info": "Información",
|
||||
"warn": "Advertir",
|
||||
"fatal": "Grave",
|
||||
"error": "Error",
|
||||
"trace": "Rastro",
|
||||
"logLevel": "Nivel del registro"
|
||||
},
|
||||
"enableLogging": "Activar registro",
|
||||
"logNamespaces": {
|
||||
"workflows": "Flujos de trabajo",
|
||||
"system": "Sistema",
|
||||
"metadata": "Metadatos",
|
||||
"gallery": "Galería",
|
||||
"logNamespaces": "Espacios para los nombres de registro",
|
||||
"generation": "Generación",
|
||||
"events": "Eventos",
|
||||
"canvas": "Lienzo",
|
||||
"config": "Ajustes",
|
||||
"models": "Modelos",
|
||||
"queue": "Cola"
|
||||
}
|
||||
},
|
||||
"newUserExperience": {
|
||||
"downloadStarterModels": "Descargar modelos de inicio",
|
||||
"toGetStarted": "Para empezar, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en <StrongComponent>Lienzo</StrongComponent>.",
|
||||
"importModels": "Importar modelos",
|
||||
"noModelsInstalled": "Parece que no tienes ningún modelo instalado",
|
||||
"gettingStartedSeries": "¿Desea más orientación? Consulte nuestra <LinkComponent>Serie de introducción</LinkComponent> para obtener consejos sobre cómo aprovechar todo el potencial de Invoke Studio.",
|
||||
"toGetStartedLocal": "Para empezar, asegúrate de descargar o importar los modelos necesarios para ejecutar Invoke. A continuación, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en el <StrongComponent>Lienzo</StrongComponent>."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
"reportBugLabel": "Signaler un bug",
|
||||
"settingsLabel": "Paramètres",
|
||||
"img2img": "Image vers Image",
|
||||
"nodes": "Processus",
|
||||
"upload": "Télécharger",
|
||||
"nodes": "Workflows",
|
||||
"upload": "Importer",
|
||||
"load": "Charger",
|
||||
"back": "Retour",
|
||||
"statusDisconnected": "Hors ligne",
|
||||
@@ -51,7 +51,7 @@
|
||||
"green": "Vert",
|
||||
"delete": "Supprimer",
|
||||
"simple": "Simple",
|
||||
"template": "Modèle",
|
||||
"template": "Template",
|
||||
"advanced": "Avancé",
|
||||
"copy": "Copier",
|
||||
"saveAs": "Enregistrer sous",
|
||||
@@ -95,7 +95,8 @@
|
||||
"positivePrompt": "Prompt Positif",
|
||||
"negativePrompt": "Prompt Négatif",
|
||||
"ok": "Ok",
|
||||
"close": "Fermer"
|
||||
"close": "Fermer",
|
||||
"clipboard": "Presse-papier"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Taille de l'image",
|
||||
@@ -117,8 +118,8 @@
|
||||
"bulkDownloadRequestFailed": "Problème lors de la préparation du téléchargement",
|
||||
"copy": "Copier",
|
||||
"autoAssignBoardOnClick": "Assigner automatiquement une Planche lors du clic",
|
||||
"dropToUpload": "$t(gallery.drop) pour Charger",
|
||||
"dropOrUpload": "$t(gallery.drop) ou Séléctioner",
|
||||
"dropToUpload": "$t(gallery.drop) pour Importer",
|
||||
"dropOrUpload": "$t(gallery.drop) ou Importer",
|
||||
"oldestFirst": "Plus Ancien en premier",
|
||||
"deleteImagePermanent": "Les Images supprimées ne peuvent pas être restorées.",
|
||||
"displaySearch": "Recherche d'Image",
|
||||
@@ -161,7 +162,7 @@
|
||||
"unstarImage": "Retirer le marquage de l'Image",
|
||||
"viewerImage": "Visualisation de l'Image",
|
||||
"imagesSettings": "Paramètres des images de la galerie",
|
||||
"assetsTab": "Fichiers que vous avez chargé pour vos projets.",
|
||||
"assetsTab": "Fichiers que vous avez importés pour vos projets.",
|
||||
"imagesTab": "Images que vous avez créées et enregistrées dans Invoke.",
|
||||
"boardsSettings": "Paramètres des planches"
|
||||
},
|
||||
@@ -219,7 +220,6 @@
|
||||
"typePhraseHere": "Écrire une phrase ici",
|
||||
"cancel": "Annuler",
|
||||
"defaultSettingsSaved": "Paramètres par défaut enregistrés",
|
||||
"hfToken": "Token HuggingFace",
|
||||
"imageEncoderModelId": "ID du modèle d'encodeur d'image",
|
||||
"path": "Chemin sur le disque",
|
||||
"repoVariant": "Variante de dépôt",
|
||||
@@ -243,7 +243,7 @@
|
||||
"noModelsInstalled": "Aucun modèle installé",
|
||||
"urlOrLocalPath": "URL ou chemin local",
|
||||
"prune": "Vider",
|
||||
"uploadImage": "Charger une image",
|
||||
"uploadImage": "Importer une image",
|
||||
"addModels": "Ajouter des modèles",
|
||||
"install": "Installer",
|
||||
"localOnly": "local uniquement",
|
||||
@@ -254,7 +254,6 @@
|
||||
"loraModels": "LoRAs",
|
||||
"main": "Principal",
|
||||
"urlOrLocalPathHelper": "Les URL doivent pointer vers un seul fichier. Les chemins locaux peuvent pointer vers un seul fichier ou un dossier pour un seul modèle de diffuseurs.",
|
||||
"useDefaultSettings": "Utiliser les paramètres par défaut",
|
||||
"modelImageUpdateFailed": "Mise à jour de l'image du modèle échouée",
|
||||
"loraTriggerPhrases": "Phrases de déclenchement LoRA",
|
||||
"mainModelTriggerPhrases": "Phrases de déclenchement du modèle principal",
|
||||
@@ -273,24 +272,39 @@
|
||||
"spandrelImageToImage": "Image vers Image (Spandrel)",
|
||||
"starterModelsInModelManager": "Les modèles de démarrage peuvent être trouvés dans le gestionnaire de modèles",
|
||||
"t5Encoder": "Encodeur T5",
|
||||
"learnMoreAboutSupportedModels": "En savoir plus sur les modèles que nous prenons en charge"
|
||||
"learnMoreAboutSupportedModels": "En savoir plus sur les modèles que nous prenons en charge",
|
||||
"includesNModels": "Contient {{n}} modèles et leurs dépendances",
|
||||
"starterBundles": "Packs de démarrages",
|
||||
"starterBundleHelpText": "Installe facilement tous les modèles nécessaire pour démarrer avec un modèle de base, incluant un modèle principal, ControlNets, IP Adapters et plus encore. Choisir un pack igniorera tous les modèles déjà installés.",
|
||||
"installingXModels_one": "En cours d'installation de {{count}} modèle",
|
||||
"installingXModels_many": "En cours d'installation de {{count}} modèles",
|
||||
"installingXModels_other": "En cours d'installation de {{count}} modèles",
|
||||
"skippingXDuplicates_one": ", en ignorant {{count}} doublon",
|
||||
"skippingXDuplicates_many": ", en ignorant {{count}} doublons",
|
||||
"skippingXDuplicates_other": ", en ignorant {{count}} doublons",
|
||||
"installingModel": "Modèle en cours d'installation",
|
||||
"installingBundle": "Pack en cours d'installation",
|
||||
"noDefaultSettings": "Aucun paramètre par défaut configuré pour ce modèle. Visitez le Gestionnaire de Modèles pour ajouter des paramètres par défaut.",
|
||||
"usingDefaultSettings": "Utilisation des paramètres par défaut du modèle",
|
||||
"defaultSettingsOutOfSync": "Certain paramètres ne correspondent pas aux valeurs par défaut du modèle :",
|
||||
"restoreDefaultSettings": "Cliquez pour utiliser les paramètres par défaut du modèle."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Images",
|
||||
"steps": "Etapes",
|
||||
"cfgScale": "CFG Echelle",
|
||||
"steps": "Étapes",
|
||||
"cfgScale": "Échelle CFG",
|
||||
"width": "Largeur",
|
||||
"height": "Hauteur",
|
||||
"seed": "Graine",
|
||||
"shuffle": "Mélanger la graine",
|
||||
"shuffle": "Nouvelle graine",
|
||||
"noiseThreshold": "Seuil de Bruit",
|
||||
"perlinNoise": "Bruit de Perlin",
|
||||
"type": "Type",
|
||||
"strength": "Force",
|
||||
"upscaling": "Agrandissement",
|
||||
"scale": "Echelle",
|
||||
"scale": "Échelle",
|
||||
"imageFit": "Ajuster Image Initiale à la Taille de Sortie",
|
||||
"scaleBeforeProcessing": "Echelle Avant Traitement",
|
||||
"scaleBeforeProcessing": "Échelle Avant Traitement",
|
||||
"scaledWidth": "Larg. Échelle",
|
||||
"scaledHeight": "Haut. Échelle",
|
||||
"infillMethod": "Méthode de Remplissage",
|
||||
@@ -411,35 +425,38 @@
|
||||
"clearIntermediatesWithCount_other": "Effacé {{count}} Intermédiaires",
|
||||
"informationalPopoversDisabled": "Pop-ups d'information désactivés",
|
||||
"informationalPopoversDisabledDesc": "Les pop-ups d'information ont été désactivés. Activez-les dans les paramètres.",
|
||||
"confirmOnNewSession": "Confirmer lors d'une nouvelle session"
|
||||
"confirmOnNewSession": "Confirmer lors d'une nouvelle session",
|
||||
"modelDescriptionsDisabledDesc": "Les descriptions des modèles dans les menus déroulants ont été désactivées. Activez-les dans les paramètres.",
|
||||
"enableModelDescriptions": "Activer les descriptions de modèle dans les menus déroulants",
|
||||
"modelDescriptionsDisabled": "Descriptions de modèle dans les menus déroulants désactivés"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "Téléchargement échoué",
|
||||
"uploadFailed": "Importation échouée",
|
||||
"imageCopied": "Image copiée",
|
||||
"parametersNotSet": "Paramètres non rappelés",
|
||||
"serverError": "Erreur du serveur",
|
||||
"uploadFailedInvalidUploadDesc": "Doit être une unique image PNG ou JPEG",
|
||||
"uploadFailedInvalidUploadDesc": "Doit être des images au format PNG ou JPEG.",
|
||||
"problemCopyingImage": "Impossible de copier l'image",
|
||||
"parameterSet": "Paramètre Rappelé",
|
||||
"parameterNotSet": "Paramètre non Rappelé",
|
||||
"canceled": "Traitement annulé",
|
||||
"addedToBoard": "Ajouté à la planche",
|
||||
"workflowLoaded": "Processus chargé",
|
||||
"addedToBoard": "Ajouté aux ressources de la planche {{name}}",
|
||||
"workflowLoaded": "Workflow chargé",
|
||||
"connected": "Connecté au serveur",
|
||||
"setNodeField": "Définir comme champ de nœud",
|
||||
"imageUploadFailed": "Échec de l'importation de l'image",
|
||||
"loadedWithWarnings": "Processus chargé avec des avertissements",
|
||||
"loadedWithWarnings": "Workflow chargé avec des avertissements",
|
||||
"imageUploaded": "Image importée",
|
||||
"modelAddedSimple": "Modèle ajouté à la file d'attente",
|
||||
"setControlImage": "Définir comme image de contrôle",
|
||||
"workflowDeleted": "Processus supprimé",
|
||||
"workflowDeleted": "Workflow supprimé",
|
||||
"baseModelChangedCleared_one": "Effacé ou désactivé {{count}} sous-modèle incompatible",
|
||||
"baseModelChangedCleared_many": "Effacé ou désactivé {{count}} sous-modèles incompatibles",
|
||||
"baseModelChangedCleared_other": "Effacé ou désactivé {{count}} sous-modèles incompatibles",
|
||||
"invalidUpload": "Téléchargement invalide",
|
||||
"invalidUpload": "Importation invalide",
|
||||
"problemDownloadingImage": "Impossible de télécharger l'image",
|
||||
"problemRetrievingWorkflow": "Problème de récupération du processus",
|
||||
"problemDeletingWorkflow": "Problème de suppression du processus",
|
||||
"problemRetrievingWorkflow": "Problème de récupération du Workflow",
|
||||
"problemDeletingWorkflow": "Problème de suppression du Workflow",
|
||||
"prunedQueue": "File d'attente vidée",
|
||||
"parameters": "Paramètres",
|
||||
"modelImportCanceled": "Importation du modèle annulée",
|
||||
@@ -468,10 +485,15 @@
|
||||
"baseModelChanged": "Modèle de base changé",
|
||||
"problemSavingLayer": "Impossible d'enregistrer la couche",
|
||||
"imageNotLoadedDesc": "Image introuvable",
|
||||
"linkCopied": "Lien copié"
|
||||
"linkCopied": "Lien copié",
|
||||
"imagesWillBeAddedTo": "Les images Importées seront ajoutées au ressources de la Planche {{boardName}}.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_one": "Doit être au maximum une image PNG ou JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_many": "Doit être au maximum {{count}} images PNG ou JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "Doit être au maximum {{count}} images PNG ou JPEG.",
|
||||
"addedToUncategorized": "Ajouté aux ressources de la planche $t(boards.uncategorized)"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Charger une image",
|
||||
"uploadImage": "Importer une image",
|
||||
"reset": "Réinitialiser",
|
||||
"nextImage": "Image suivante",
|
||||
"previousImage": "Image précédente",
|
||||
@@ -483,7 +505,8 @@
|
||||
"submitSupportTicket": "Envoyer un ticket de support",
|
||||
"resetUI": "$t(accessibility.reset) l'Interface Utilisateur",
|
||||
"toggleRightPanel": "Afficher/Masquer le panneau de droite (G)",
|
||||
"toggleLeftPanel": "Afficher/Masquer le panneau de gauche (T)"
|
||||
"toggleLeftPanel": "Afficher/Masquer le panneau de gauche (T)",
|
||||
"uploadImages": "Importer Image(s)"
|
||||
},
|
||||
"boards": {
|
||||
"move": "Déplacer",
|
||||
@@ -533,7 +556,7 @@
|
||||
"accordions": {
|
||||
"advanced": {
|
||||
"title": "Avancé",
|
||||
"options": "$t(accordions.advanced.title) Options"
|
||||
"options": "Options $t(accordions.advanced.title)"
|
||||
},
|
||||
"image": {
|
||||
"title": "Image"
|
||||
@@ -614,7 +637,7 @@
|
||||
"graphQueued": "Graph ajouté à la file d'attente",
|
||||
"other": "Autre",
|
||||
"generation": "Génération",
|
||||
"workflows": "Processus",
|
||||
"workflows": "Workflows",
|
||||
"batchFailedToQueue": "Impossible d'ajouter le Lot dans à la file d'attente",
|
||||
"graphFailedToQueue": "Impossible d'ajouter le graph à la file d'attente",
|
||||
"item": "Élément",
|
||||
@@ -687,8 +710,8 @@
|
||||
"desc": "Rappelle toutes les métadonnées pour l'image actuelle."
|
||||
},
|
||||
"loadWorkflow": {
|
||||
"title": "Charger le processus",
|
||||
"desc": "Charge le processus enregistré de l'image actuelle (s'il en a un)."
|
||||
"title": "Ouvrir un Workflow",
|
||||
"desc": "Charge le workflow enregistré lié à l'image actuelle (s'il en a un)."
|
||||
},
|
||||
"recallSeed": {
|
||||
"desc": "Rappelle la graine pour l'image actuelle.",
|
||||
@@ -739,8 +762,8 @@
|
||||
"desc": "Séléctionne l'onglet Agrandissement."
|
||||
},
|
||||
"selectWorkflowsTab": {
|
||||
"desc": "Sélectionne l'onglet Processus.",
|
||||
"title": "Sélectionner l'onglet Processus"
|
||||
"desc": "Sélectionne l'onglet Workflows.",
|
||||
"title": "Sélectionner l'onglet Workflows"
|
||||
},
|
||||
"togglePanels": {
|
||||
"desc": "Affiche ou masque les panneaux gauche et droit en même temps.",
|
||||
@@ -946,11 +969,11 @@
|
||||
},
|
||||
"undo": {
|
||||
"title": "Annuler",
|
||||
"desc": "Annule la dernière action de processus."
|
||||
"desc": "Annule la dernière action de workflow."
|
||||
},
|
||||
"redo": {
|
||||
"title": "Rétablir",
|
||||
"desc": "Rétablit la dernière action de processus."
|
||||
"desc": "Rétablit la dernière action de workflow."
|
||||
},
|
||||
"addNode": {
|
||||
"desc": "Ouvre le menu d'ajout de nœud.",
|
||||
@@ -968,7 +991,7 @@
|
||||
"desc": "Colle les nœuds et les connections copiés.",
|
||||
"title": "Coller"
|
||||
},
|
||||
"title": "Processus"
|
||||
"title": "Workflows"
|
||||
}
|
||||
},
|
||||
"popovers": {
|
||||
@@ -1355,6 +1378,43 @@
|
||||
"Des valeurs de guidage élevées peuvent entraîner une saturation excessive, et un guidage élevé ou faible peut entraîner des résultats de génération déformés. Le guidage ne s'applique qu'aux modèles FLUX DEV."
|
||||
],
|
||||
"heading": "Guidage"
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "Image de Référence Globale",
|
||||
"paragraphs": [
|
||||
"Applique une image de référence pour influencer l'ensemble de la génération."
|
||||
]
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "Image de Référence Régionale",
|
||||
"paragraphs": [
|
||||
"Pinceau pour appliquer une image de référence à des zones spécifiques."
|
||||
]
|
||||
},
|
||||
"inpainting": {
|
||||
"heading": "Inpainting",
|
||||
"paragraphs": [
|
||||
"Contrôle la zone qui est modifiée, guidé par la force de débruitage."
|
||||
]
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "Guide Régional",
|
||||
"paragraphs": [
|
||||
"Pinceau pour guider l'emplacement des éléments provenant des prompts globaux."
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "Guide régional et image de référence régionale",
|
||||
"paragraphs": [
|
||||
"Pour le Guide Régional, utilisez le pinceau pour indiquer où les éléments des prompts globaux doivent apparaître.",
|
||||
"Pour l'image de référence régionale, pinceau pour appliquer une image de référence à des zones spécifiques."
|
||||
]
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "Couche Rastérisation",
|
||||
"paragraphs": [
|
||||
"Contenu basé sur les pixels de votre toile, utilisé lors de la génération d'images."
|
||||
]
|
||||
}
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
@@ -1375,12 +1435,11 @@
|
||||
"positivePrompt": "Prompt Positif",
|
||||
"allPrompts": "Tous les Prompts",
|
||||
"negativePrompt": "Prompt Négatif",
|
||||
"seamless": "Sans jointure",
|
||||
"metadata": "Métadonné",
|
||||
"scheduler": "Planificateur",
|
||||
"imageDetails": "Détails de l'Image",
|
||||
"seed": "Graine",
|
||||
"workflow": "Processus",
|
||||
"workflow": "Workflow",
|
||||
"width": "Largeur",
|
||||
"Threshold": "Seuil de bruit",
|
||||
"noMetaData": "Aucune métadonnée trouvée",
|
||||
@@ -1400,13 +1459,14 @@
|
||||
"parameterSet": "Paramètre {{parameter}} défini",
|
||||
"parsingFailed": "L'analyse a échoué",
|
||||
"recallParameter": "Rappeler {{label}}",
|
||||
"canvasV2Metadata": "Toile"
|
||||
"canvasV2Metadata": "Toile",
|
||||
"guidance": "Guide"
|
||||
},
|
||||
"sdxl": {
|
||||
"freePromptStyle": "Écriture de Prompt manuelle",
|
||||
"concatPromptStyle": "Lier Prompt & Style",
|
||||
"negStylePrompt": "Prompt Négatif",
|
||||
"posStylePrompt": "Prompt Positif",
|
||||
"negStylePrompt": "Style Prompt Négatif",
|
||||
"posStylePrompt": "Style Prompt Positif",
|
||||
"refinerStart": "Démarrer le Refiner",
|
||||
"denoisingStrength": "Force de débruitage",
|
||||
"steps": "Étapes",
|
||||
@@ -1428,8 +1488,8 @@
|
||||
"hideMinimapnodes": "Masquer MiniCarte",
|
||||
"zoomOutNodes": "Dézoomer",
|
||||
"zoomInNodes": "Zoomer",
|
||||
"downloadWorkflow": "Télécharger processus en JSON",
|
||||
"loadWorkflow": "Charger le processus",
|
||||
"downloadWorkflow": "Exporter le Workflow au format JSON",
|
||||
"loadWorkflow": "Charger un Workflow",
|
||||
"reloadNodeTemplates": "Recharger les modèles de nœuds",
|
||||
"animatedEdges": "Connexions animées",
|
||||
"cannotConnectToSelf": "Impossible de se connecter à soi-même",
|
||||
@@ -1452,16 +1512,16 @@
|
||||
"float": "Flottant",
|
||||
"mismatchedVersion": "Nœud invalide : le nœud {{node}} de type {{type}} a une version incompatible (essayez de mettre à jour ?)",
|
||||
"missingTemplate": "Nœud invalide : le nœud {{node}} de type {{type}} modèle manquant (non installé ?)",
|
||||
"noWorkflow": "Pas de processus",
|
||||
"noWorkflow": "Pas de Workflow",
|
||||
"validateConnectionsHelp": "Prévenir la création de connexions invalides et l'invocation de graphes invalides",
|
||||
"workflowSettings": "Paramètres de l'Éditeur de Processus",
|
||||
"workflowValidation": "Erreur de validation du processus",
|
||||
"workflowSettings": "Paramètres de l'Éditeur de Workflow",
|
||||
"workflowValidation": "Erreur de validation du Workflow",
|
||||
"executionStateInProgress": "En cours",
|
||||
"node": "Noeud",
|
||||
"scheduler": "Planificateur",
|
||||
"notes": "Notes",
|
||||
"notesDescription": "Ajouter des notes sur votre processus",
|
||||
"unableToLoadWorkflow": "Impossible de charger le processus",
|
||||
"notesDescription": "Ajouter des notes sur votre workflow",
|
||||
"unableToLoadWorkflow": "Impossible de charger le Workflow",
|
||||
"addNode": "Ajouter un nœud",
|
||||
"problemSettingTitle": "Problème lors de définition du Titre",
|
||||
"connectionWouldCreateCycle": "La connexion créerait un cycle",
|
||||
@@ -1484,7 +1544,7 @@
|
||||
"noOutputRecorded": "Aucun résultat enregistré",
|
||||
"removeLinearView": "Retirer de la vue linéaire",
|
||||
"snapToGrid": "Aligner sur la grille",
|
||||
"workflow": "Processus",
|
||||
"workflow": "Workflow",
|
||||
"updateApp": "Mettre à jour l'application",
|
||||
"updateNode": "Mettre à jour le nœud",
|
||||
"nodeOutputs": "Sorties de nœud",
|
||||
@@ -1497,7 +1557,7 @@
|
||||
"string": "Chaîne de caractères",
|
||||
"workflowName": "Nom",
|
||||
"snapToGridHelp": "Aligner les nœuds sur la grille lors du déplacement",
|
||||
"unableToValidateWorkflow": "Impossible de valider le processus",
|
||||
"unableToValidateWorkflow": "Impossible de valider le Workflow",
|
||||
"validateConnections": "Valider les connexions et le graphique",
|
||||
"unableToUpdateNodes_one": "Impossible de mettre à jour {{count}} nœud",
|
||||
"unableToUpdateNodes_many": "Impossible de mettre à jour {{count}} nœuds",
|
||||
@@ -1510,15 +1570,15 @@
|
||||
"nodePack": "Paquet de nœuds",
|
||||
"sourceNodeDoesNotExist": "Connexion invalide : le nœud source/de sortie {{node}} n'existe pas",
|
||||
"sourceNodeFieldDoesNotExist": "Connexion invalide : {{node}}.{{field}} n'existe pas",
|
||||
"unableToGetWorkflowVersion": "Impossible d'obtenir la version du schéma de processus",
|
||||
"newWorkflowDesc2": "Votre processus actuel comporte des modifications non enregistrées.",
|
||||
"unableToGetWorkflowVersion": "Impossible d'obtenir la version du schéma du Workflow",
|
||||
"newWorkflowDesc2": "Votre workflow actuel comporte des modifications non enregistrées.",
|
||||
"deletedInvalidEdge": "Connexion invalide supprimé {{source}} -> {{target}}",
|
||||
"targetNodeDoesNotExist": "Connexion invalide : le nœud cible/entrée {{node}} n'existe pas",
|
||||
"targetNodeFieldDoesNotExist": "Connexion invalide : le champ {{node}}.{{field}} n'existe pas",
|
||||
"nodeVersion": "Version du noeud",
|
||||
"clearWorkflowDesc2": "Votre processus actuel comporte des modifications non enregistrées.",
|
||||
"clearWorkflow": "Effacer le Processus",
|
||||
"clearWorkflowDesc": "Effacer ce processus et en commencer un nouveau ?",
|
||||
"clearWorkflowDesc2": "Votre workflow actuel comporte des modifications non enregistrées.",
|
||||
"clearWorkflow": "Effacer le Workflow",
|
||||
"clearWorkflowDesc": "Effacer ce workflow et en commencer un nouveau ?",
|
||||
"unsupportedArrayItemType": "type d'élément de tableau non pris en charge \"{{type}}\"",
|
||||
"addLinearView": "Ajouter à la vue linéaire",
|
||||
"collectionOrScalarFieldType": "{{name}} (Unique ou Collection)",
|
||||
@@ -1527,7 +1587,7 @@
|
||||
"ipAdapter": "IP-Adapter",
|
||||
"viewMode": "Utiliser en vue linéaire",
|
||||
"collectionFieldType": "{{name}} (Collection)",
|
||||
"newWorkflow": "Nouveau processus",
|
||||
"newWorkflow": "Nouveau Workflow",
|
||||
"reorderLinearView": "Réorganiser la vue linéaire",
|
||||
"unknownOutput": "Sortie inconnue : {{name}}",
|
||||
"outputFieldTypeParseError": "Impossible d'analyser le type du champ de sortie {{node}}.{{field}} ({{message}})",
|
||||
@@ -1537,13 +1597,13 @@
|
||||
"unknownFieldType": "$t(nodes.unknownField) type : {{type}}",
|
||||
"inputFieldTypeParseError": "Impossible d'analyser le type du champ d'entrée {{node}}.{{field}} ({{message}})",
|
||||
"unableToExtractSchemaNameFromRef": "impossible d'extraire le nom du schéma à partir de la référence",
|
||||
"editMode": "Modifier dans l'éditeur de processus",
|
||||
"unknownErrorValidatingWorkflow": "Erreur inconnue lors de la validation du processus",
|
||||
"editMode": "Modifier dans l'éditeur de Workflow",
|
||||
"unknownErrorValidatingWorkflow": "Erreur inconnue lors de la validation du Workflow",
|
||||
"updateAllNodes": "Mettre à jour les nœuds",
|
||||
"allNodesUpdated": "Tous les nœuds mis à jour",
|
||||
"newWorkflowDesc": "Créer un nouveau processus ?",
|
||||
"newWorkflowDesc": "Créer un nouveau workflow ?",
|
||||
"edit": "Modifier",
|
||||
"noFieldsViewMode": "Ce processus n'a aucun champ sélectionné à afficher. Consultez le processus complet pour configurer les valeurs.",
|
||||
"noFieldsViewMode": "Ce workflow n'a aucun champ sélectionné à afficher. Consultez le workflow complet pour configurer les valeurs.",
|
||||
"graph": "Graph",
|
||||
"modelAccessError": "Impossible de trouver le modèle {{key}}, réinitialisation aux paramètres par défaut",
|
||||
"showEdgeLabelsHelp": "Afficher le nom sur les connections, indiquant les nœuds connectés",
|
||||
@@ -1557,9 +1617,9 @@
|
||||
"missingInvocationTemplate": "Modèle d'invocation manquant",
|
||||
"imageAccessError": "Impossible de trouver l'image {{image_name}}, réinitialisation à la valeur par défaut",
|
||||
"boardAccessError": "Impossible de trouver la planche {{board_id}}, réinitialisation à la valeur par défaut",
|
||||
"workflowHelpText": "Besoin d'aide ? Consultez notre guide sur <LinkComponent>Comment commencer avec les Processus</LinkComponent>.",
|
||||
"noWorkflows": "Aucun Processus",
|
||||
"noMatchingWorkflows": "Aucun processus correspondant"
|
||||
"workflowHelpText": "Besoin d'aide ? Consultez notre guide sur <LinkComponent>Comment commencer avec les Workflows</LinkComponent>.",
|
||||
"noWorkflows": "Aucun Workflows",
|
||||
"noMatchingWorkflows": "Aucun Workflows correspondant"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Aucun modèle correspondant",
|
||||
@@ -1576,59 +1636,51 @@
|
||||
},
|
||||
"workflows": {
|
||||
"workflowLibrary": "Bibliothèque",
|
||||
"loading": "Chargement des processus",
|
||||
"searchWorkflows": "Rechercher des processus",
|
||||
"workflowCleared": "Processus effacé",
|
||||
"loading": "Chargement des Workflows",
|
||||
"searchWorkflows": "Chercher des Workflows",
|
||||
"workflowCleared": "Workflow effacé",
|
||||
"noDescription": "Aucune description",
|
||||
"deleteWorkflow": "Supprimer le processus",
|
||||
"openWorkflow": "Ouvrir le processus",
|
||||
"uploadWorkflow": "Charger à partir du fichier",
|
||||
"workflowName": "Nom du processus",
|
||||
"unnamedWorkflow": "Processus sans nom",
|
||||
"saveWorkflowAs": "Enregistrer le processus sous",
|
||||
"workflows": "Processus",
|
||||
"savingWorkflow": "Enregistrement du processus...",
|
||||
"saveWorkflowToProject": "Enregistrer le processus dans le projet",
|
||||
"deleteWorkflow": "Supprimer le Workflow",
|
||||
"openWorkflow": "Ouvrir le Workflow",
|
||||
"uploadWorkflow": "Charger à partir d'un fichier",
|
||||
"workflowName": "Nom du Workflow",
|
||||
"unnamedWorkflow": "Workflow sans nom",
|
||||
"saveWorkflowAs": "Enregistrer le Workflow sous",
|
||||
"workflows": "Workflows",
|
||||
"savingWorkflow": "Enregistrement du Workflow...",
|
||||
"saveWorkflowToProject": "Enregistrer le Workflow dans le projet",
|
||||
"downloadWorkflow": "Enregistrer dans le fichier",
|
||||
"saveWorkflow": "Enregistrer le processus",
|
||||
"problemSavingWorkflow": "Problème de sauvegarde du processus",
|
||||
"workflowEditorMenu": "Menu de l'Éditeur de Processus",
|
||||
"newWorkflowCreated": "Nouveau processus créé",
|
||||
"clearWorkflowSearchFilter": "Réinitialiser le filtre de recherche de processus",
|
||||
"problemLoading": "Problème de chargement des processus",
|
||||
"workflowSaved": "Processus enregistré",
|
||||
"noWorkflows": "Pas de processus",
|
||||
"saveWorkflow": "Enregistrer le Workflow",
|
||||
"problemSavingWorkflow": "Problème de sauvegarde du Workflow",
|
||||
"workflowEditorMenu": "Menu de l'Éditeur de Workflow",
|
||||
"newWorkflowCreated": "Nouveau Workflow créé",
|
||||
"clearWorkflowSearchFilter": "Réinitialiser le filtre de recherche de Workflow",
|
||||
"problemLoading": "Problème de chargement des Workflows",
|
||||
"workflowSaved": "Workflow enregistré",
|
||||
"noWorkflows": "Pas de Workflows",
|
||||
"ascending": "Ascendant",
|
||||
"loadFromGraph": "Charger le processus à partir du graphique",
|
||||
"loadFromGraph": "Charger le Workflow à partir du graphique",
|
||||
"descending": "Descendant",
|
||||
"created": "Créé",
|
||||
"updated": "Mis à jour",
|
||||
"loadWorkflow": "$t(common.load) Processus",
|
||||
"loadWorkflow": "$t(common.load) Workflow",
|
||||
"convertGraph": "Convertir le graphique",
|
||||
"opened": "Ouvert",
|
||||
"name": "Nom",
|
||||
"autoLayout": "Mise en page automatique",
|
||||
"defaultWorkflows": "Processus par défaut",
|
||||
"userWorkflows": "Processus utilisateur",
|
||||
"projectWorkflows": "Processus du projet",
|
||||
"defaultWorkflows": "Workflows par défaut",
|
||||
"userWorkflows": "Workflows de l'utilisateur",
|
||||
"projectWorkflows": "Workflows du projet",
|
||||
"copyShareLink": "Copier le lien de partage",
|
||||
"chooseWorkflowFromLibrary": "Choisir le Processus dans la Bibliothèque",
|
||||
"uploadAndSaveWorkflow": "Charger dans la bibliothèque",
|
||||
"chooseWorkflowFromLibrary": "Choisir le Workflow dans la Bibliothèque",
|
||||
"uploadAndSaveWorkflow": "Importer dans la bibliothèque",
|
||||
"edit": "Modifer",
|
||||
"deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce processus ? Ceci ne peut pas être annulé.",
|
||||
"deleteWorkflow2": "Êtes-vous sûr de vouloir supprimer ce Workflow ? Cette action ne peut pas être annulé.",
|
||||
"download": "Télécharger",
|
||||
"copyShareLinkForWorkflow": "Copier le lien de partage pour le processus",
|
||||
"copyShareLinkForWorkflow": "Copier le lien de partage pour le Workflow",
|
||||
"delete": "Supprimer"
|
||||
},
|
||||
"whatsNew": {
|
||||
"canvasV2Announcement": {
|
||||
"watchReleaseVideo": "Regarder la vidéo de lancement",
|
||||
"newLayerTypes": "Nouveaux types de couches pour un contrôle encore plus précis",
|
||||
"fluxSupport": "Support pour la famille de modèles Flux",
|
||||
"readReleaseNotes": "Lire les notes de version",
|
||||
"newCanvas": "Une nouvelle Toile de contrôle puissant",
|
||||
"watchUiUpdatesOverview": "Regarder l'aperçu des mises à jour de l'UI"
|
||||
},
|
||||
"whatsNewInInvoke": "Quoi de neuf dans Invoke"
|
||||
},
|
||||
"ui": {
|
||||
@@ -1639,7 +1691,7 @@
|
||||
"gallery": "Galerie",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||
"generation": "Génération",
|
||||
"workflows": "Processus",
|
||||
"workflows": "Workflows",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modèles",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)"
|
||||
@@ -1749,7 +1801,9 @@
|
||||
"bboxGroup": "Créer à partir de la bounding box",
|
||||
"newRegionalReferenceImage": "Nouvelle image de référence régionale",
|
||||
"newGlobalReferenceImage": "Nouvelle image de référence globale",
|
||||
"newControlLayer": "Nouveau couche de contrôle"
|
||||
"newControlLayer": "Nouveau couche de contrôle",
|
||||
"newInpaintMask": "Nouveau Masque Inpaint",
|
||||
"newRegionalGuidance": "Nouveau Guide Régional"
|
||||
},
|
||||
"bookmark": "Marque-page pour Changement Rapide",
|
||||
"saveLayerToAssets": "Enregistrer la couche dans les ressources",
|
||||
@@ -1762,8 +1816,6 @@
|
||||
"on": "Activé",
|
||||
"label": "Aligner sur la grille"
|
||||
},
|
||||
"isolatedFilteringPreview": "Aperçu de filtrage isolé",
|
||||
"isolatedTransformingPreview": "Aperçu de transformation isolée",
|
||||
"invertBrushSizeScrollDirection": "Inverser le défilement pour la taille du pinceau",
|
||||
"pressureSensitivity": "Sensibilité à la pression",
|
||||
"preserveMask": {
|
||||
@@ -1771,9 +1823,10 @@
|
||||
"alert": "Préserver la zone masquée"
|
||||
},
|
||||
"isolatedPreview": "Aperçu Isolé",
|
||||
"isolatedStagingPreview": "Aperçu de l'attente isolé"
|
||||
"isolatedStagingPreview": "Aperçu de l'attente isolé",
|
||||
"isolatedLayerPreview": "Aperçu de la couche isolée",
|
||||
"isolatedLayerPreviewDesc": "Pour afficher uniquement cette couche lors de l'exécution d'opérations telles que le filtrage ou la transformation."
|
||||
},
|
||||
"convertToRasterLayer": "Convertir en Couche de Rastérisation",
|
||||
"transparency": "Transparence",
|
||||
"moveBackward": "Reculer",
|
||||
"rectangle": "Rectangle",
|
||||
@@ -1896,7 +1949,6 @@
|
||||
"globalReferenceImage_withCount_one": "$t(controlLayers.globalReferenceImage)",
|
||||
"globalReferenceImage_withCount_many": "Images de référence globales",
|
||||
"globalReferenceImage_withCount_other": "Images de référence globales",
|
||||
"convertToControlLayer": "Convertir en Couche de Contrôle",
|
||||
"layer_withCount_one": "Couche {{count}}",
|
||||
"layer_withCount_many": "Couches {{count}}",
|
||||
"layer_withCount_other": "Couches {{count}}",
|
||||
@@ -1959,7 +2011,41 @@
|
||||
"pullBboxIntoReferenceImageOk": "Bounding Box insérée dans l'Image de référence",
|
||||
"controlLayer_withCount_one": "$t(controlLayers.controlLayer)",
|
||||
"controlLayer_withCount_many": "Controler les couches",
|
||||
"controlLayer_withCount_other": "Controler les couches"
|
||||
"controlLayer_withCount_other": "Controler les couches",
|
||||
"copyInpaintMaskTo": "Copier $t(controlLayers.inpaintMask) vers",
|
||||
"copyRegionalGuidanceTo": "Copier $t(controlLayers.regionalGuidance) vers",
|
||||
"convertRasterLayerTo": "Convertir $t(controlLayers.rasterLayer) vers",
|
||||
"selectObject": {
|
||||
"selectObject": "Sélectionner l'objet",
|
||||
"clickToAdd": "Cliquez sur la couche pour ajouter un point",
|
||||
"apply": "Appliquer",
|
||||
"cancel": "Annuler",
|
||||
"dragToMove": "Faites glisser un point pour le déplacer",
|
||||
"clickToRemove": "Cliquez sur un point pour le supprimer",
|
||||
"include": "Inclure",
|
||||
"invertSelection": "Sélection Inversée",
|
||||
"saveAs": "Enregistrer sous",
|
||||
"neutral": "Neutre",
|
||||
"pointType": "Type de point",
|
||||
"exclude": "Exclure",
|
||||
"process": "Traiter",
|
||||
"reset": "Réinitialiser",
|
||||
"help1": "Sélectionnez un seul objet cible. Ajoutez des points <Bold>Inclure</Bold> et <Bold>Exclure</Bold> pour indiquer quelles parties de la couche font partie de l'objet cible.",
|
||||
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
|
||||
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
|
||||
},
|
||||
"canvasAsControlLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.controlLayer)",
|
||||
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
|
||||
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
|
||||
"newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
|
||||
"newRegionalGuidance": "Nouveau $t(controlLayers.regionalGuidance)",
|
||||
"replaceCurrent": "Remplacer Actuel",
|
||||
"convertControlLayerTo": "Convertir $t(controlLayers.controlLayer) vers",
|
||||
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
|
||||
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
|
||||
"newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
|
||||
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
|
||||
"canvasAsRasterLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.rasterLayer)"
|
||||
},
|
||||
"upscaling": {
|
||||
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
|
||||
@@ -1980,57 +2066,57 @@
|
||||
"missingTileControlNetModel": "Aucun modèle ControlNet valide installé"
|
||||
},
|
||||
"stylePresets": {
|
||||
"deleteTemplate": "Supprimer le modèle",
|
||||
"editTemplate": "Modifier le modèle",
|
||||
"deleteTemplate": "Supprimer le template",
|
||||
"editTemplate": "Modifier le template",
|
||||
"exportFailed": "Impossible de générer et de télécharger le CSV",
|
||||
"name": "Nom",
|
||||
"acceptedColumnsKeys": "Colonnes/clés acceptées :",
|
||||
"promptTemplatesDesc1": "Les modèles de prompt ajoutent du texte aux prompts que vous écrivez dans la zone de saisie des prompts.",
|
||||
"promptTemplatesDesc1": "Les templates de prompt ajoutent du texte aux prompts que vous écrivez dans la zone de saisie.",
|
||||
"private": "Privé",
|
||||
"searchByName": "Rechercher par nom",
|
||||
"viewList": "Afficher la liste des modèles",
|
||||
"noTemplates": "Aucun modèle",
|
||||
"viewList": "Afficher la liste des templates",
|
||||
"noTemplates": "Aucun templates",
|
||||
"insertPlaceholder": "Insérer un placeholder",
|
||||
"defaultTemplates": "Modèles par défaut",
|
||||
"defaultTemplates": "Template pré-défini",
|
||||
"deleteImage": "Supprimer l'image",
|
||||
"createPromptTemplate": "Créer un modèle de prompt",
|
||||
"createPromptTemplate": "Créer un template de prompt",
|
||||
"negativePrompt": "Prompt négatif",
|
||||
"promptTemplatesDesc3": "Si vous omettez le placeholder, le modèle sera ajouté à la fin de votre prompt.",
|
||||
"promptTemplatesDesc3": "Si vous omettez le placeholder, le template sera ajouté à la fin de votre prompt.",
|
||||
"positivePrompt": "Prompt positif",
|
||||
"choosePromptTemplate": "Choisir un modèle de prompt",
|
||||
"choosePromptTemplate": "Choisir un template de prompt",
|
||||
"toggleViewMode": "Basculer le mode d'affichage",
|
||||
"updatePromptTemplate": "Mettre à jour le modèle de prompt",
|
||||
"flatten": "Intégrer le modèle sélectionné dans le prompt actuel",
|
||||
"myTemplates": "Mes modèles",
|
||||
"updatePromptTemplate": "Mettre à jour le template de prompt",
|
||||
"flatten": "Intégrer le template sélectionné dans le prompt actuel",
|
||||
"myTemplates": "Mes Templates",
|
||||
"type": "Type",
|
||||
"exportDownloaded": "Exportation téléchargée",
|
||||
"clearTemplateSelection": "Supprimer la sélection de modèle",
|
||||
"promptTemplateCleared": "Modèle de prompt effacé",
|
||||
"templateDeleted": "Modèle de prompt supprimé",
|
||||
"exportPromptTemplates": "Exporter mes modèles de prompt (CSV)",
|
||||
"clearTemplateSelection": "Supprimer la sélection de template",
|
||||
"promptTemplateCleared": "Template de prompt effacé",
|
||||
"templateDeleted": "Template de prompt supprimé",
|
||||
"exportPromptTemplates": "Exporter mes templates de prompt (CSV)",
|
||||
"nameColumn": "'nom'",
|
||||
"positivePromptColumn": "\"prompt\" ou \"prompt_positif\"",
|
||||
"useForTemplate": "Utiliser pour le modèle de prompt",
|
||||
"uploadImage": "Charger une image",
|
||||
"importTemplates": "Importer des modèles de prompt (CSV/JSON)",
|
||||
"useForTemplate": "Utiliser pour le template de prompt",
|
||||
"uploadImage": "Importer une image",
|
||||
"importTemplates": "Importer des templates de prompt (CSV/JSON)",
|
||||
"negativePromptColumn": "'prompt_négatif'",
|
||||
"deleteTemplate2": "Êtes-vous sûr de vouloir supprimer ce modèle ? Cette action ne peut pas être annulée.",
|
||||
"deleteTemplate2": "Êtes-vous sûr de vouloir supprimer ce template ? Cette action ne peut pas être annulée.",
|
||||
"preview": "Aperçu",
|
||||
"shared": "Partagé",
|
||||
"noMatchingTemplates": "Aucun modèle correspondant",
|
||||
"sharedTemplates": "Modèles partagés",
|
||||
"unableToDeleteTemplate": "Impossible de supprimer le modèle de prompt",
|
||||
"noMatchingTemplates": "Aucun templates correspondant",
|
||||
"sharedTemplates": "Template partagés",
|
||||
"unableToDeleteTemplate": "Impossible de supprimer le template de prompt",
|
||||
"active": "Actif",
|
||||
"copyTemplate": "Copier le modèle",
|
||||
"viewModeTooltip": "Voici à quoi ressemblera votre prompt avec le modèle actuellement sélectionné. Pour modifier votre prompt, cliquez n'importe où dans la zone de texte.",
|
||||
"promptTemplatesDesc2": "Utilisez la chaîne de remplacement <Pre>{{placeholder}}</Pre> pour spécifier où votre prompt doit être inclus dans le modèle."
|
||||
"copyTemplate": "Copier le template",
|
||||
"viewModeTooltip": "Voici à quoi ressemblera votre prompt avec le template actuellement sélectionné. Pour modifier votre prompt, cliquez n'importe où dans la zone de texte.",
|
||||
"promptTemplatesDesc2": "Utilisez la chaîne de remplacement <Pre>{{placeholder}}</Pre> pour spécifier où votre prompt doit être inclus dans le template."
|
||||
},
|
||||
"system": {
|
||||
"logNamespaces": {
|
||||
"config": "Configuration",
|
||||
"canvas": "Toile",
|
||||
"generation": "Génération",
|
||||
"workflows": "Processus",
|
||||
"workflows": "Workflows",
|
||||
"system": "Système",
|
||||
"models": "Modèles",
|
||||
"logNamespaces": "Journalisation des espaces de noms",
|
||||
@@ -2051,8 +2137,12 @@
|
||||
"enableLogging": "Activer la journalisation"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"toGetStarted": "Pour commencer, saisissez un prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un modèle de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement dans la <StrongComponent>Galerie</StrongComponent> ou de les modifier sur la <StrongComponent>Toile</StrongComponent>.",
|
||||
"gettingStartedSeries": "Vous souhaitez plus de conseils ? Consultez notre <LinkComponent>Série de démarrage</LinkComponent> pour des astuces sur l'exploitation du plein potentiel de l'Invoke Studio."
|
||||
"toGetStarted": "Pour commencer, saisissez un prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement dans la <StrongComponent>Galerie</StrongComponent> ou de les modifier sur la <StrongComponent>Toile</StrongComponent>.",
|
||||
"gettingStartedSeries": "Vous souhaitez plus de conseils ? Consultez notre <LinkComponent>Série de démarrage</LinkComponent> pour des astuces sur l'exploitation du plein potentiel de l'Invoke Studio.",
|
||||
"noModelsInstalled": "Il semble qu'aucun modèle ne soit installé",
|
||||
"downloadStarterModels": "Télécharger les modèles de démarrage",
|
||||
"importModels": "Importer des Modèles",
|
||||
"toGetStartedLocal": "Pour commencer, assurez-vous de télécharger ou d'importer des modèles nécessaires pour exécuter Invoke. Ensuite, saisissez le prompt dans la boîte et cliquez sur <StrongComponent>Invoke</StrongComponent> pour générer votre première image. Sélectionnez un template de prompt pour améliorer les résultats. Vous pouvez choisir de sauvegarder vos images directement sur <StrongComponent>Galerie</StrongComponent> ou les modifier sur la <StrongComponent>Toile</StrongComponent>."
|
||||
},
|
||||
"upsell": {
|
||||
"shareAccess": "Partager l'accès",
|
||||
|
||||
@@ -92,7 +92,11 @@
|
||||
"none": "Niente",
|
||||
"new": "Nuovo",
|
||||
"view": "Vista",
|
||||
"close": "Chiudi"
|
||||
"close": "Chiudi",
|
||||
"clipboard": "Appunti",
|
||||
"ok": "Ok",
|
||||
"generating": "Generazione",
|
||||
"loadingModel": "Caricamento del modello"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -542,7 +546,6 @@
|
||||
"defaultSettingsSaved": "Impostazioni predefinite salvate",
|
||||
"defaultSettings": "Impostazioni predefinite",
|
||||
"metadata": "Metadati",
|
||||
"useDefaultSettings": "Usa le impostazioni predefinite",
|
||||
"triggerPhrases": "Frasi Trigger",
|
||||
"deleteModelImage": "Elimina l'immagine del modello",
|
||||
"localOnly": "solo locale",
|
||||
@@ -577,7 +580,37 @@
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
"starterModelsInModelManager": "I modelli iniziali possono essere trovati in Gestione Modelli",
|
||||
"spandrelImageToImage": "Immagine a immagine (Spandrel)",
|
||||
"learnMoreAboutSupportedModels": "Scopri di più sui modelli che supportiamo"
|
||||
"learnMoreAboutSupportedModels": "Scopri di più sui modelli che supportiamo",
|
||||
"starterBundles": "Pacchetti per iniziare",
|
||||
"installingBundle": "Installazione del pacchetto",
|
||||
"skippingXDuplicates_one": ", saltando {{count}} duplicato",
|
||||
"skippingXDuplicates_many": ", saltando {{count}} duplicati",
|
||||
"skippingXDuplicates_other": ", saltando {{count}} duplicati",
|
||||
"installingModel": "Installazione del modello",
|
||||
"installingXModels_one": "Installazione di {{count}} modello",
|
||||
"installingXModels_many": "Installazione di {{count}} modelli",
|
||||
"installingXModels_other": "Installazione di {{count}} modelli",
|
||||
"includesNModels": "Include {{n}} modelli e le loro dipendenze",
|
||||
"starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
|
||||
"noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
|
||||
"defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",
|
||||
"restoreDefaultSettings": "Fare clic per utilizzare le impostazioni predefinite del modello.",
|
||||
"usingDefaultSettings": "Utilizzo delle impostazioni predefinite del modello",
|
||||
"huggingFace": "HuggingFace",
|
||||
"huggingFaceRepoID": "HuggingFace Repository ID",
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"t5Encoder": "T5 Encoder",
|
||||
"hfTokenInvalidErrorMessage": "Gettone HuggingFace non valido o mancante.",
|
||||
"hfTokenRequired": "Stai tentando di scaricare un modello che richiede un gettone HuggingFace valido.",
|
||||
"hfTokenUnableToVerifyErrorMessage": "Impossibile verificare il gettone HuggingFace. Ciò è probabilmente dovuto a un errore di rete. Riprova più tardi.",
|
||||
"hfTokenHelperText": "Per utilizzare alcuni modelli è necessario un gettone HF. Fai clic qui per creare o ottenere il tuo gettone.",
|
||||
"hfTokenInvalid": "Gettone HF non valido o mancante",
|
||||
"hfTokenUnableToVerify": "Impossibile verificare il gettone HF",
|
||||
"hfTokenSaved": "Gettone HF salvato",
|
||||
"hfForbidden": "Non hai accesso a questo modello HF",
|
||||
"hfTokenLabel": "Gettone HuggingFace (richiesto per alcuni modelli)",
|
||||
"hfForbiddenErrorMessage": "Consigliamo di visitare la pagina del repository su HuggingFace.com. Il proprietario potrebbe richiedere l'accettazione dei termini per poter effettuare il download.",
|
||||
"hfTokenInvalidErrorMessage2": "Aggiornalo in "
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -678,7 +711,10 @@
|
||||
"boxBlur": "Sfocatura Box",
|
||||
"staged": "Maschera espansa",
|
||||
"optimizedImageToImage": "Immagine-a-immagine ottimizzata",
|
||||
"sendToCanvas": "Invia alla Tela"
|
||||
"sendToCanvas": "Invia alla Tela",
|
||||
"coherenceMinDenoise": "Min rid. rumore",
|
||||
"recallMetadata": "Richiama i metadati",
|
||||
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -713,7 +749,11 @@
|
||||
"reloadingIn": "Ricaricando in",
|
||||
"informationalPopoversDisabled": "Testo informativo a comparsa disabilitato",
|
||||
"informationalPopoversDisabledDesc": "I testi informativi a comparsa sono disabilitati. Attivali nelle impostazioni.",
|
||||
"confirmOnNewSession": "Conferma su nuova sessione"
|
||||
"confirmOnNewSession": "Conferma su nuova sessione",
|
||||
"enableModelDescriptions": "Abilita le descrizioni dei modelli nei menu a discesa",
|
||||
"modelDescriptionsDisabled": "Descrizioni dei modelli nei menu a discesa disabilitate",
|
||||
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni.",
|
||||
"showDetailedInvocationProgress": "Mostra dettagli avanzamento"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "Caricamento fallito",
|
||||
@@ -722,7 +762,7 @@
|
||||
"serverError": "Errore del Server",
|
||||
"connected": "Connesso al server",
|
||||
"canceled": "Elaborazione annullata",
|
||||
"uploadFailedInvalidUploadDesc": "Deve essere una singola immagine PNG o JPEG",
|
||||
"uploadFailedInvalidUploadDesc": "Devono essere immagini PNG o JPEG.",
|
||||
"parameterSet": "Parametro richiamato",
|
||||
"parameterNotSet": "Parametro non richiamato",
|
||||
"problemCopyingImage": "Impossibile copiare l'immagine",
|
||||
@@ -731,7 +771,7 @@
|
||||
"baseModelChangedCleared_other": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
|
||||
"loadedWithWarnings": "Flusso di lavoro caricato con avvisi",
|
||||
"imageUploaded": "Immagine caricata",
|
||||
"addedToBoard": "Aggiunto alla bacheca",
|
||||
"addedToBoard": "Aggiunto alle risorse della bacheca {{name}}",
|
||||
"modelAddedSimple": "Modello aggiunto alla Coda",
|
||||
"imageUploadFailed": "Caricamento immagine non riuscito",
|
||||
"setControlImage": "Imposta come immagine di controllo",
|
||||
@@ -770,7 +810,12 @@
|
||||
"imageSavingFailed": "Salvataggio dell'immagine non riuscito",
|
||||
"layerCopiedToClipboard": "Livello copiato negli appunti",
|
||||
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
|
||||
"linkCopied": "Collegamento copiato"
|
||||
"linkCopied": "Collegamento copiato",
|
||||
"addedToUncategorized": "Aggiunto alle risorse della bacheca $t(boards.uncategorized)",
|
||||
"imagesWillBeAddedTo": "Le immagini caricate verranno aggiunte alle risorse della bacheca {{boardName}}.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG o JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -785,7 +830,8 @@
|
||||
"about": "Informazioni",
|
||||
"submitSupportTicket": "Invia ticket di supporto",
|
||||
"toggleLeftPanel": "Attiva/disattiva il pannello sinistro (T)",
|
||||
"toggleRightPanel": "Attiva/disattiva il pannello destro (G)"
|
||||
"toggleRightPanel": "Attiva/disattiva il pannello destro (G)",
|
||||
"uploadImages": "Carica immagine(i)"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomOutNodes": "Rimpicciolire",
|
||||
@@ -1059,7 +1105,8 @@
|
||||
"noLoRAsInstalled": "Nessun LoRA installato",
|
||||
"addLora": "Aggiungi LoRA",
|
||||
"defaultVAE": "VAE predefinito",
|
||||
"concepts": "Concetti"
|
||||
"concepts": "Concetti",
|
||||
"lora": "LoRA"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Disabilita",
|
||||
@@ -1116,7 +1163,8 @@
|
||||
"paragraphs": [
|
||||
"Scegli quanti livelli del modello CLIP saltare.",
|
||||
"Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip."
|
||||
]
|
||||
],
|
||||
"heading": "CLIP Skip"
|
||||
},
|
||||
"compositingCoherencePass": {
|
||||
"heading": "Passaggio di Coerenza",
|
||||
@@ -1231,8 +1279,9 @@
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"Quanto rumore viene aggiunto all'immagine in ingresso.",
|
||||
"0 risulterà in un'immagine identica, mentre 1 risulterà in un'immagine completamente nuova."
|
||||
"Controlla la differenza tra l'immagine generata e il/i livello/i raster.",
|
||||
"Una forza inferiore rimane più vicina ai livelli raster visibili combinati. Una forza superiore si basa maggiormente sul prompt globale.",
|
||||
"Se non sono presenti livelli raster con contenuto visibile, questa impostazione viene ignorata."
|
||||
],
|
||||
"heading": "Forza di riduzione del rumore"
|
||||
},
|
||||
@@ -1244,7 +1293,7 @@
|
||||
},
|
||||
"infillMethod": {
|
||||
"paragraphs": [
|
||||
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
|
||||
"Metodo di riempimento durante il processo di Outpaint o Inpaint."
|
||||
],
|
||||
"heading": "Metodo di riempimento"
|
||||
},
|
||||
@@ -1412,7 +1461,7 @@
|
||||
"heading": "Livello minimo di riduzione del rumore",
|
||||
"paragraphs": [
|
||||
"Intensità minima di riduzione rumore per la modalità di Coerenza",
|
||||
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpainting o l'outpainting"
|
||||
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpaint o l'outpaint"
|
||||
]
|
||||
},
|
||||
"compositingMaskBlur": {
|
||||
@@ -1466,7 +1515,7 @@
|
||||
"optimizedDenoising": {
|
||||
"heading": "Immagine-a-immagine ottimizzata",
|
||||
"paragraphs": [
|
||||
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpainting con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
|
||||
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpaint con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
@@ -1475,6 +1524,42 @@
|
||||
"Controlla quanto il prompt influenza il processo di generazione.",
|
||||
"Valori di guida elevati possono causare sovrasaturazione e una guida elevata o bassa può causare risultati di generazione distorti. La guida si applica solo ai modelli FLUX DEV."
|
||||
]
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"paragraphs": [
|
||||
"Pennello per applicare un'immagine di riferimento ad aree specifiche."
|
||||
],
|
||||
"heading": "Immagine di riferimento Regionale"
|
||||
},
|
||||
"rasterLayer": {
|
||||
"paragraphs": [
|
||||
"Contenuto basato sui pixel della tua tela, utilizzato durante la generazione dell'immagine."
|
||||
],
|
||||
"heading": "Livello Raster"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "Guida Regionale",
|
||||
"paragraphs": [
|
||||
"Pennello per guidare la posizione in cui devono apparire gli elementi dei prompt globali."
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "Guida regionale e immagine di riferimento regionale",
|
||||
"paragraphs": [
|
||||
"Per la Guida Regionale, utilizzare il pennello per indicare dove devono apparire gli elementi dei prompt globali.",
|
||||
"Per l'immagine di riferimento regionale, utilizzare il pennello per applicare un'immagine di riferimento ad aree specifiche."
|
||||
]
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "Immagine di riferimento Globale",
|
||||
"paragraphs": [
|
||||
"Applica un'immagine di riferimento per influenzare l'intera generazione."
|
||||
]
|
||||
},
|
||||
"inpainting": {
|
||||
"paragraphs": [
|
||||
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1496,7 +1581,6 @@
|
||||
"refinerSteps": "Passi Affinamento"
|
||||
},
|
||||
"metadata": {
|
||||
"seamless": "Senza giunture",
|
||||
"positivePrompt": "Prompt positivo",
|
||||
"negativePrompt": "Prompt negativo",
|
||||
"generationMode": "Modalità generazione",
|
||||
@@ -1524,7 +1608,10 @@
|
||||
"parsingFailed": "Analisi non riuscita",
|
||||
"recallParameter": "Richiama {{label}}",
|
||||
"canvasV2Metadata": "Tela",
|
||||
"guidance": "Guida"
|
||||
"guidance": "Guida",
|
||||
"seamlessXAxis": "Asse X senza giunte",
|
||||
"seamlessYAxis": "Asse Y senza giunte",
|
||||
"vae": "VAE"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Abilita Correzione Alta Risoluzione",
|
||||
@@ -1621,11 +1708,11 @@
|
||||
"regionalGuidance": "Guida regionale",
|
||||
"opacity": "Opacità",
|
||||
"mergeVisible": "Fondi il visibile",
|
||||
"mergeVisibleOk": "Livelli visibili uniti",
|
||||
"mergeVisibleOk": "Livelli uniti",
|
||||
"deleteReferenceImage": "Elimina l'immagine di riferimento",
|
||||
"referenceImage": "Immagine di riferimento",
|
||||
"fitBboxToLayers": "Adatta il riquadro di delimitazione ai livelli",
|
||||
"mergeVisibleError": "Errore durante l'unione dei livelli visibili",
|
||||
"mergeVisibleError": "Errore durante l'unione dei livelli",
|
||||
"regionalReferenceImage": "Immagine di riferimento Regionale",
|
||||
"newLayerFromImage": "Nuovo livello da immagine",
|
||||
"newCanvasFromImage": "Nuova tela da immagine",
|
||||
@@ -1664,7 +1751,7 @@
|
||||
"newControlLayerOk": "Livello di controllo creato",
|
||||
"bboxOverlay": "Mostra sovrapposizione riquadro",
|
||||
"resetCanvas": "Reimposta la tela",
|
||||
"outputOnlyMaskedRegions": "Solo regioni mascherate in uscita",
|
||||
"outputOnlyMaskedRegions": "In uscita solo le regioni generate",
|
||||
"enableAutoNegative": "Abilita Auto Negativo",
|
||||
"disableAutoNegative": "Disabilita Auto Negativo",
|
||||
"showHUD": "Mostra HUD",
|
||||
@@ -1701,7 +1788,7 @@
|
||||
"globalReferenceImage_withCount_many": "Immagini di riferimento Globali",
|
||||
"globalReferenceImage_withCount_other": "Immagini di riferimento Globali",
|
||||
"controlMode": {
|
||||
"balanced": "Bilanciato",
|
||||
"balanced": "Bilanciato (consigliato)",
|
||||
"controlMode": "Modalità di controllo",
|
||||
"prompt": "Prompt",
|
||||
"control": "Controllo",
|
||||
@@ -1712,12 +1799,12 @@
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"stagingOnCanvas": "Genera immagini nella",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Completo",
|
||||
"full": "Stile e Composizione",
|
||||
"style": "Solo Stile",
|
||||
"composition": "Solo Composizione",
|
||||
"ipAdapterMethod": "Metodo Adattatore IP"
|
||||
},
|
||||
"showingType": "Mostrare {{type}}",
|
||||
"showingType": "Mostra {{type}}",
|
||||
"dynamicGrid": "Griglia dinamica",
|
||||
"tool": {
|
||||
"view": "Muovi",
|
||||
@@ -1811,7 +1898,10 @@
|
||||
"lineart_anime_edge_detection": {
|
||||
"description": "Genera una mappa dei bordi dal livello selezionato utilizzando il modello di rilevamento dei bordi Lineart Anime.",
|
||||
"label": "Rilevamento bordi Lineart Anime"
|
||||
}
|
||||
},
|
||||
"forMoreControl": "Per un maggiore controllo, fare clic su Avanzate qui sotto.",
|
||||
"advanced": "Avanzate",
|
||||
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}."
|
||||
},
|
||||
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
|
||||
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
|
||||
@@ -1845,8 +1935,6 @@
|
||||
"layer_withCount_one": "Livello ({{count}})",
|
||||
"layer_withCount_many": "Livelli ({{count}})",
|
||||
"layer_withCount_other": "Livelli ({{count}})",
|
||||
"convertToControlLayer": "Converti in livello di controllo",
|
||||
"convertToRasterLayer": "Converti in livello raster",
|
||||
"unlocked": "Sbloccato",
|
||||
"enableTransparencyEffect": "Abilita l'effetto trasparenza",
|
||||
"replaceLayer": "Sostituisci livello",
|
||||
@@ -1859,9 +1947,7 @@
|
||||
"newCanvasSession": "Nuova sessione Tela",
|
||||
"deleteSelected": "Elimina selezione",
|
||||
"settings": {
|
||||
"isolatedFilteringPreview": "Anteprima del filtraggio isolata",
|
||||
"isolatedStagingPreview": "Anteprima di generazione isolata",
|
||||
"isolatedTransformingPreview": "Anteprima di trasformazione isolata",
|
||||
"isolatedPreview": "Anteprima isolata",
|
||||
"invertBrushSizeScrollDirection": "Inverti scorrimento per dimensione pennello",
|
||||
"snapToGrid": {
|
||||
@@ -1873,7 +1959,9 @@
|
||||
"preserveMask": {
|
||||
"alert": "Preservare la regione mascherata",
|
||||
"label": "Preserva la regione mascherata"
|
||||
}
|
||||
},
|
||||
"isolatedLayerPreview": "Anteprima livello isolato",
|
||||
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
|
||||
},
|
||||
"transform": {
|
||||
"reset": "Reimposta",
|
||||
@@ -1918,9 +2006,49 @@
|
||||
"canvasGroup": "Tela",
|
||||
"newRasterLayer": "Nuovo Livello Raster",
|
||||
"saveCanvasToGallery": "Salva la Tela nella Galleria",
|
||||
"saveToGalleryGroup": "Salva nella Galleria"
|
||||
"saveToGalleryGroup": "Salva nella Galleria",
|
||||
"newInpaintMask": "Nuova maschera Inpaint",
|
||||
"newRegionalGuidance": "Nuova Guida Regionale"
|
||||
},
|
||||
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine"
|
||||
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
|
||||
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
|
||||
"copyControlLayerTo": "Copia $t(controlLayers.controlLayer) in",
|
||||
"copyInpaintMaskTo": "Copia $t(controlLayers.inpaintMask) in",
|
||||
"selectObject": {
|
||||
"dragToMove": "Trascina un punto per spostarlo",
|
||||
"clickToAdd": "Fare clic sul livello per aggiungere un punto",
|
||||
"clickToRemove": "Clicca su un punto per rimuoverlo",
|
||||
"help3": "Inverte la selezione per selezionare tutto tranne l'oggetto di destinazione.",
|
||||
"pointType": "Tipo punto",
|
||||
"apply": "Applica",
|
||||
"reset": "Reimposta",
|
||||
"cancel": "Annulla",
|
||||
"selectObject": "Seleziona oggetto",
|
||||
"invertSelection": "Inverti selezione",
|
||||
"exclude": "Escludi",
|
||||
"include": "Includi",
|
||||
"neutral": "Neutro",
|
||||
"saveAs": "Salva come",
|
||||
"process": "Elabora",
|
||||
"help1": "Seleziona un singolo oggetto di destinazione. Aggiungi i punti <Bold>Includi</Bold> e <Bold>Escludi</Bold> per indicare quali parti del livello fanno parte dell'oggetto di destinazione.",
|
||||
"help2": "Inizia con un punto <Bold>Include</Bold> all'interno dell'oggetto di destinazione. Aggiungi altri punti per perfezionare la selezione. Meno punti in genere producono risultati migliori."
|
||||
},
|
||||
"convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
|
||||
"newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
|
||||
"newRegionalGuidance": "Nuova $t(controlLayers.regionalGuidance)",
|
||||
"canvasAsRasterLayer": "$t(controlLayers.canvas) come $t(controlLayers.rasterLayer)",
|
||||
"canvasAsControlLayer": "$t(controlLayers.canvas) come $t(controlLayers.controlLayer)",
|
||||
"convertInpaintMaskTo": "Converti $t(controlLayers.inpaintMask) in",
|
||||
"copyRegionalGuidanceTo": "Copia $t(controlLayers.regionalGuidance) in",
|
||||
"convertRasterLayerTo": "Converti $t(controlLayers.rasterLayer) in",
|
||||
"convertRegionalGuidanceTo": "Converti $t(controlLayers.regionalGuidance) in",
|
||||
"newControlLayer": "Nuovo $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "Nuova $t(controlLayers.inpaintMask)",
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"newFromImage": "Nuovo da Immagine",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -1952,7 +2080,9 @@
|
||||
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
|
||||
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
|
||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
||||
"upscale": "Amplia"
|
||||
"upscale": "Amplia",
|
||||
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invita collaboratori",
|
||||
@@ -2006,18 +2136,21 @@
|
||||
},
|
||||
"newUserExperience": {
|
||||
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
|
||||
"toGetStarted": "Per iniziare, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>."
|
||||
"toGetStarted": "Per iniziare, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>.",
|
||||
"importModels": "Importa modelli",
|
||||
"downloadStarterModels": "Scarica i modelli per iniziare",
|
||||
"noModelsInstalled": "Sembra che tu non abbia installato alcun modello",
|
||||
"toGetStartedLocal": "Per iniziare, assicurati di scaricare o importare i modelli necessari per eseguire Invoke. Quindi, inserisci un prompt nella casella e fai clic su <StrongComponent>Invoke</StrongComponent> per generare la tua prima immagine. Seleziona un modello di prompt per migliorare i risultati. Puoi scegliere di salvare le tue immagini direttamente nella <StrongComponent>Galleria</StrongComponent> o modificarle nella <StrongComponent>Tela</StrongComponent>."
|
||||
},
|
||||
"whatsNew": {
|
||||
"canvasV2Announcement": {
|
||||
"readReleaseNotes": "Leggi le Note di Rilascio",
|
||||
"fluxSupport": "Supporto per la famiglia di modelli Flux",
|
||||
"newCanvas": "Una nuova potente tela di controllo",
|
||||
"watchReleaseVideo": "Guarda il video di rilascio",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"newLayerTypes": "Nuovi tipi di livello per un miglior controllo"
|
||||
},
|
||||
"whatsNewInInvoke": "Novità in Invoke"
|
||||
"whatsNewInInvoke": "Novità in Invoke",
|
||||
"readReleaseNotes": "Leggi le note di rilascio",
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: supporto per SD 3.5 Medium e Large.",
|
||||
"<StrongComponent>Tela</StrongComponent>: elaborazione semplificata del livello di controllo e impostazioni di controllo predefinite migliorate."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
"logLevel": {
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"discordLabel": "Discord",
|
||||
"nodes": "ワークフロー",
|
||||
"txt2img": "txt2img",
|
||||
"postprocessing": "Post Processing",
|
||||
"postprocessing": "ポストプロセス",
|
||||
"t2iAdapter": "T2I アダプター",
|
||||
"communityLabel": "コミュニティ",
|
||||
"dontAskMeAgain": "次回から確認しない",
|
||||
@@ -71,8 +71,8 @@
|
||||
"orderBy": "並び順:",
|
||||
"enabled": "有効",
|
||||
"notInstalled": "未インストール",
|
||||
"positivePrompt": "プロンプト",
|
||||
"negativePrompt": "除外する要素",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"negativePrompt": "ネガティブプロンプト",
|
||||
"selected": "選択済み",
|
||||
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
|
||||
"beta": "ベータ",
|
||||
@@ -80,7 +80,20 @@
|
||||
"editor": "エディタ",
|
||||
"safetensors": "Safetensors",
|
||||
"tab": "タブ",
|
||||
"toResolve": "解決方法"
|
||||
"toResolve": "解決方法",
|
||||
"openInViewer": "ビューアで開く",
|
||||
"placeholderSelectAModel": "モデルを選択",
|
||||
"clipboard": "クリップボード",
|
||||
"apply": "適用",
|
||||
"loadingImage": "画像をロード中",
|
||||
"off": "オフ",
|
||||
"view": "ビュー",
|
||||
"edit": "編集",
|
||||
"ok": "OK",
|
||||
"reset": "リセット",
|
||||
"none": "なし",
|
||||
"new": "新規",
|
||||
"close": "閉じる"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "画像のサイズ",
|
||||
@@ -125,12 +138,114 @@
|
||||
"compareHelp1": "<Kbd>Alt</Kbd> キーを押しながらギャラリー画像をクリックするか、矢印キーを使用して比較画像を変更します。",
|
||||
"compareHelp3": "<Kbd>C</Kbd>を押して、比較した画像を入れ替えます。",
|
||||
"compareHelp4": "<Kbd>[Z</Kbd>]または<Kbd>[Esc</Kbd>]を押して終了します。",
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。"
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。",
|
||||
"move": "移動",
|
||||
"openViewer": "ビューアを開く",
|
||||
"closeViewer": "ビューアを閉じる",
|
||||
"exitSearch": "画像検索を終了",
|
||||
"oldestFirst": "最古から",
|
||||
"showStarredImagesFirst": "スター付き画像を最初に",
|
||||
"exitBoardSearch": "ボード検索を終了",
|
||||
"showArchivedBoards": "アーカイブされたボードを表示",
|
||||
"searchImages": "メタデータで検索",
|
||||
"gallery": "ギャラリー",
|
||||
"newestFirst": "最新から",
|
||||
"jump": "ジャンプ",
|
||||
"go": "進む",
|
||||
"sortDirection": "並び替え順",
|
||||
"displayBoardSearch": "ボード検索",
|
||||
"displaySearch": "画像を検索",
|
||||
"boardsSettings": "ボード設定",
|
||||
"imagesSettings": "ギャラリー画像設定"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "ホットキーを検索",
|
||||
"clearSearch": "検索をクリア",
|
||||
"noHotkeysFound": "ホットキーが見つかりません"
|
||||
"noHotkeysFound": "ホットキーが見つかりません",
|
||||
"viewer": {
|
||||
"runPostprocessing": {
|
||||
"title": "ポストプロセスを実行"
|
||||
},
|
||||
"useSize": {
|
||||
"title": "サイズを使用"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"title": "プロンプトを再使用"
|
||||
},
|
||||
"recallAll": {
|
||||
"title": "全てのメタデータを再使用"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "シード値を再使用"
|
||||
}
|
||||
},
|
||||
"canvas": {
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
},
|
||||
"transformSelected": {
|
||||
"title": "変形"
|
||||
},
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "消しゴムツール"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "変形をキャンセル"
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "レイヤーをリセット"
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "変形を適用"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "スポイトツール"
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "バウンディングボックスをキャンバスにフィット"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "ブラシツール"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"title": "移動ツール"
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"title": "バウンディングボックスツール"
|
||||
},
|
||||
"title": "キャンバス",
|
||||
"fitLayersToCanvas": {
|
||||
"title": "レイヤーをキャンバスにフィット"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"toggleLeftPanel": {
|
||||
"title": "左パネルをトグル",
|
||||
"desc": "左パネルを表示または非表示。"
|
||||
},
|
||||
"title": "アプリケーション",
|
||||
"invoke": {
|
||||
"title": "Invoke"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "キャンセル"
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "キューをクリア"
|
||||
}
|
||||
},
|
||||
"hotkeys": "ホットキー"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "モデルマネージャ",
|
||||
@@ -165,7 +280,7 @@
|
||||
"convertToDiffusers": "ディフューザーに変換",
|
||||
"alpha": "アルファ",
|
||||
"modelConverted": "モデル変換が完了しました",
|
||||
"predictionType": "予測タイプ(安定したディフュージョン 2.x モデルおよび一部の安定したディフュージョン 1.x モデル用)",
|
||||
"predictionType": "予測タイプ(SD 2.x モデルおよび一部のSD 1.x モデル用)",
|
||||
"selectModel": "モデルを選択",
|
||||
"advanced": "高度な設定",
|
||||
"modelDeleted": "モデルが削除されました",
|
||||
@@ -178,7 +293,9 @@
|
||||
"convertToDiffusersHelpText1": "このモデルは 🧨 Diffusers フォーマットに変換されます。",
|
||||
"convertToDiffusersHelpText3": "チェックポイントファイルは、InvokeAIルートフォルダ内にある場合、ディスクから削除されます。カスタムロケーションにある場合は、削除されません。",
|
||||
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
|
||||
"cancel": "キャンセル"
|
||||
"cancel": "キャンセル",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"addModels": "モデルを追加"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -200,7 +317,19 @@
|
||||
"info": "情報",
|
||||
"showOptionsPanel": "オプションパネルを表示",
|
||||
"iterations": "生成回数",
|
||||
"general": "基本設定"
|
||||
"general": "基本設定",
|
||||
"setToOptimalSize": "サイズをモデルに最適化",
|
||||
"invoke": {
|
||||
"addingImagesTo": "画像の追加先"
|
||||
},
|
||||
"aspect": "縦横比",
|
||||
"lockAspectRatio": "縦横比を固定",
|
||||
"scheduler": "スケジューラー",
|
||||
"sendToUpscale": "アップスケーラーに転送",
|
||||
"useSize": "サイズを使用",
|
||||
"postProcessing": "ポストプロセス (Shift + U)",
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"recallMetadata": "メタデータを再使用"
|
||||
},
|
||||
"settings": {
|
||||
"models": "モデル",
|
||||
@@ -213,7 +342,11 @@
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "アップロード失敗",
|
||||
"imageCopied": "画像をコピー"
|
||||
"imageCopied": "画像をコピー",
|
||||
"imageUploadFailed": "画像のアップロードに失敗しました",
|
||||
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
|
||||
"sentToUpscale": "アップスケーラーに転送しました",
|
||||
"imageUploaded": "画像をアップロードしました"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -226,10 +359,12 @@
|
||||
"resetUI": "$t(accessibility.reset) UI",
|
||||
"mode": "モード:",
|
||||
"about": "Invoke について",
|
||||
"submitSupportTicket": "サポート依頼を送信する"
|
||||
"submitSupportTicket": "サポート依頼を送信する",
|
||||
"uploadImages": "画像をアップロード",
|
||||
"toggleLeftPanel": "左パネルをトグル (T)",
|
||||
"toggleRightPanel": "右パネルをトグル (G)"
|
||||
},
|
||||
"metadata": {
|
||||
"seamless": "シームレス",
|
||||
"Threshold": "ノイズ閾値",
|
||||
"seed": "シード",
|
||||
"width": "幅",
|
||||
@@ -238,7 +373,8 @@
|
||||
"scheduler": "スケジューラー",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"strength": "Image to Image 強度",
|
||||
"recallParameters": "パラメータを呼び出す"
|
||||
"recallParameters": "パラメータを再使用",
|
||||
"recallParameter": "{{label}} を再使用"
|
||||
},
|
||||
"queue": {
|
||||
"queueEmpty": "キューが空です",
|
||||
@@ -298,14 +434,22 @@
|
||||
"prune": "刈り込み",
|
||||
"prompts_other": "プロンプト",
|
||||
"iterations_other": "繰り返し",
|
||||
"generations_other": "生成"
|
||||
"generations_other": "生成",
|
||||
"canvas": "キャンバス",
|
||||
"workflows": "ワークフロー",
|
||||
"upscaling": "アップスケール",
|
||||
"generation": "生成",
|
||||
"other": "その他",
|
||||
"gallery": "ギャラリー"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "一致するモデルがありません",
|
||||
"loading": "読み込み中",
|
||||
"noMatchingLoRAs": "一致するLoRAがありません",
|
||||
"noModelsAvailable": "使用可能なモデルがありません",
|
||||
"selectModel": "モデルを選択してください"
|
||||
"selectModel": "モデルを選択してください",
|
||||
"concepts": "コンセプト",
|
||||
"addLora": "LoRAを追加"
|
||||
},
|
||||
"nodes": {
|
||||
"addNode": "ノードを追加",
|
||||
@@ -340,7 +484,8 @@
|
||||
"cannotConnectOutputToOutput": "出力から出力には接続できません",
|
||||
"cannotConnectToSelf": "自身のノードには接続できません",
|
||||
"colorCodeEdges": "カラー-Code Edges",
|
||||
"loadingNodes": "ノードを読み込み中..."
|
||||
"loadingNodes": "ノードを読み込み中...",
|
||||
"scheduler": "スケジューラー"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "自動追加するボード",
|
||||
@@ -363,7 +508,18 @@
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:"
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
"addPrivateBoard": "プライベートボードを追加",
|
||||
"addSharedBoard": "共有ボードを追加",
|
||||
"boards": "ボード",
|
||||
"private": "プライベートボード",
|
||||
"shared": "共有ボード",
|
||||
"archiveBoard": "ボードをアーカイブ",
|
||||
"archived": "アーカイブ完了",
|
||||
"unarchiveBoard": "アーカイブされていないボード",
|
||||
"imagesWithCount_other": "{{count}} の画像"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -388,6 +544,33 @@
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "領域ガイダンスと領域参照画像"
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "領域参照画像"
|
||||
},
|
||||
"paramScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "領域ガイダンス"
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "ラスターレイヤー"
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全域参照画像"
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -428,5 +611,80 @@
|
||||
"tabs": {
|
||||
"queue": "キュー"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"globalReferenceImage_withCount_other": "全域参照画像",
|
||||
"regionalReferenceImage": "領域参照画像",
|
||||
"saveLayerToAssets": "レイヤーをアセットに保存",
|
||||
"global": "全域",
|
||||
"inpaintMasks_withCount_hidden": "インペイントマスク ({{count}} hidden)",
|
||||
"opacity": "透明度",
|
||||
"canvasContextMenu": {
|
||||
"newRegionalGuidance": "新規領域ガイダンス",
|
||||
"bboxGroup": "バウンディングボックスから作成",
|
||||
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
||||
"newGlobalReferenceImage": "新規全域参照画像",
|
||||
"newRegionalReferenceImage": "新規領域参照画像"
|
||||
},
|
||||
"regionalGuidance": "領域ガイダンス",
|
||||
"globalReferenceImage": "全域参照画像",
|
||||
"moveForward": "前面へ移動",
|
||||
"copyInpaintMaskTo": "$t(controlLayers.inpaintMask) をコピー",
|
||||
"transform": {
|
||||
"fitToBbox": "バウンディングボックスにフィット",
|
||||
"transform": "変形",
|
||||
"apply": "適用",
|
||||
"cancel": "キャンセル",
|
||||
"reset": "リセット"
|
||||
},
|
||||
"resetCanvas": "キャンバスをリセット",
|
||||
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
|
||||
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
|
||||
"regionalGuidance_withCount_other": "領域ガイダンス",
|
||||
"tool": {
|
||||
"colorPicker": "スポイト",
|
||||
"brush": "ブラシ",
|
||||
"rectangle": "矩形",
|
||||
"move": "移動",
|
||||
"eraser": "消しゴム"
|
||||
},
|
||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
||||
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
|
||||
"moveToBack": "最背面へ移動",
|
||||
"duplicate": "複製",
|
||||
"addLayer": "レイヤーを追加",
|
||||
"rasterLayer": "ラスターレイヤー",
|
||||
"inpaintMasks_withCount_visible": "({{count}}) インペイントマスク",
|
||||
"regional": "領域",
|
||||
"rectangle": "矩形",
|
||||
"moveBackward": "背面へ移動",
|
||||
"moveToFront": "最前面へ移動",
|
||||
"mergeDown": "レイヤーを統合",
|
||||
"inpaintMask_withCount_other": "インペイントマスク",
|
||||
"canvas": "キャンバス",
|
||||
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
|
||||
"removeBookmark": "ブックマークを外す",
|
||||
"savedToGalleryOk": "ギャラリーに保存しました"
|
||||
},
|
||||
"stylePresets": {
|
||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||
"choosePromptTemplate": "プロンプトテンプレートを選択",
|
||||
"myTemplates": "自分のテンプレート",
|
||||
"flatten": "選択中のテンプレートをプロンプトに展開",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"defaultTemplates": "デフォルトテンプレート",
|
||||
"createPromptTemplate": "プロンプトテンプレートを作成",
|
||||
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
|
||||
"searchByName": "名前で検索",
|
||||
"toggleViewMode": "表示モードを切り替え"
|
||||
},
|
||||
"upscaling": {
|
||||
"upscaleModel": "アップスケールモデル",
|
||||
"postProcessingModel": "ポストプロセスモデル",
|
||||
"upscale": "アップスケール"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"scheduler": "スケジューラー"
|
||||
}
|
||||
}
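The locale entries above lean on three i18next conventions: key nesting via $t(...), {{count}} interpolation, and CLDR plural suffixes such as _other, _few and _many. A minimal, self-contained sketch of how such entries resolve at runtime; the resource fragment and init options here are illustrative, not the app's actual i18n wiring:

import i18next from 'i18next';

// Illustrative fragment modelled on the ja.json keys shown above.
await i18next.init({
  lng: 'ja',
  resources: {
    ja: {
      translation: {
        boards: { movingImagesToBoard_other: '{{count}} の画像をボードに移動:' },
        controlLayers: {
          inpaintMask: 'インペイントマスク',
          copyInpaintMaskTo: '$t(controlLayers.inpaintMask) をコピー',
        },
      },
    },
  },
});

// Japanese has a single CLDR plural category, so any count resolves the `_other` form.
i18next.t('boards.movingImagesToBoard', { count: 3 }); // => "3 の画像をボードに移動:"
// `$t(...)` is expanded by i18next's nesting feature before the string is returned.
i18next.t('controlLayers.copyInpaintMaskTo'); // => "インペイントマスク をコピー"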
@@ -155,7 +155,6 @@
|
||||
"path": "Pad",
|
||||
"triggerPhrases": "Triggerzinnen",
|
||||
"typePhraseHere": "Typ zin hier in",
|
||||
"useDefaultSettings": "Gebruik standaardinstellingen",
|
||||
"modelImageDeleteFailed": "Fout bij verwijderen modelafbeelding",
|
||||
"modelImageUpdated": "Modelafbeelding bijgewerkt",
|
||||
"modelImageUpdateFailed": "Fout bij bijwerken modelafbeelding",
|
||||
@@ -666,7 +665,6 @@
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"seamless": "Naadloos",
|
||||
"positivePrompt": "Positieve prompt",
|
||||
"negativePrompt": "Negatieve prompt",
|
||||
"generationMode": "Genereermodus",
|
||||
|
||||
@@ -94,7 +94,8 @@
|
||||
"reset": "Сброс",
|
||||
"none": "Ничего",
|
||||
"new": "Новый",
|
||||
"ok": "Ok"
|
||||
"ok": "Ok",
|
||||
"close": "Закрыть"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Размер изображений",
|
||||
@@ -160,7 +161,9 @@
|
||||
"openViewer": "Открыть просмотрщик",
|
||||
"closeViewer": "Закрыть просмотрщик",
|
||||
"imagesTab": "Изображения, созданные и сохраненные в Invoke.",
|
||||
"assetsTab": "Файлы, которые вы загрузили для использования в своих проектах."
|
||||
"assetsTab": "Файлы, которые вы загрузили для использования в своих проектах.",
|
||||
"boardsSettings": "Настройки доски",
|
||||
"imagesSettings": "Настройки галереи изображений"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "Поиск горячих клавиш",
|
||||
@@ -541,7 +544,6 @@
|
||||
"scanResults": "Результаты сканирования",
|
||||
"source": "Источник",
|
||||
"triggerPhrases": "Триггерные фразы",
|
||||
"useDefaultSettings": "Использовать стандартные настройки",
|
||||
"modelName": "Название модели",
|
||||
"modelSettings": "Настройки модели",
|
||||
"upcastAttention": "Внимание",
|
||||
@@ -570,7 +572,6 @@
|
||||
"simpleModelPlaceholder": "URL или путь к локальному файлу или папке diffusers",
|
||||
"urlOrLocalPath": "URL или локальный путь",
|
||||
"urlOrLocalPathHelper": "URL-адреса должны указывать на один файл. Локальные пути могут указывать на один файл или папку для одной модели диффузоров.",
|
||||
"hfToken": "Токен HuggingFace",
|
||||
"starterModels": "Стартовые модели",
|
||||
"textualInversions": "Текстовые инверсии",
|
||||
"loraModels": "LoRAs",
|
||||
@@ -583,7 +584,18 @@
|
||||
"learnMoreAboutSupportedModels": "Подробнее о поддерживаемых моделях",
|
||||
"t5Encoder": "T5 энкодер",
|
||||
"spandrelImageToImage": "Image to Image (Spandrel)",
|
||||
"clipEmbed": "CLIP Embed"
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"installingXModels_one": "Установка {{count}} модели",
|
||||
"installingXModels_few": "Установка {{count}} моделей",
|
||||
"installingXModels_many": "Установка {{count}} моделей",
|
||||
"installingBundle": "Установка пакета",
|
||||
"installingModel": "Установка модели",
|
||||
"starterBundles": "Стартовые пакеты",
|
||||
"skippingXDuplicates_one": ", пропуская {{count}} дубликат",
|
||||
"skippingXDuplicates_few": ", пропуская {{count}} дубликата",
|
||||
"skippingXDuplicates_many": ", пропуская {{count}} дубликатов",
|
||||
"includesNModels": "Включает в себя {{n}} моделей и их зависимостей",
|
||||
"starterBundleHelpText": "Легко установите все модели, необходимые для начала работы с базовой моделью, включая основную модель, сети управления, IP-адаптеры и многое другое. При выборе комплекта все уже установленные модели будут пропущены."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Изображения",
|
||||
@@ -730,7 +742,7 @@
|
||||
"serverError": "Ошибка сервера",
|
||||
"connected": "Подключено к серверу",
|
||||
"canceled": "Обработка отменена",
|
||||
"uploadFailedInvalidUploadDesc": "Должно быть одно изображение в формате PNG или JPEG",
|
||||
"uploadFailedInvalidUploadDesc": "Это должны быть изображения PNG или JPEG.",
|
||||
"parameterNotSet": "Параметр не задан",
|
||||
"parameterSet": "Параметр задан",
|
||||
"problemCopyingImage": "Не удается скопировать изображение",
|
||||
@@ -742,7 +754,7 @@
|
||||
"setNodeField": "Установить как поле узла",
|
||||
"invalidUpload": "Неверная загрузка",
|
||||
"imageUploaded": "Изображение загружено",
|
||||
"addedToBoard": "Добавлено на доску",
|
||||
"addedToBoard": "Добавлено в активы доски {{name}}",
|
||||
"workflowLoaded": "Рабочий процесс загружен",
|
||||
"problemDeletingWorkflow": "Проблема с удалением рабочего процесса",
|
||||
"modelAddedSimple": "Модель добавлена в очередь",
|
||||
@@ -777,7 +789,13 @@
|
||||
"unableToLoadStylePreset": "Невозможно загрузить предустановку стиля",
|
||||
"layerCopiedToClipboard": "Слой скопирован в буфер обмена",
|
||||
"sentToUpscale": "Отправить на увеличение",
|
||||
"layerSavedToAssets": "Слой сохранен в активах"
|
||||
"layerSavedToAssets": "Слой сохранен в активах",
|
||||
"linkCopied": "Ссылка скопирована",
|
||||
"addedToUncategorized": "Добавлено в активы доски $t(boards.uncategorized)",
|
||||
"imagesWillBeAddedTo": "Загруженные изображения будут добавлены в активы доски {{boardName}}.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_one": "Должно быть не более {{count}} изображения в формате PNG или JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_few": "Должно быть не более {{count}} изображений в формате PNG или JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_many": "Должно быть не более {{count}} изображений в формате PNG или JPEG."
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Загрузить изображение",
|
||||
@@ -792,7 +810,8 @@
|
||||
"about": "Об этом",
|
||||
"submitSupportTicket": "Отправить тикет в службу поддержки",
|
||||
"toggleRightPanel": "Переключить правую панель (G)",
|
||||
"toggleLeftPanel": "Переключить левую панель (T)"
|
||||
"toggleLeftPanel": "Переключить левую панель (T)",
|
||||
"uploadImages": "Загрузить изображения"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Увеличьте масштаб",
|
||||
@@ -933,7 +952,7 @@
|
||||
"saveToGallery": "Сохранить в галерею",
|
||||
"noWorkflows": "Нет рабочих процессов",
|
||||
"noMatchingWorkflows": "Нет совпадающих рабочих процессов",
|
||||
"workflowHelpText": "Нужна помощь? Ознакомьтесь с нашим руководством <LinkComponent>Getting Started with Workflows</LinkComponent>"
|
||||
"workflowHelpText": "Нужна помощь? Ознакомьтесь с нашим руководством <LinkComponent>Getting Started with Workflows</LinkComponent>."
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Авто добавление Доски",
|
||||
@@ -1381,7 +1400,6 @@
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"seamless": "Бесшовность",
|
||||
"positivePrompt": "Запрос",
|
||||
"negativePrompt": "Негативный запрос",
|
||||
"generationMode": "Режим генерации",
|
||||
@@ -1409,7 +1427,8 @@
|
||||
"recallParameter": "Отозвать {{label}}",
|
||||
"allPrompts": "Все запросы",
|
||||
"imageDimensions": "Размеры изображения",
|
||||
"canvasV2Metadata": "Холст"
|
||||
"canvasV2Metadata": "Холст",
|
||||
"guidance": "Точность"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Статус",
|
||||
@@ -1561,7 +1580,12 @@
|
||||
"defaultWorkflows": "Стандартные рабочие процессы",
|
||||
"deleteWorkflow2": "Вы уверены, что хотите удалить этот рабочий процесс? Это нельзя отменить.",
|
||||
"chooseWorkflowFromLibrary": "Выбрать рабочий процесс из библиотеки",
|
||||
"uploadAndSaveWorkflow": "Загрузить в библиотеку"
|
||||
"uploadAndSaveWorkflow": "Загрузить в библиотеку",
|
||||
"edit": "Редактировать",
|
||||
"download": "Скачать",
|
||||
"copyShareLink": "Скопировать ссылку на общий доступ",
|
||||
"copyShareLinkForWorkflow": "Скопировать ссылку на общий доступ для рабочего процесса",
|
||||
"delete": "Удалить"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Включить исправление высокого разрешения",
|
||||
@@ -1809,14 +1833,12 @@
|
||||
},
|
||||
"settings": {
|
||||
"isolatedPreview": "Изолированный предпросмотр",
|
||||
"isolatedTransformingPreview": "Изолированный предпросмотр преобразования",
|
||||
"invertBrushSizeScrollDirection": "Инвертировать прокрутку для размера кисти",
|
||||
"snapToGrid": {
|
||||
"label": "Привязка к сетке",
|
||||
"on": "Вкл",
|
||||
"off": "Выкл"
|
||||
},
|
||||
"isolatedFilteringPreview": "Изолированный предпросмотр фильтрации",
|
||||
"pressureSensitivity": "Чувствительность к давлению",
|
||||
"isolatedStagingPreview": "Изолированный предпросмотр на промежуточной стадии",
|
||||
"preserveMask": {
|
||||
@@ -1838,7 +1860,6 @@
|
||||
"enableAutoNegative": "Включить авто негатив",
|
||||
"maskFill": "Заполнение маски",
|
||||
"viewProgressInViewer": "Просматривайте прогресс и результаты в <Btn>Просмотрщике изображений</Btn>.",
|
||||
"convertToRasterLayer": "Конвертировать в растровый слой",
|
||||
"tool": {
|
||||
"move": "Двигать",
|
||||
"bbox": "Ограничительная рамка",
|
||||
@@ -1890,7 +1911,10 @@
|
||||
"fitToBbox": "Вместить в рамку",
|
||||
"reset": "Сбросить",
|
||||
"apply": "Применить",
|
||||
"cancel": "Отменить"
|
||||
"cancel": "Отменить",
|
||||
"fitModeContain": "Уместить",
|
||||
"fitMode": "Режим подгонки",
|
||||
"fitModeFill": "Заполнить"
|
||||
},
|
||||
"disableAutoNegative": "Отключить авто негатив",
|
||||
"deleteReferenceImage": "Удалить эталонное изображение",
|
||||
@@ -1903,7 +1927,6 @@
|
||||
"newGallerySession": "Новая сессия галереи",
|
||||
"sendToCanvasDesc": "Нажатие кнопки Invoke отображает вашу текущую работу на холсте.",
|
||||
"globalReferenceImages_withCount_hidden": "Глобальные эталонные изображения ({{count}} скрыто)",
|
||||
"convertToControlLayer": "Конвертировать в контрольный слой",
|
||||
"layer_withCount_one": "Слой ({{count}})",
|
||||
"layer_withCount_few": "Слои ({{count}})",
|
||||
"layer_withCount_many": "Слои ({{count}})",
|
||||
@@ -1920,7 +1943,8 @@
|
||||
"globalReferenceImage": "Глобальное эталонное изображение",
|
||||
"sendToGallery": "Отправить в галерею",
|
||||
"referenceImage": "Эталонное изображение",
|
||||
"addGlobalReferenceImage": "Добавить $t(controlLayers.globalReferenceImage)"
|
||||
"addGlobalReferenceImage": "Добавить $t(controlLayers.globalReferenceImage)",
|
||||
"newImg2ImgCanvasFromImage": "Новое img2img из изображения"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2032,14 +2056,6 @@
|
||||
}
|
||||
},
|
||||
"whatsNew": {
|
||||
"canvasV2Announcement": {
|
||||
"newLayerTypes": "Новые типы слоев для еще большего контроля",
|
||||
"readReleaseNotes": "Прочитать информацию о выпуске",
|
||||
"watchReleaseVideo": "Смотреть видео о выпуске",
|
||||
"fluxSupport": "Поддержка семейства моделей Flux",
|
||||
"newCanvas": "Новый мощный холст управления",
|
||||
"watchUiUpdatesOverview": "Обзор обновлений пользовательского интерфейса"
|
||||
},
|
||||
"whatsNewInInvoke": "Что нового в Invoke"
|
||||
},
|
||||
"newUserExperience": {
|
||||
|
||||
File diff suppressed because it is too large
@@ -82,7 +82,23 @@
|
||||
"dontShowMeThese": "请勿显示这些内容",
|
||||
"beta": "测试版",
|
||||
"toResolve": "解决",
|
||||
"tab": "标签页"
|
||||
"tab": "标签页",
|
||||
"apply": "应用",
|
||||
"edit": "编辑",
|
||||
"off": "关",
|
||||
"loadingImage": "正在加载图片",
|
||||
"ok": "确定",
|
||||
"placeholderSelectAModel": "选择一个模型",
|
||||
"close": "关闭",
|
||||
"reset": "重设",
|
||||
"none": "无",
|
||||
"new": "新建",
|
||||
"view": "视图",
|
||||
"alpha": "透明度通道",
|
||||
"openInViewer": "在查看器中打开",
|
||||
"clipboard": "剪贴板",
|
||||
"loadingModel": "加载模型",
|
||||
"generating": "生成中"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "预览大小",
|
||||
@@ -124,7 +140,7 @@
|
||||
"selectAllOnPage": "选择本页全部",
|
||||
"swapImages": "交换图像",
|
||||
"exitBoardSearch": "退出面板搜索",
|
||||
"exitSearch": "退出搜索",
|
||||
"exitSearch": "退出图像搜索",
|
||||
"oldestFirst": "最旧在前",
|
||||
"sortDirection": "排序方向",
|
||||
"showStarredImagesFirst": "优先显示收藏的图片",
|
||||
@@ -135,17 +151,333 @@
|
||||
"searchImages": "按元数据搜索",
|
||||
"jump": "跳过",
|
||||
"compareHelp2": "按 <Kbd>M</Kbd> 键切换不同的比较模式。",
|
||||
"displayBoardSearch": "显示面板搜索",
|
||||
"displaySearch": "显示搜索",
|
||||
"displayBoardSearch": "板块搜索",
|
||||
"displaySearch": "图像搜索",
|
||||
"stretchToFit": "拉伸以适应",
|
||||
"exitCompare": "退出对比",
|
||||
"compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。",
|
||||
"go": "运行"
|
||||
"go": "运行",
|
||||
"boardsSettings": "画板设置",
|
||||
"imagesSettings": "画廊图片设置",
|
||||
"gallery": "画廊",
|
||||
"move": "移动",
|
||||
"imagesTab": "您在Invoke中创建和保存的图片。",
|
||||
"openViewer": "打开查看器",
|
||||
"closeViewer": "关闭查看器",
|
||||
"assetsTab": "您已上传用于项目的文件。"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "检索快捷键",
|
||||
"noHotkeysFound": "未找到快捷键",
|
||||
"clearSearch": "清除检索项"
|
||||
"clearSearch": "清除检索项",
|
||||
"app": {
|
||||
"cancelQueueItem": {
|
||||
"title": "取消",
|
||||
"desc": "取消当前正在处理的队列项目。"
|
||||
},
|
||||
"selectQueueTab": {
|
||||
"title": "选择队列标签",
|
||||
"desc": "选择队列标签。"
|
||||
},
|
||||
"toggleLeftPanel": {
|
||||
"desc": "显示或隐藏左侧面板。",
|
||||
"title": "开关左侧面板"
|
||||
},
|
||||
"resetPanelLayout": {
|
||||
"title": "重设面板布局",
|
||||
"desc": "将左侧和右侧面板重置为默认大小和布局。"
|
||||
},
|
||||
"togglePanels": {
|
||||
"title": "开关面板",
|
||||
"desc": "同时显示或隐藏左右两侧的面板。"
|
||||
},
|
||||
"selectWorkflowsTab": {
|
||||
"title": "选择工作流标签",
|
||||
"desc": "选择工作流标签。"
|
||||
},
|
||||
"selectModelsTab": {
|
||||
"title": "选择模型标签",
|
||||
"desc": "选择模型标签。"
|
||||
},
|
||||
"toggleRightPanel": {
|
||||
"title": "开关右侧面板",
|
||||
"desc": "显示或隐藏右侧面板。"
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "清除队列",
|
||||
"desc": "取消并清除所有队列条目。"
|
||||
},
|
||||
"selectCanvasTab": {
|
||||
"title": "选择画布标签",
|
||||
"desc": "选择画布标签。"
|
||||
},
|
||||
"invokeFront": {
|
||||
"desc": "将生成请求排队,添加到队列的前面。",
|
||||
"title": "调用(前台)"
|
||||
},
|
||||
"selectUpscalingTab": {
|
||||
"title": "选择放大选项卡",
|
||||
"desc": "选择高清放大选项卡。"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "聚焦提示",
|
||||
"desc": "将光标焦点移动到正向提示。"
|
||||
},
|
||||
"title": "应用程序",
|
||||
"invoke": {
|
||||
"title": "调用",
|
||||
"desc": "将生成请求排队,添加到队列的末尾。"
|
||||
}
|
||||
},
|
||||
"canvas": {
|
||||
"selectBrushTool": {
|
||||
"title": "画笔工具",
|
||||
"desc": "选择画笔工具。"
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "橡皮擦工具",
|
||||
"desc": "选择橡皮擦工具。"
|
||||
},
|
||||
"title": "画布",
|
||||
"selectColorPickerTool": {
|
||||
"title": "拾色器工具",
|
||||
"desc": "选择拾色器工具。"
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "使边界框适应画布",
|
||||
"desc": "缩放并调整视图以适应边界框。"
|
||||
},
|
||||
"setZoomTo400Percent": {
|
||||
"title": "缩放到400%",
|
||||
"desc": "将画布的缩放设置为400%。"
|
||||
},
|
||||
"setZoomTo800Percent": {
|
||||
"desc": "将画布的缩放设置为800%。",
|
||||
"title": "缩放到800%"
|
||||
},
|
||||
"redo": {
|
||||
"desc": "重做上一次画布操作。",
|
||||
"title": "重做"
|
||||
},
|
||||
"nextEntity": {
|
||||
"title": "下一层",
|
||||
"desc": "在列表中选择下一层。"
|
||||
},
|
||||
"selectRectTool": {
|
||||
"title": "矩形工具",
|
||||
"desc": "选择矩形工具。"
|
||||
},
|
||||
"selectViewTool": {
|
||||
"title": "视图工具",
|
||||
"desc": "选择视图工具。"
|
||||
},
|
||||
"prevEntity": {
|
||||
"desc": "在列表中选择上一层。",
|
||||
"title": "上一层"
|
||||
},
|
||||
"transformSelected": {
|
||||
"desc": "变换所选图层。",
|
||||
"title": "变换"
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"title": "边界框工具",
|
||||
"desc": "选择边界框工具。"
|
||||
},
|
||||
"setZoomTo200Percent": {
|
||||
"title": "缩放到200%",
|
||||
"desc": "将画布的缩放设置为200%。"
|
||||
},
|
||||
"applyFilter": {
|
||||
"title": "应用过滤器",
|
||||
"desc": "将待处理的过滤器应用于所选图层。"
|
||||
},
|
||||
"filterSelected": {
|
||||
"title": "过滤器",
|
||||
"desc": "对所选图层进行过滤。仅适用于栅格层和控制层。"
|
||||
},
|
||||
"cancelFilter": {
|
||||
"title": "取消过滤器",
|
||||
"desc": "取消待处理的过滤器。"
|
||||
},
|
||||
"incrementToolWidth": {
|
||||
"title": "增加工具宽度",
|
||||
"desc": "增加所选的画笔或橡皮擦工具的宽度。"
|
||||
},
|
||||
"decrementToolWidth": {
|
||||
"desc": "减少所选的画笔或橡皮擦工具的宽度。",
|
||||
"title": "减少工具宽度"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"title": "移动工具",
|
||||
"desc": "选择移动工具。"
|
||||
},
|
||||
"setFillToWhite": {
|
||||
"title": "将颜色设置为白色",
|
||||
"desc": "将当前工具的颜色设置为白色。"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"desc": "取消待处理的变换。",
|
||||
"title": "取消变换"
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "应用变换",
|
||||
"desc": "将待处理的变换应用于所选图层。"
|
||||
},
|
||||
"setZoomTo100Percent": {
|
||||
"title": "缩放到100%",
|
||||
"desc": "将画布的缩放设置为100%。"
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "重置图层",
|
||||
"desc": "重置选定的图层。仅适用于修复蒙版和区域指导。"
|
||||
},
|
||||
"undo": {
|
||||
"title": "撤消",
|
||||
"desc": "撤消上一次画布操作。"
|
||||
},
|
||||
"quickSwitch": {
|
||||
"title": "图层快速切换",
|
||||
"desc": "在最后两个选定的图层之间切换。如果某个图层被书签标记,则始终在该图层和最后一个未标记的图层之间切换。"
|
||||
},
|
||||
"fitLayersToCanvas": {
|
||||
"title": "使图层适应画布",
|
||||
"desc": "缩放并调整视图以适应所有可见图层。"
|
||||
},
|
||||
"deleteSelected": {
|
||||
"title": "删除图层",
|
||||
"desc": "删除选定的图层。"
|
||||
}
|
||||
},
|
||||
"hotkeys": "快捷键",
|
||||
"workflows": {
|
||||
"pasteSelection": {
|
||||
"title": "粘贴",
|
||||
"desc": "粘贴复制的节点和边。"
|
||||
},
|
||||
"title": "工作流",
|
||||
"addNode": {
|
||||
"title": "添加节点",
|
||||
"desc": "打开添加节点菜单。"
|
||||
},
|
||||
"copySelection": {
|
||||
"desc": "复制选定的节点和边。",
|
||||
"title": "复制"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
"title": "带边缘的粘贴",
|
||||
"desc": "粘贴复制的节点、边,以及与复制的节点连接的所有边。"
|
||||
},
|
||||
"selectAll": {
|
||||
"title": "全选",
|
||||
"desc": "选择所有节点和边。"
|
||||
},
|
||||
"deleteSelection": {
|
||||
"title": "删除",
|
||||
"desc": "删除选定的节点和边。"
|
||||
},
|
||||
"undo": {
|
||||
"title": "撤销",
|
||||
"desc": "撤销上一个工作流操作。"
|
||||
},
|
||||
"redo": {
|
||||
"desc": "重做上一个工作流操作。",
|
||||
"title": "重做"
|
||||
}
|
||||
},
|
||||
"gallery": {
|
||||
"title": "画廊",
|
||||
"galleryNavUp": {
|
||||
"title": "向上导航",
|
||||
"desc": "在图库网格中向上导航,选择该图像。如果在页面顶部,则转到上一页。"
|
||||
},
|
||||
"galleryNavUpAlt": {
|
||||
"title": "向上导航(比较图像)",
|
||||
"desc": "与向上导航相同,但选择比较图像,如果比较模式尚未打开,则将其打开。"
|
||||
},
|
||||
"selectAllOnPage": {
|
||||
"desc": "选择当前页面上的所有图像。",
|
||||
"title": "选页面上的所有内容"
|
||||
},
|
||||
"galleryNavDownAlt": {
|
||||
"title": "向下导航(比较图像)",
|
||||
"desc": "与向下导航相同,但选择比较图像,如果比较模式尚未打开,则将其打开。"
|
||||
},
|
||||
"galleryNavLeftAlt": {
|
||||
"title": "向左导航(比较图像)",
|
||||
"desc": "与向左导航相同,但选择比较图像,如果比较模式尚未打开,则将其打开。"
|
||||
},
|
||||
"clearSelection": {
|
||||
"title": "清除选择",
|
||||
"desc": "清除当前的选择(如果有的话)。"
|
||||
},
|
||||
"deleteSelection": {
|
||||
"title": "删除",
|
||||
"desc": "删除所有选定的图像。默认情况下,系统会提示您确认删除。如果这些图像当前在应用中使用,系统将发出警告。"
|
||||
},
|
||||
"galleryNavLeft": {
|
||||
"title": "向左导航",
|
||||
"desc": "在图库网格中向左导航,选择该图像。如果处于行的第一张图像,转到上一行。如果处于页面的第一张图像,转到上一页。"
|
||||
},
|
||||
"galleryNavRight": {
|
||||
"title": "向右导航",
|
||||
"desc": "在图库网格中向右导航,选择该图像。如果在行的最后一张图像,转到下一行。如果在页面的最后一张图像,转到下一页。"
|
||||
},
|
||||
"galleryNavDown": {
|
||||
"desc": "在图库网格中向下导航,选择该图像。如果在页面底部,则转到下一页。",
|
||||
"title": "向下导航"
|
||||
},
|
||||
"galleryNavRightAlt": {
|
||||
"title": "向右导航(比较图像)",
|
||||
"desc": "与向右导航相同,但选择比较图像,如果比较模式尚未打开,则将其打开。"
|
||||
}
|
||||
},
|
||||
"viewer": {
|
||||
"toggleMetadata": {
|
||||
"desc": "显示或隐藏当前图像的元数据覆盖。",
|
||||
"title": "显示/隐藏元数据"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"desc": "召回当前图像的正面和负面提示。",
|
||||
"title": "召回提示"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "显示/隐藏图像查看器",
|
||||
"desc": "显示或隐藏图像查看器。仅在画布选项卡上可用。"
|
||||
},
|
||||
"recallAll": {
|
||||
"desc": "召回当前图像的所有元数据。",
|
||||
"title": "召回所有元数据"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "召回种子",
|
||||
"desc": "召回当前图像的种子。"
|
||||
},
|
||||
"swapImages": {
|
||||
"title": "交换比较图像",
|
||||
"desc": "交换正在比较的图像。"
|
||||
},
|
||||
"nextComparisonMode": {
|
||||
"title": "下一个比较模式",
|
||||
"desc": "环浏览比较模式。"
|
||||
},
|
||||
"loadWorkflow": {
|
||||
"title": "加载工作流",
|
||||
"desc": "加载当前图像的保存工作流程(如果有的话)。"
|
||||
},
|
||||
"title": "图像查看器",
|
||||
"remix": {
|
||||
"title": "混合",
|
||||
"desc": "召回当前图像的所有元数据,除了种子。"
|
||||
},
|
||||
"useSize": {
|
||||
"title": "使用尺寸",
|
||||
"desc": "使用当前图像的尺寸作为边界框尺寸。"
|
||||
},
|
||||
"runPostprocessing": {
|
||||
"title": "行后处理",
|
||||
"desc": "对当前图像运行所选的后处理。"
|
||||
}
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "模型管理器",
|
||||
@@ -210,7 +542,6 @@
|
||||
"noModelsInstalled": "无已安装的模型",
|
||||
"urlOrLocalPathHelper": "链接应该指向单个文件.本地路径可以指向单个文件,或者对于单个扩散模型(diffusers model),可以指向一个文件夹.",
|
||||
"modelSettings": "模型设置",
|
||||
"useDefaultSettings": "使用默认设置",
|
||||
"scanPlaceholder": "本地文件夹路径",
|
||||
"installRepo": "安装仓库",
|
||||
"modelImageDeleted": "模型图像已删除",
|
||||
@@ -249,7 +580,36 @@
|
||||
"loraTriggerPhrases": "LoRA 触发词",
|
||||
"ipAdapters": "IP适配器",
|
||||
"spandrelImageToImage": "图生图(Spandrel)",
|
||||
"starterModelsInModelManager": "您可以在模型管理器中找到初始模型"
|
||||
"starterModelsInModelManager": "您可以在模型管理器中找到初始模型",
|
||||
"noDefaultSettings": "此模型没有配置默认设置。请访问模型管理器添加默认设置。",
|
||||
"clipEmbed": "CLIP 嵌入",
|
||||
"defaultSettingsOutOfSync": "某些设置与模型的默认值不匹配:",
|
||||
"restoreDefaultSettings": "点击以使用模型的默认设置。",
|
||||
"usingDefaultSettings": "使用模型的默认设置",
|
||||
"huggingFace": "HuggingFace",
|
||||
"hfTokenInvalid": "HF 令牌无效或缺失",
|
||||
"hfTokenLabel": "HuggingFace 令牌(某些模型所需)",
|
||||
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。",
|
||||
"includesNModels": "包括 {{n}} 个模型及其依赖项",
|
||||
"starterBundles": "启动器包",
|
||||
"learnMoreAboutSupportedModels": "了解更多关于我们支持的模型的信息",
|
||||
"hfForbidden": "您没有权限访问这个 HF 模型",
|
||||
"hfTokenInvalidErrorMessage": "无效或缺失 HuggingFace 令牌。",
|
||||
"hfTokenRequired": "您正在尝试下载一个需要有效 HuggingFace 令牌的模型。",
|
||||
"hfTokenSaved": "HF 令牌已保存",
|
||||
"hfForbiddenErrorMessage": "我们建议访问 HuggingFace.com 上的仓库页面。所有者可能要求您接受条款才能下载。",
|
||||
"hfTokenUnableToVerifyErrorMessage": "无法验证 HuggingFace 令牌。这可能是由于网络错误导致的。请稍后再试。",
|
||||
"hfTokenInvalidErrorMessage2": "在这里更新它。 ",
|
||||
"hfTokenUnableToVerify": "无法验证 HF 令牌",
|
||||
"skippingXDuplicates_other": "跳过 {{count}} 个重复项",
|
||||
"starterBundleHelpText": "轻松安装所有用于启动基础模型所需的模型,包括主模型、ControlNets、IP适配器等。选择一个安装包时,会跳过已安装的模型。",
|
||||
"installingBundle": "正在安装模型包",
|
||||
"installingModel": "正在安装模型",
|
||||
"installingXModels_other": "正在安装 {{count}} 个模型",
|
||||
"t5Encoder": "T5 编码器",
|
||||
"clipLEmbed": "CLIP-L 嵌入",
|
||||
"clipGEmbed": "CLIP-G 嵌入",
|
||||
"loraModels": "LoRAs(低秩适配)"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "图像",
|
||||
@@ -308,8 +668,23 @@
|
||||
"controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
|
||||
"ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
|
||||
"ipAdapterNoImageSelected": "未选择IP Adapter图像",
|
||||
"rgNoRegion": "未选择区域"
|
||||
}
|
||||
"rgNoRegion": "未选择区域",
|
||||
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框宽度为 {{width}}",
|
||||
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框高度为 {{height}}",
|
||||
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框高度为 {{height}}",
|
||||
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框宽度为 {{width}}"
|
||||
},
|
||||
"canvasIsFiltering": "画布正在过滤",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
|
||||
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
|
||||
"noFLUXVAEModelSelected": "未为FLUX生成选择VAE模型",
|
||||
"canvasIsRasterizing": "画布正在栅格化",
|
||||
"canvasIsCompositing": "画布正在合成",
|
||||
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框宽度为 {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框宽度为 {{width}}",
|
||||
"noT5EncoderModelSelected": "未为FLUX生成选择T5编码器模型",
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框高度为 {{height}}",
|
||||
"canvasIsTransforming": "画布正在变换"
|
||||
},
|
||||
"patchmatchDownScaleSize": "缩小",
|
||||
"clipSkip": "CLIP 跳过层",
|
||||
@@ -331,7 +706,15 @@
|
||||
"sendToUpscale": "发送到放大",
|
||||
"processImage": "处理图像",
|
||||
"infillColorValue": "填充颜色",
|
||||
"coherenceMinDenoise": "最小去噪"
|
||||
"coherenceMinDenoise": "最小去噪",
|
||||
"sendToCanvas": "发送到画布",
|
||||
"disabledNoRasterContent": "已禁用(无栅格内容)",
|
||||
"optimizedImageToImage": "优化的图生图",
|
||||
"guidance": "引导",
|
||||
"gaussianBlur": "高斯模糊",
|
||||
"recallMetadata": "调用元数据",
|
||||
"boxBlur": "方框模糊",
|
||||
"staged": "已分阶段处理"
|
||||
},
|
||||
"settings": {
|
||||
"models": "模型",
|
||||
@@ -361,13 +744,18 @@
|
||||
"enableInformationalPopovers": "启用信息弹窗",
|
||||
"reloadingIn": "重新加载中",
|
||||
"informationalPopoversDisabled": "信息提示框已禁用",
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用."
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用.",
|
||||
"enableModelDescriptions": "在下拉菜单中启用模型描述",
|
||||
"confirmOnNewSession": "新会话时确认",
|
||||
"modelDescriptionsDisabledDesc": "下拉菜单中的模型描述已被禁用。可在设置中启用。",
|
||||
"modelDescriptionsDisabled": "下拉菜单中的模型描述已禁用",
|
||||
"showDetailedInvocationProgress": "显示进度详情"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "上传失败",
|
||||
"imageCopied": "图像已复制",
|
||||
"parametersNotSet": "参数未恢复",
|
||||
"uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片",
|
||||
"uploadFailedInvalidUploadDesc": "必须是单个 PNG 或 JPEG 图像。",
|
||||
"connected": "服务器连接",
|
||||
"parameterSet": "参数已恢复",
|
||||
"parameterNotSet": "参数未恢复",
|
||||
@@ -379,7 +767,7 @@
|
||||
"setControlImage": "设为控制图像",
|
||||
"setNodeField": "设为节点字段",
|
||||
"imageUploaded": "图像已上传",
|
||||
"addedToBoard": "已添加到面板",
|
||||
"addedToBoard": "添加到{{name}}的资产中",
|
||||
"workflowLoaded": "工作流已加载",
|
||||
"imageUploadFailed": "图像上传失败",
|
||||
"baseModelChangedCleared_other": "已清除或禁用{{count}}个不兼容的子模型",
|
||||
@@ -402,7 +790,24 @@
|
||||
"errorCopied": "错误信息已复制",
|
||||
"modelImportCanceled": "模型导入已取消",
|
||||
"importFailed": "导入失败",
|
||||
"importSuccessful": "导入成功"
|
||||
"importSuccessful": "导入成功",
|
||||
"layerSavedToAssets": "图层已保存到资产",
|
||||
"sentToUpscale": "已发送到放大处理",
|
||||
"addedToUncategorized": "已添加到看板 $t(boards.uncategorized) 的资产中",
|
||||
"linkCopied": "链接已复制",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "最多只能上传 {{count}} 张 PNG 或 JPEG 图像。",
|
||||
"problemSavingLayer": "无法保存图层",
|
||||
"unableToLoadImage": "无法加载图像",
|
||||
"imageNotLoadedDesc": "无法找到图像",
|
||||
"unableToLoadStylePreset": "无法加载样式预设",
|
||||
"stylePresetLoaded": "样式预设已加载",
|
||||
"problemCopyingLayer": "无法复制图层",
|
||||
"sentToCanvas": "已发送到画布",
|
||||
"unableToLoadImageMetadata": "无法加载图像元数据",
|
||||
"imageSaved": "图像已保存",
|
||||
"imageSavingFailed": "图像保存失败",
|
||||
"layerCopiedToClipboard": "图层已复制到剪贴板",
|
||||
"imagesWillBeAddedTo": "上传的图像将添加到看板 {{boardName}} 的资产中。"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Invoke 进度条",
|
||||
@@ -416,7 +821,9 @@
|
||||
"createIssue": "创建问题",
|
||||
"about": "关于",
|
||||
"submitSupportTicket": "提交支持工单",
|
||||
"toggleRightPanel": "切换右侧面板(G)"
|
||||
"toggleRightPanel": "切换右侧面板(G)",
|
||||
"uploadImages": "上传图片",
|
||||
"toggleLeftPanel": "开关左侧面板(T)"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "放大",
|
||||
@@ -550,7 +957,12 @@
|
||||
"clearWorkflow": "清除工作流",
|
||||
"imageAccessError": "无法找到图像 {{image_name}},正在恢复默认设置",
|
||||
"boardAccessError": "无法找到面板 {{board_id}},正在恢复默认设置",
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置"
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置",
|
||||
"noWorkflows": "无工作流程",
|
||||
"workflowHelpText": "需要帮助?请查看我们的《<LinkComponent>工作流程入门指南</LinkComponent>》。",
|
||||
"noMatchingWorkflows": "无匹配的工作流程",
|
||||
"saveToGallery": "保存到图库",
|
||||
"singleFieldType": "{{name}}(单一模型)"
|
||||
},
|
||||
"queue": {
|
||||
"status": "状态",
|
||||
@@ -569,7 +981,7 @@
|
||||
"cancelSucceeded": "项目已取消",
|
||||
"queue": "队列",
|
||||
"batch": "批处理",
|
||||
"clearQueueAlertDialog": "清除队列时会立即取消所有处理中的项目并且会完全清除队列。",
|
||||
"clearQueueAlertDialog": "清空队列将立即取消所有正在处理的项目,并完全清空队列。待处理的过滤器将被取消。",
|
||||
"pending": "待定",
|
||||
"completedIn": "完成于",
|
||||
"resumeFailed": "恢复处理器时出现问题",
|
||||
@@ -610,7 +1022,15 @@
|
||||
"openQueue": "打开队列",
|
||||
"prompts_other": "提示词",
|
||||
"iterations_other": "迭代",
|
||||
"generations_other": "生成"
|
||||
"generations_other": "生成",
|
||||
"canvas": "画布",
|
||||
"workflows": "工作流",
|
||||
"generation": "生成",
|
||||
"other": "其他",
|
||||
"gallery": "画廊",
|
||||
"destination": "目标存储",
|
||||
"upscaling": "高清放大",
|
||||
"origin": "来源"
|
||||
},
|
||||
"sdxl": {
|
||||
"refinerStart": "Refiner 开始作用时机",
|
||||
@@ -649,7 +1069,6 @@
|
||||
"workflow": "工作流",
|
||||
"steps": "步数",
|
||||
"scheduler": "调度器",
|
||||
"seamless": "无缝",
|
||||
"recallParameters": "召回参数",
|
||||
"noRecallParameters": "未找到要召回的参数",
|
||||
"vae": "VAE",
|
||||
@@ -658,7 +1077,11 @@
|
||||
"parsingFailed": "解析失败",
|
||||
"recallParameter": "调用{{label}}",
|
||||
"imageDimensions": "图像尺寸",
|
||||
"parameterSet": "已设置参数{{parameter}}"
|
||||
"parameterSet": "已设置参数{{parameter}}",
|
||||
"guidance": "指导",
|
||||
"seamlessXAxis": "无缝 X 轴",
|
||||
"seamlessYAxis": "无缝 Y 轴",
|
||||
"canvasV2Metadata": "画布"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "无相匹配的模型",
|
||||
@@ -709,7 +1132,8 @@
|
||||
"shared": "共享面板",
|
||||
"archiveBoard": "归档面板",
|
||||
"archived": "已归档",
|
||||
"assetsWithCount_other": "{{count}}项资源"
|
||||
"assetsWithCount_other": "{{count}}项资源",
|
||||
"updateBoardError": "更新画板出错"
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"seedBehaviour": {
|
||||
@@ -869,7 +1293,8 @@
|
||||
"heading": "去噪强度",
|
||||
"paragraphs": [
|
||||
"为输入图像添加的噪声量。",
|
||||
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。"
|
||||
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。",
|
||||
"当没有具有可见内容的栅格图层时,此设置将被忽略。"
|
||||
]
|
||||
},
|
||||
"paramSeed": {
|
||||
@@ -1058,7 +1483,8 @@
|
||||
"paragraphs": [
|
||||
"控制提示对生成过程的影响程度.",
|
||||
"与生成CFG Scale相似."
|
||||
]
|
||||
],
|
||||
"heading": "CFG比例"
|
||||
},
|
||||
"structure": {
|
||||
"heading": "结构",
|
||||
@@ -1089,6 +1515,62 @@
|
||||
"paragraphs": [
|
||||
"比例控制决定了输出图像的大小,它是基于输入图像分辨率的倍数来计算的.例如对一张1024x1024的图像进行2倍上采样,将会得到一张2048x2048的输出图像."
|
||||
]
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全局参考图像",
|
||||
"paragraphs": [
|
||||
"应用参考图像以影响整个生成过程。"
|
||||
]
|
||||
},
|
||||
"rasterLayer": {
|
||||
"paragraphs": [
|
||||
"画布的基于像素的内容,用于图像生成过程。"
|
||||
],
|
||||
"heading": "栅格图层"
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"paragraphs": [
|
||||
"对于区域引导,使用画笔引导全局提示中的元素应出现的位置。",
|
||||
"对于区域参考图像,使用画笔将参考图像应用到特定区域。"
|
||||
],
|
||||
"heading": "区域引导与区域参考图像"
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "区域参考图像",
|
||||
"paragraphs": [
|
||||
"使用画笔将参考图像应用到特定区域。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "优化的图生图",
|
||||
"paragraphs": [
|
||||
"启用‘优化的图生图’功能,可在使用 Flux 模型进行图生图和图像修复转换时提供更平滑的降噪强度调节。此设置可以提高对图像变化程度的控制能力,但如果您更倾向于使用标准的降噪强度调节方式,也可以关闭此功能。该设置仍在优化中,目前处于测试阶段。"
|
||||
]
|
||||
},
|
||||
"inpainting": {
|
||||
"paragraphs": [
|
||||
"控制由降噪强度引导的修改区域。"
|
||||
],
|
||||
"heading": "图像重绘"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "区域引导",
|
||||
"paragraphs": [
|
||||
"使用画笔引导全局提示中的元素应出现的位置。"
|
||||
]
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商业许可",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev] 模型受 FLUX [dev] 非商业许可协议的约束。如需在 Invoke 中将此模型类型用于商业目的,请访问我们的网站了解更多信息。"
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"控制提示对生成过程的影响程度。",
|
||||
"较高的引导值可能导致过度饱和,而过高或过低的引导值可能导致生成结果失真。引导仅适用于FLUX DEV模型。"
|
||||
],
|
||||
"heading": "引导"
|
||||
}
|
||||
},
|
||||
"invocationCache": {
|
||||
@@ -1151,7 +1633,18 @@
|
||||
"convertGraph": "转换图表",
|
||||
"loadWorkflow": "$t(common.load) 工作流",
|
||||
"loadFromGraph": "从图表加载工作流",
|
||||
"autoLayout": "自动布局"
|
||||
"autoLayout": "自动布局",
|
||||
"edit": "编辑",
|
||||
"copyShareLinkForWorkflow": "复制工作流程的分享链接",
|
||||
"delete": "删除",
|
||||
"download": "下载",
|
||||
"defaultWorkflows": "默认工作流程",
|
||||
"userWorkflows": "用户工作流程",
|
||||
"projectWorkflows": "项目工作流程",
|
||||
"copyShareLink": "复制分享链接",
|
||||
"chooseWorkflowFromLibrary": "从库中选择工作流程",
|
||||
"uploadAndSaveWorkflow": "上传到库",
|
||||
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
@@ -1175,7 +1668,8 @@
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "添加提示词触发器",
|
||||
"noMatchingTriggers": "没有匹配的触发器"
|
||||
"noMatchingTriggers": "没有匹配的触发器",
|
||||
"compatibleEmbeddings": "兼容的嵌入"
|
||||
},
|
||||
"controlLayers": {
|
||||
"autoNegative": "自动反向",
|
||||
@@ -1186,10 +1680,115 @@
|
||||
"moveToFront": "移动到前面",
|
||||
"addLayer": "添加层",
|
||||
"deletePrompt": "删除提示词",
|
||||
"addPositivePrompt": "添加 $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "添加 $t(common.negativePrompt)",
|
||||
"addPositivePrompt": "添加 $t(controlLayers.prompt)",
|
||||
"addNegativePrompt": "添加 $t(controlLayers.negativePrompt)",
|
||||
"rectangle": "矩形",
|
||||
"opacity": "透明度"
|
||||
"opacity": "透明度",
|
||||
"canvas": "画布",
|
||||
"fitBboxToLayers": "将边界框适配到图层",
|
||||
"cropLayerToBbox": "将图层裁剪到边界框",
|
||||
"saveBboxToGallery": "将边界框保存到图库",
|
||||
"savedToGalleryOk": "已保存到图库",
|
||||
"saveLayerToAssets": "将图层保存到资产",
|
||||
"removeBookmark": "移除书签",
|
||||
"regional": "区域",
|
||||
"saveCanvasToGallery": "将画布保存到图库",
|
||||
"global": "全局",
|
||||
"bookmark": "添加书签以快速切换",
|
||||
"regionalReferenceImage": "局部参考图像",
|
||||
"mergingLayers": "正在合并图层",
|
||||
"newControlLayerError": "创建控制层时出现问题",
|
||||
"pullBboxIntoReferenceImageError": "将边界框导入参考图像时出现问题",
|
||||
"mergeVisibleOk": "已合并图层",
|
||||
"maskFill": "遮罩填充",
|
||||
"newCanvasFromImage": "从图像创建新画布",
|
||||
"pullBboxIntoReferenceImageOk": "边界框已导入到参考图像",
|
||||
"globalReferenceImage_withCount_other": "全局参考图像",
|
||||
"addInpaintMask": "添加 $t(controlLayers.inpaintMask)",
|
||||
"referenceImage": "参考图像",
|
||||
"globalReferenceImage": "全局参考图像",
|
||||
"newRegionalGuidance": "新建 $t(controlLayers.regionalGuidance)",
|
||||
"savedToGalleryError": "保存到图库时出错",
|
||||
"copyRasterLayerTo": "复制 $t(controlLayers.rasterLayer) 到",
|
||||
"clearHistory": "清除历史记录",
|
||||
"inpaintMask": "修复遮罩",
|
||||
"regionalGuidance_withCount_visible": "区域引导({{count}} 个)",
|
||||
"inpaintMasks_withCount_hidden": "修复遮罩({{count}} 个已隐藏)",
|
||||
"enableAutoNegative": "启用自动负面提示",
|
||||
"disableAutoNegative": "禁用自动负面提示",
|
||||
"deleteReferenceImage": "删除参考图像",
|
||||
"sendToCanvas": "发送到画布",
|
||||
"controlLayers_withCount_visible": "控制图层({{count}} 个)",
|
||||
"rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
|
||||
"canvasAsRasterLayer": "将 $t(controlLayers.canvas) 转换为 $t(controlLayers.rasterLayer)",
|
||||
"canvasAsControlLayer": "将 $t(controlLayers.canvas) 转换为 $t(controlLayers.controlLayer)",
|
||||
"convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
|
||||
"newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
|
||||
"regionIsEmpty": "选定区域为空",
|
||||
"mergeVisible": "合并可见图层",
|
||||
"showHUD": "显示 HUD(抬头显示)",
|
||||
"newLayerFromImage": "从图像创建新图层",
|
||||
"layer_other": "图层",
|
||||
"transparency": "透明度",
|
||||
"addRasterLayer": "添加 $t(controlLayers.rasterLayer)",
|
||||
"newRasterLayerOk": "已创建栅格层",
|
||||
"newRasterLayerError": "创建栅格层时出现问题",
|
||||
"inpaintMasks_withCount_visible": "修复遮罩({{count}} 个)",
|
||||
"convertRasterLayerTo": "将 $t(controlLayers.rasterLayer) 转换为",
|
||||
"copyControlLayerTo": "复制 $t(controlLayers.controlLayer) 到",
|
||||
"copyInpaintMaskTo": "复制 $t(controlLayers.inpaintMask) 到",
|
||||
"copyRegionalGuidanceTo": "复制 $t(controlLayers.regionalGuidance) 到",
|
||||
"newRasterLayer": "新建 $t(controlLayers.rasterLayer)",
|
||||
"newControlLayer": "新建 $t(controlLayers.controlLayer)",
|
||||
"newImg2ImgCanvasFromImage": "从图像创建新的图生图",
|
||||
"rasterLayer": "栅格层",
|
||||
"controlLayer": "控制层",
|
||||
"outputOnlyMaskedRegions": "仅输出生成的区域",
|
||||
"addControlLayer": "添加 $t(controlLayers.controlLayer)",
|
||||
"newGlobalReferenceImageOk": "已创建全局参考图像",
|
||||
"newGlobalReferenceImageError": "创建全局参考图像时出现问题",
|
||||
"newRegionalReferenceImageOk": "已创建局部参考图像",
|
||||
"newControlLayerOk": "已创建控制层",
|
||||
"mergeVisibleError": "合并图层时出错",
|
||||
"bboxOverlay": "显示边界框覆盖层",
|
||||
"clipToBbox": "将Clip限制到边界框",
|
||||
"width": "宽度",
|
||||
"addGlobalReferenceImage": "添加 $t(controlLayers.globalReferenceImage)",
|
||||
"inpaintMask_withCount_other": "修复遮罩",
|
||||
"regionalGuidance_withCount_other": "区域引导",
|
||||
"newRegionalReferenceImageError": "创建局部参考图像时出现问题",
|
||||
"pullBboxIntoLayerError": "将边界框导入图层时出现问题",
|
||||
"pullBboxIntoLayerOk": "边界框已导入到图层",
|
||||
"sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
|
||||
"resetCanvas": "重置画布",
|
||||
"sendToGallery": "发送到图库",
|
||||
"sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
|
||||
"rasterLayer_withCount_other": "栅格图层",
|
||||
"newFromImage": "从图像创建新内容",
|
||||
"mergeDown": "向下合并",
|
||||
"clearCaches": "清除缓存",
|
||||
"recalculateRects": "重新计算矩形",
|
||||
"duplicate": "复制",
|
||||
"regionalGuidance_withCount_hidden": "区域引导({{count}} 个已隐藏)",
|
||||
"convertControlLayerTo": "将 $t(controlLayers.controlLayer) 转换为",
|
||||
"convertInpaintMaskTo": "将 $t(controlLayers.inpaintMask) 转换为",
|
||||
"viewProgressInViewer": "在 <Btn>图像查看器</Btn> 中查看进度和输出结果。",
|
||||
"viewProgressOnCanvas": "在 <Btn>画布</Btn> 上查看进度和暂存的输出内容。",
|
||||
"sendingToGallery": "将生成内容发送到图库",
|
||||
"copyToClipboard": "复制到剪贴板",
|
||||
"controlLayer_withCount_other": "控制图层",
|
||||
"sendingToCanvas": "在画布上准备生成",
|
||||
"addReferenceImage": "添加 $t(controlLayers.referenceImage)",
|
||||
"addRegionalGuidance": "添加 $t(controlLayers.regionalGuidance)",
|
||||
"controlLayers_withCount_hidden": "控制图层({{count}} 个已隐藏)",
|
||||
"rasterLayers_withCount_hidden": "栅格图层({{count}} 个已隐藏)",
|
||||
"globalReferenceImages_withCount_hidden": "全局参考图像({{count}} 个已隐藏)",
|
||||
"globalReferenceImages_withCount_visible": "全局参考图像({{count}} 个)",
|
||||
"layer_withCount_other": "图层({{count}} 个)",
|
||||
"enableTransparencyEffect": "启用透明效果",
|
||||
"disableTransparencyEffect": "禁用透明效果",
|
||||
"hidingType": "隐藏 {{type}}",
|
||||
"showingType": "显示 {{type}}"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
|
||||
@@ -58,7 +58,6 @@
|
||||
"model": "模型",
|
||||
"seed": "種子",
|
||||
"vae": "VAE",
|
||||
"seamless": "無縫",
|
||||
"metadata": "元數據",
|
||||
"width": "寬度",
|
||||
"height": "高度"
|
||||
|
||||
@@ -4,13 +4,12 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
|
||||
import { useLogger } from 'app/logging/useLogger';
|
||||
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
|
||||
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import type { PartialAppConfig } from 'app/types/invokeai';
|
||||
import ImageUploadOverlay from 'common/components/ImageUploadOverlay';
|
||||
import { useFocusRegionWatcher } from 'common/hooks/focus';
|
||||
import { useClearStorage } from 'common/hooks/useClearStorage';
|
||||
import { useFullscreenDropzone } from 'common/hooks/useFullscreenDropzone';
|
||||
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
|
||||
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
|
||||
import {
|
||||
@@ -18,6 +17,7 @@ import {
|
||||
NewGallerySessionDialog,
|
||||
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
|
||||
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
|
||||
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
|
||||
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
|
||||
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
|
||||
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
|
||||
@@ -59,8 +59,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
useGlobalModifiersInit();
|
||||
useGlobalHotkeys();
|
||||
useGetOpenAPISchemaQuery();
|
||||
|
||||
const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
|
||||
useSyncLoggingConfig();
|
||||
|
||||
const handleReset = useCallback(() => {
|
||||
clearStorage();
|
||||
@@ -90,19 +89,8 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
|
||||
return (
|
||||
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
|
||||
<Box
|
||||
id="invoke-app-wrapper"
|
||||
w="100dvw"
|
||||
h="100dvh"
|
||||
position="relative"
|
||||
overflow="hidden"
|
||||
{...dropzone.getRootProps()}
|
||||
>
|
||||
<input {...dropzone.getInputProps()} />
|
||||
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
|
||||
<AppContent />
|
||||
{dropzone.isDragActive && isHandlingUpload && (
|
||||
<ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
|
||||
)}
|
||||
</Box>
|
||||
<DeleteImageModal />
|
||||
<ChangeBoardModal />
|
||||
@@ -119,6 +107,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
<NewGallerySessionDialog />
|
||||
<NewCanvasSessionDialog />
|
||||
<ImageContextMenu />
|
||||
<FullscreenDropzone />
|
||||
</ErrorBoundary>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { skipToken } from '@reduxjs/toolkit/query';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useIsRegionFocused } from 'common/hooks/focus';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
@@ -8,13 +7,11 @@ import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
|
||||
import { memo } from 'react';
|
||||
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
export const GlobalImageHotkeys = memo(() => {
|
||||
useAssertSingleton('GlobalImageHotkeys');
|
||||
const lastSelectedImage = useAppSelector(selectLastSelectedImage);
|
||||
const { currentData: imageDTO } = useGetImageDTOQuery(lastSelectedImage?.image_name ?? skipToken);
|
||||
const imageDTO = useAppSelector(selectLastSelectedImage);
|
||||
|
||||
if (!imageDTO) {
|
||||
return null;
|
||||
|
||||
@@ -2,6 +2,8 @@ import 'i18n';
|
||||
|
||||
import type { Middleware } from '@reduxjs/toolkit';
|
||||
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
|
||||
import type { LoggingOverrides } from 'app/logging/logger';
|
||||
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
|
||||
import { $authToken } from 'app/store/nanostores/authToken';
|
||||
import { $baseUrl } from 'app/store/nanostores/baseUrl';
|
||||
import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
|
||||
@@ -17,10 +19,9 @@ import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
|
||||
import { createStore } from 'app/store/store';
|
||||
import type { PartialAppConfig } from 'app/types/invokeai';
|
||||
import Loading from 'common/components/Loading/Loading';
|
||||
import AppDndContext from 'features/dnd/components/AppDndContext';
|
||||
import type { WorkflowCategory } from 'features/nodes/types/workflow';
|
||||
import type { PropsWithChildren, ReactNode } from 'react';
|
||||
import React, { lazy, memo, useEffect, useMemo } from 'react';
|
||||
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
|
||||
import { Provider } from 'react-redux';
|
||||
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
|
||||
import { $socketOptions } from 'services/events/stores';
|
||||
@@ -46,6 +47,7 @@ interface Props extends PropsWithChildren {
|
||||
isDebugging?: boolean;
|
||||
logo?: ReactNode;
|
||||
workflowCategories?: WorkflowCategory[];
|
||||
loggingOverrides?: LoggingOverrides;
|
||||
}
|
||||
|
||||
const InvokeAIUI = ({
|
||||
@@ -65,7 +67,26 @@ const InvokeAIUI = ({
|
||||
isDebugging = false,
|
||||
logo,
|
||||
workflowCategories,
|
||||
loggingOverrides,
|
||||
}: Props) => {
|
||||
useLayoutEffect(() => {
|
||||
/*
|
||||
* We need to configure logging before anything else happens - useLayoutEffect ensures we set this at the first
|
||||
* possible opportunity.
|
||||
*
|
||||
* Once redux initializes, we will check the user's settings and update the logging config accordingly. See
|
||||
* `useSyncLoggingConfig`.
|
||||
*/
|
||||
$loggingOverrides.set(loggingOverrides);
|
||||
|
||||
// Until we get the user's settings, we will use the overrides OR default values.
|
||||
configureLogging(
|
||||
loggingOverrides?.logIsEnabled ?? true,
|
||||
loggingOverrides?.logLevel ?? 'debug',
|
||||
loggingOverrides?.logNamespaces ?? '*'
|
||||
);
|
||||
}, [loggingOverrides]);
|
||||
|
||||
useEffect(() => {
|
||||
// configure API client token
|
||||
if (token) {
|
||||
@@ -215,9 +236,7 @@ const InvokeAIUI = ({
|
||||
<Provider store={store}>
|
||||
<React.Suspense fallback={<Loading />}>
|
||||
<ThemeLocaleProvider>
|
||||
<AppDndContext>
|
||||
<App config={config} studioInitAction={studioInitAction} />
|
||||
</AppDndContext>
|
||||
<App config={config} studioInitAction={studioInitAction} />
|
||||
</ThemeLocaleProvider>
|
||||
</React.Suspense>
|
||||
</Provider>
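This hunk adds a loggingOverrides prop to InvokeAIUI and pushes it into $loggingOverrides inside a useLayoutEffect before the rest of the app mounts. A hedged sketch of how a host application might use the new prop; the host component and the import path are hypothetical, only the prop shape and namespace/level values come from the diff:

import InvokeAIUI from 'app/components/InvokeAIUI'; // hypothetical import path

// Hypothetical host: force warn-level logging for just the canvas and events namespaces,
// overriding whatever the user has stored in their settings.
export const StudioHost = () => (
  <InvokeAIUI
    loggingOverrides={{
      logIsEnabled: true,
      logLevel: 'warn',
      logNamespaces: ['canvas', 'events'],
    }}
  />
);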
@@ -9,15 +9,15 @@ const serializeMessage: MessageSerializer = (message) => {
|
||||
};
|
||||
|
||||
ROARR.serializeMessage = serializeMessage;
|
||||
ROARR.write = createLogWriter();
|
||||
|
||||
export const BASE_CONTEXT = {};
|
||||
const BASE_CONTEXT = {};
|
||||
|
||||
export const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));
|
||||
const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));
|
||||
|
||||
export const zLogNamespace = z.enum([
|
||||
'canvas',
|
||||
'config',
|
||||
'dnd',
|
||||
'events',
|
||||
'gallery',
|
||||
'generation',
|
||||
@@ -35,8 +35,22 @@ export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fat
|
||||
export type LogLevel = z.infer<typeof zLogLevel>;
|
||||
export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;
|
||||
|
||||
/**
|
||||
* Override logging settings.
|
||||
* @property logIsEnabled Override the enabled log state. Omit to use the user's settings.
|
||||
* @property logNamespaces Override the enabled log namespaces. Use `"*"` for all namespaces. Omit to use the user's settings.
|
||||
* @property logLevel Override the log level. Omit to use the user's settings.
|
||||
*/
|
||||
export type LoggingOverrides = {
|
||||
logIsEnabled?: boolean;
|
||||
logNamespaces?: LogNamespace[] | '*';
|
||||
logLevel?: LogLevel;
|
||||
};
|
||||
|
||||
export const $loggingOverrides = atom<LoggingOverrides | undefined>();
|
||||
|
||||
// Translate human-readable log levels to numbers, used for log filtering
|
||||
export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
|
||||
const LOG_LEVEL_MAP: Record<LogLevel, number> = {
|
||||
trace: 10,
|
||||
debug: 20,
|
||||
info: 30,
|
||||
@@ -44,3 +58,40 @@ export const LOG_LEVEL_MAP: Record<LogLevel, number> = {
|
||||
error: 50,
|
||||
fatal: 60,
|
||||
};
|
||||
|
||||
/**
|
||||
* Configure logging, pushing settings to local storage.
|
||||
*
|
||||
* @param logIsEnabled Whether logging is enabled
|
||||
* @param logLevel The log level
|
||||
* @param logNamespaces A list of log namespaces to enable, or '*' to enable all
|
||||
*/
|
||||
export const configureLogging = (
|
||||
logIsEnabled: boolean = true,
|
||||
logLevel: LogLevel = 'warn',
|
||||
logNamespaces: LogNamespace[] | '*'
|
||||
): void => {
|
||||
if (!logIsEnabled) {
|
||||
// Disable console log output
|
||||
localStorage.setItem('ROARR_LOG', 'false');
|
||||
} else {
|
||||
// Enable console log output
|
||||
localStorage.setItem('ROARR_LOG', 'true');
|
||||
|
||||
// Use a filter to show only logs of the given level
|
||||
let filter = `context.logLevel:>=${LOG_LEVEL_MAP[logLevel]}`;
|
||||
|
||||
const namespaces = logNamespaces === '*' ? zLogNamespace.options : logNamespaces;
|
||||
|
||||
if (namespaces.length > 0) {
|
||||
filter += ` AND (${namespaces.map((ns) => `context.namespace:${ns}`).join(' OR ')})`;
|
||||
} else {
|
||||
// This effectively hides all logs because we use namespaces for all logs
|
||||
filter += ' AND context.namespace:undefined';
|
||||
}
|
||||
|
||||
localStorage.setItem('ROARR_FILTER', filter);
|
||||
}
|
||||
|
||||
ROARR.write = createLogWriter();
|
||||
};
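For reference, a sketch of what the function above ends up writing to localStorage for two representative calls; the values follow directly from LOG_LEVEL_MAP and the filter-building logic and are illustrative, not captured output:

import { configureLogging } from 'app/logging/logger';

configureLogging(true, 'info', ['canvas', 'events']);
// ROARR_LOG    -> 'true'
// ROARR_FILTER -> 'context.logLevel:>=30 AND (context.namespace:canvas OR context.namespace:events)'

configureLogging(false, 'info', '*');
// ROARR_LOG    -> 'false'  (console output disabled; any existing ROARR_FILTER is left untouched)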
|
||||
|
||||
@@ -1,53 +1,9 @@
|
||||
import { createLogWriter } from '@roarr/browser-log-writer';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
selectSystemLogIsEnabled,
|
||||
selectSystemLogLevel,
|
||||
selectSystemLogNamespaces,
|
||||
} from 'features/system/store/systemSlice';
|
||||
import { useEffect, useMemo } from 'react';
|
||||
import { ROARR, Roarr } from 'roarr';
|
||||
import { useMemo } from 'react';
|
||||
|
||||
import type { LogNamespace } from './logger';
|
||||
import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP, logger } from './logger';
|
||||
import { logger } from './logger';
|
||||
|
||||
export const useLogger = (namespace: LogNamespace) => {
|
||||
const logLevel = useAppSelector(selectSystemLogLevel);
|
||||
const logNamespaces = useAppSelector(selectSystemLogNamespaces);
|
||||
const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
|
||||
|
||||
// The provided Roarr browser log writer uses localStorage to config logging to console
|
||||
useEffect(() => {
|
||||
if (logIsEnabled) {
|
||||
// Enable console log output
|
||||
localStorage.setItem('ROARR_LOG', 'true');
|
||||
|
||||
// Use a filter to show only logs of the given level
|
||||
let filter = `context.logLevel:>=${LOG_LEVEL_MAP[logLevel]}`;
|
||||
if (logNamespaces.length > 0) {
|
||||
filter += ` AND (${logNamespaces.map((ns) => `context.namespace:${ns}`).join(' OR ')})`;
|
||||
} else {
|
||||
filter += ' AND context.namespace:undefined';
|
||||
}
|
||||
localStorage.setItem('ROARR_FILTER', filter);
|
||||
} else {
|
||||
// Disable console log output
|
||||
localStorage.setItem('ROARR_LOG', 'false');
|
||||
}
|
||||
ROARR.write = createLogWriter();
|
||||
}, [logLevel, logIsEnabled, logNamespaces]);
|
||||
|
||||
// Update the module-scoped logger context as needed
|
||||
useEffect(() => {
|
||||
// TODO: type this properly
|
||||
//eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const newContext: Record<string, any> = {
|
||||
...BASE_CONTEXT,
|
||||
};
|
||||
|
||||
$logger.set(Roarr.child(newContext));
|
||||
}, []);
|
||||
|
||||
const log = useMemo(() => logger(namespace), [namespace]);
|
||||
|
||||
return log;
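After this simplification, useLogger only memoizes a namespaced child logger; the console wiring now lives in configureLogging and useSyncLoggingConfig. Typical call-site usage, with the component name being illustrative:

import { useLogger } from 'app/logging/useLogger';

const CanvasDebugPanel = () => {
  // 'canvas' is one of the namespaces declared in zLogNamespace.
  const log = useLogger('canvas');
  log.debug('canvas debug panel rendered'); // emitted only if ROARR_FILTER allows the canvas namespace at debug level
  return null;
};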
|
||||
|
||||
@@ -0,0 +1,43 @@
import { useStore } from '@nanostores/react';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
import { useAppSelector } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import {
  selectSystemLogIsEnabled,
  selectSystemLogLevel,
  selectSystemLogNamespaces,
} from 'features/system/store/systemSlice';
import { useLayoutEffect } from 'react';

/**
 * This hook synchronizes the logging configuration stored in Redux with the logging system, which uses localStorage.
 *
 * The sync is one-way: from Redux to localStorage. This means that changes made in the UI will be reflected in the
 * logging system, but changes made directly to localStorage will not be reflected in the UI.
 *
 * See {@link configureLogging}
 */
export const useSyncLoggingConfig = () => {
  useAssertSingleton('useSyncLoggingConfig');

  const loggingOverrides = useStore($loggingOverrides);

  const logLevel = useAppSelector(selectSystemLogLevel);
  const logNamespaces = useAppSelector(selectSystemLogNamespaces);
  const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);

  useLayoutEffect(() => {
    configureLogging(
      loggingOverrides?.logIsEnabled ?? logIsEnabled,
      loggingOverrides?.logLevel ?? logLevel,
      loggingOverrides?.logNamespaces ?? logNamespaces
    );
  }, [
    logIsEnabled,
    logLevel,
    logNamespaces,
    loggingOverrides?.logIsEnabled,
    loggingOverrides?.logLevel,
    loggingOverrides?.logNamespaces,
  ]);
};
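A sketch of how a singleton hook like this is typically mounted once near the app root; the import path and the surrounding component are assumptions, not taken from this diff.

```tsx
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';

export const AppRoot = () => {
  // Mounted exactly once; useAssertSingleton inside the hook guards against double-mounting.
  useSyncLoggingConfig();

  return <>{/* rest of the application */}</>;
};
```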
@@ -1,4 +1,3 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
/** @knipignore */
export const EMPTY_OBJECT = {};
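Shared empty constants like these are the usual trick for keeping selector results referentially stable so `useAppSelector` comparisons don't trigger needless re-renders; a hedged sketch of that pattern, with a made-up state shape and selector:

```ts
import { createSelector } from '@reduxjs/toolkit';

const EMPTY_ARRAY: string[] = [];

// Falling back to the same EMPTY_ARRAY instance (rather than a fresh []) keeps the
// selector output referentially equal across calls when the list is absent.
const selectImageNames = createSelector(
  (state: { gallery?: { imageNames?: string[] } }) => state.gallery?.imageNames,
  (imageNames) => imageNames ?? EMPTY_ARRAY
);

console.log(selectImageNames({}) === selectImageNames({})); // true
```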
@@ -16,7 +16,6 @@ import { addGalleryOffsetChangedListener } from 'app/store/middleware/listenerMi
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
import { addImageDeletionListeners } from 'app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners';
import { addImageDroppedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped';
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred';
import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred';

@@ -93,9 +92,6 @@ addGetOpenAPISchemaListener(startAppListening);
addWorkflowLoadRequestedListener(startAppListening);
addUpdateAllNodesRequestedListener(startAppListening);

// DND
addImageDroppedListener(startAppListening);

// Models
addModelSelectedListener(startAppListening);
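The `add*Listener(startAppListening)` registrations above all follow the Redux Toolkit listener-middleware pattern. A hedged sketch of what one such registration function can look like, using a made-up action rather than any listener from this diff:

```ts
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';

// Illustrative action; the real listeners register against actions defined elsewhere.
const exampleImageDropped = createAction<{ imageName: string }>('example/imageDropped');

export const addExampleImageDroppedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: exampleImageDropped,
    effect: async (action) => {
      // Side effects go here: dispatch follow-up actions, call the API, show toasts, etc.
      console.log(`image dropped: ${action.payload.imageName}`);
    },
  });
};
```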
@@ -1,12 +1,12 @@
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import type { JsonObject } from 'type-fest';

const log = logger('queue');

@@ -39,9 +39,9 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt

      const enqueueResult = await req.unwrap();
      req.reset();
      log.debug({ enqueueResult } as SerializableObject, t('queue.graphQueued'));
      log.debug({ enqueueResult } as JsonObject, t('queue.graphQueued'));
    } catch (error) {
      log.error({ enqueueBatchArg } as SerializableObject, t('queue.graphFailedToQueue'));
      log.error({ enqueueBatchArg } as JsonObject, t('queue.graphFailedToQueue'));

      if (error instanceof Object && 'status' in error && error.status === 403) {
        return;
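The `SerializableObject` to `JsonObject` swap above only changes which type the logged payload is asserted to; `JsonObject` ships with `type-fest` and admits only JSON-serializable values. A tiny hedged sketch with a made-up payload:

```ts
import type { JsonObject } from 'type-fest';

// A payload typed as JsonObject is guaranteed to be plain JSON data,
// so it is safe to hand to a structured logger.
const enqueueResult: JsonObject = { batch_id: 'abc-123', item_ids: [1, 2, 3] };
console.log('graph queued', enqueueResult);
```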
Some files were not shown because too many files have changed in this diff.