Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 18:18:12 -05:00)

Compare commits: v4.2.9.dev ... v5.0.0.a2 (833 commits)
The commit listing for this range did not survive extraction: only bare SHA1 hashes remain (f82640b5df through 576f1cbb75), with no author, date, or message columns. The per-commit table is therefore omitted; the file diffs below carry the substantive content of this compare view.
.github/workflows/build-container.yml (vendored, 37 lines changed)
```diff
@@ -13,6 +13,12 @@ on:
     tags:
       - 'v*.*.*'
   workflow_dispatch:
+    inputs:
+      push-to-registry:
+        description: Push the built image to the container registry
+        required: false
+        type: boolean
+        default: false

 permissions:
   contents: write
@@ -50,16 +56,15 @@ jobs:
         df -h

       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Docker meta
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
           images: |
             ghcr.io/${{ github.repository }}
             ${{ env.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
@@ -72,49 +77,33 @@ jobs:
           suffix=-${{ matrix.gpu-driver }},onlatest=false

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           platforms: ${{ env.PLATFORMS }}

       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}

-      # - name: Login to Docker Hub
-      #   if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
-      #   uses: docker/login-action@v2
-      #   with:
-      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
-      #     password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build container
         timeout-minutes: 40
         id: docker_build
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: docker/Dockerfile
           platforms: ${{ env.PLATFORMS }}
-          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
+          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' || github.event.inputs.push-to-registry }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           cache-from: |
             type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
             type=gha,scope=main-${{ matrix.gpu-driver }}
           cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}

-      # - name: Docker Hub Description
-      #   if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
-      #   uses: peter-evans/dockerhub-description@v3
-      #   with:
-      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
-      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
-      #     repository: ${{ vars.DOCKERHUB_REPOSITORY }}
-      #     short-description: ${{ github.event.repository.description }}
```
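With the new `workflow_dispatch` input, a build can be triggered manually with an explicit push decision. A minimal sketch of doing that through the GitHub REST API; the token value and target ref are placeholders, and the workflow file name is taken from the diff above:

```python
import requests

TOKEN = "ghp_..."  # placeholder; needs workflow-dispatch permission on the repo
URL = (
    "https://api.github.com/repos/invoke-ai/InvokeAI"
    "/actions/workflows/build-container.yml/dispatches"
)

# POST .../dispatches starts a workflow_dispatch run. The `inputs` keys must
# match the names declared under `on.workflow_dispatch.inputs` in the workflow.
resp = requests.post(
    URL,
    headers={
        "Authorization": f"Bearer {TOKEN}",
        "Accept": "application/vnd.github+json",
    },
    json={"ref": "main", "inputs": {"push-to-registry": "true"}},
    timeout=30,
)
resp.raise_for_status()  # the API answers 204 No Content on success
```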
Documentation hunk (the source file name was not preserved in this view):

```diff
@@ -196,6 +196,22 @@ tips to reduce the problem:

 === "12GB VRAM GPU"

     This should be sufficient to generate larger images up to about 1280x1280.

+## Checkpoint Models Load Slowly or Use Too Much RAM
+
+The difference between diffusers models (a folder containing multiple
+subfolders) and checkpoint models (a file ending with .safetensors or
+.ckpt) is that InvokeAI is able to load diffusers models into memory
+incrementally, while checkpoint models must be loaded all at
+once. With very large models, or systems with limited RAM, you may
+experience slowdowns and other memory-related issues when loading
+checkpoint models.
+
+To solve this, go to the Model Manager tab (the cube), select the
+checkpoint model that's giving you trouble, and press the "Convert"
+button in the upper right of your browser window. This will convert the
+checkpoint into a diffusers model, after which loading should be
+faster and less memory-intensive.
+
 ## Memory Leak (Linux)
```
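The docs describe the conversion through the UI. For illustration, the same single-file-to-diffusers conversion can be sketched with the `diffusers` library directly; this is the generic diffusers route with placeholder paths, not necessarily the exact code behind InvokeAI's Convert button:

```python
from diffusers import StableDiffusionPipeline

# Load a monolithic .safetensors/.ckpt checkpoint into a pipeline...
pipe = StableDiffusionPipeline.from_single_file("my_model.safetensors")

# ...and write it back out in the multi-folder diffusers layout, which can be
# loaded component-by-component instead of all at once.
pipe.save_pretrained("my_model_diffusers/")
```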
invokeai/app/api/routers/model_manager.py (file header lost in extraction; path inferred from the imports):

```diff
@@ -3,8 +3,10 @@

 import io
 import pathlib
+import shutil
 import traceback
 from copy import deepcopy
+from enum import Enum
 from tempfile import TemporaryDirectory
 from typing import List, Optional, Type

@@ -17,6 +19,7 @@ from starlette.exceptions import HTTPException
 from typing_extensions import Annotated

 from invokeai.app.api.dependencies import ApiDependencies
+from invokeai.app.services.config import get_config
 from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
 from invokeai.app.services.model_install.model_install_common import ModelInstallJob
 from invokeai.app.services.model_records import (
@@ -31,6 +34,7 @@ from invokeai.backend.model_manager.config import (
     ModelFormat,
     ModelType,
 )
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import CacheStats
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
 from invokeai.backend.model_manager.search import ModelSearch
@@ -50,6 +54,13 @@ class ModelsList(BaseModel):
     model_config = ConfigDict(use_enum_values=True)


+class CacheType(str, Enum):
+    """Cache type - one of vram or ram."""
+
+    RAM = "RAM"
+    VRAM = "VRAM"
+
+
 def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
     """Add a cover image URL to a model configuration."""
     cover_image = dependencies.invoker.services.model_images.get_url(config.key)
@@ -797,3 +808,83 @@ async def get_starter_models() -> list[StarterModel]:
             model.dependencies = missing_deps

     return starter_models
+
+
+@model_manager_router.get(
+    "/model_cache",
+    operation_id="get_cache_size",
+    response_model=float,
+    summary="Get maximum size of model manager RAM or VRAM cache.",
+)
+async def get_cache_size(cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM)) -> float:
+    """Return the current RAM or VRAM cache size setting (in GB)."""
+    cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
+    value = 0.0
+    if cache_type == CacheType.RAM:
+        value = cache.max_cache_size
+    elif cache_type == CacheType.VRAM:
+        value = cache.max_vram_cache_size
+    return value
+
+
+@model_manager_router.put(
+    "/model_cache",
+    operation_id="set_cache_size",
+    response_model=float,
+    summary="Set maximum size of model manager RAM or VRAM cache, optionally writing new value out to invokeai.yaml config file.",
+)
+async def set_cache_size(
+    value: float = Query(description="The new value for the maximum cache size"),
+    cache_type: CacheType = Query(description="The cache type", default=CacheType.RAM),
+    persist: bool = Query(description="Write new value out to invokeai.yaml", default=False),
+) -> float:
+    """Set the current RAM or VRAM cache size setting (in GB)."""
+    cache = ApiDependencies.invoker.services.model_manager.load.ram_cache
+    app_config = get_config()
+    # Record initial state.
+    vram_old = app_config.vram
+    ram_old = app_config.ram
+
+    # Prepare target state.
+    vram_new = vram_old
+    ram_new = ram_old
+    if cache_type == CacheType.RAM:
+        ram_new = value
+    elif cache_type == CacheType.VRAM:
+        vram_new = value
+    else:
+        raise ValueError(f"Unexpected {cache_type=}.")
+
+    config_path = app_config.config_file_path
+    new_config_path = config_path.with_suffix(".yaml.new")
+
+    try:
+        # Try to apply the target state.
+        cache.max_vram_cache_size = vram_new
+        cache.max_cache_size = ram_new
+        app_config.ram = ram_new
+        app_config.vram = vram_new
+        if persist:
+            app_config.write_file(new_config_path)
+            shutil.move(new_config_path, config_path)
+    except Exception as e:
+        # If there was a failure, restore the initial state.
+        cache.max_cache_size = ram_old
+        cache.max_vram_cache_size = vram_old
+        app_config.ram = ram_old
+        app_config.vram = vram_old
+
+        raise RuntimeError("Failed to update cache size") from e
+    return value
+
+
+@model_manager_router.get(
+    "/stats",
+    operation_id="get_stats",
+    response_model=Optional[CacheStats],
+    summary="Get model manager RAM cache performance statistics.",
+)
+async def get_stats() -> Optional[CacheStats]:
+    """Return performance statistics on the model manager's RAM cache. Will return null if no models have been loaded."""
+    return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats
```
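A sketch of exercising the new cache endpoints from a client. The host, port, and `/api/v2/models` prefix for this router are assumptions here; adjust them to your deployment:

```python
import requests

BASE = "http://127.0.0.1:9090/api/v2/models"  # assumed host/port and router prefix

# Read the current RAM cache ceiling (GB).
ram_gb = requests.get(f"{BASE}/model_cache", params={"cache_type": "RAM"}).json()
print(f"RAM cache: {ram_gb} GB")

# Raise the VRAM cache ceiling and persist it to invokeai.yaml.
resp = requests.put(
    f"{BASE}/model_cache",
    params={"cache_type": "VRAM", "value": 8.0, "persist": True},
)
resp.raise_for_status()
print(f"VRAM cache now: {resp.json()} GB")
```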
invokeai/app/api/routers/session_queue.py (path inferred from the imports):

```diff
@@ -11,7 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     Batch,
     BatchStatus,
     CancelByBatchIDsResult,
-    CancelByOriginResult,
+    CancelByDestinationResult,
     ClearResult,
     EnqueueBatchResult,
     PruneResult,
@@ -107,16 +107,18 @@ async def cancel_by_batch_ids(


 @session_queue_router.put(
-    "/{queue_id}/cancel_by_origin",
-    operation_id="cancel_by_origin",
+    "/{queue_id}/cancel_by_destination",
+    operation_id="cancel_by_destination",
     responses={200: {"model": CancelByBatchIDsResult}},
 )
-async def cancel_by_origin(
+async def cancel_by_destination(
     queue_id: str = Path(description="The queue id to perform this operation on"),
-    origin: str = Query(description="The origin to cancel all queue items for"),
-) -> CancelByOriginResult:
+    destination: str = Query(description="The destination to cancel all queue items for"),
+) -> CancelByDestinationResult:
     """Immediately cancels all queue items with the given origin"""
-    return ApiDependencies.invoker.services.session_queue.cancel_by_origin(queue_id=queue_id, origin=origin)
+    return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
+        queue_id=queue_id, destination=destination
+    )


 @session_queue_router.put(
```
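For clients, the rename is a one-line change. A hedged sketch of the call; the base URL and the `default` queue id are assumptions:

```python
import requests

# Cancel every queued item that was enqueued with destination="canvas".
requests.put(
    "http://127.0.0.1:9090/api/v1/queue/default/cancel_by_destination",  # assumed base URL and queue id
    params={"destination": "canvas"},
).raise_for_status()
```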
invokeai/app/invocations/denoise_latents.py (path inferred from the class name):

```diff
@@ -185,7 +185,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
     )
     denoise_mask: Optional[DenoiseMaskField] = InputField(
         default=None,
-        description=FieldDescriptions.mask,
+        description=FieldDescriptions.denoise_mask,
         input=Input.Connection,
         ui_order=8,
     )
```
invokeai/app/invocations/fields.py (path inferred from the class names):

```diff
@@ -45,11 +45,13 @@ class UIType(str, Enum, metaclass=MetaEnum):
     SDXLRefinerModel = "SDXLRefinerModelField"
     ONNXModel = "ONNXModelField"
     VAEModel = "VAEModelField"
+    FluxVAEModel = "FluxVAEModelField"
     LoRAModel = "LoRAModelField"
     ControlNetModel = "ControlNetModelField"
     IPAdapterModel = "IPAdapterModelField"
     T2IAdapterModel = "T2IAdapterModelField"
     T5EncoderModel = "T5EncoderModelField"
+    CLIPEmbedModel = "CLIPEmbedModelField"
     SpandrelImageToImageModel = "SpandrelImageToImageModelField"
     # endregion

@@ -128,6 +130,7 @@ class FieldDescriptions:
     noise = "Noise tensor"
     clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
     t5_encoder = "T5 tokenizer and text encoder"
+    clip_embed_model = "CLIP Embed loader"
     unet = "UNet (scheduler, LoRAs)"
     transformer = "Transformer"
     vae = "VAE"
@@ -178,7 +181,7 @@ class FieldDescriptions:
     )
     num_1 = "The first number"
     num_2 = "The second number"
-    mask = "The mask to use for the operation"
+    denoise_mask = "A mask of the region to apply the denoising process to."
     board = "The board to save the image to"
     image = "The image to process"
     tile_size = "Tile size"
```
invokeai/app/invocations/flux_denoise.py (new file, 249 lines)
```python
from typing import Callable, Optional

import torch
import torchvision.transforms as tv_transforms
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    DenoiseMaskField,
    FieldDescriptions,
    FluxConditioningField,
    Input,
    InputField,
    LatentsField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import TransformerField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.denoise import denoise
from invokeai.backend.flux.inpaint_extension import InpaintExtension
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.sampling_utils import (
    clip_timestep_schedule,
    generate_img_ids,
    get_noise,
    get_schedule,
    pack,
    unpack,
)
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_denoise",
    title="FLUX Denoise",
    tags=["image", "flux"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Run denoising process with a FLUX transformer model."""

    # If latents is provided, this means we are doing image-to-image.
    latents: Optional[LatentsField] = InputField(
        default=None,
        description=FieldDescriptions.latents,
        input=Input.Connection,
    )
    # denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
    denoise_mask: Optional[DenoiseMaskField] = InputField(
        default=None,
        description=FieldDescriptions.denoise_mask,
        input=Input.Connection,
    )
    denoising_start: float = InputField(
        default=0.0,
        ge=0,
        le=1,
        description=FieldDescriptions.denoising_start,
    )
    denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
    transformer: TransformerField = InputField(
        description=FieldDescriptions.flux_model,
        input=Input.Connection,
        title="Transformer",
    )
    positive_text_conditioning: FluxConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    num_steps: int = InputField(
        default=4, description="Number of diffusion steps. Recommended values are schnell: 4, dev: 50."
    )
    guidance: float = InputField(
        default=4.0,
        description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
    )
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        latents = self._run_diffusion(context)
        latents = latents.detach().to("cpu")

        name = context.tensors.save(tensor=latents)
        return LatentsOutput.build(latents_name=name, latents=latents, seed=None)

    def _run_diffusion(
        self,
        context: InvocationContext,
    ):
        inference_dtype = torch.bfloat16

        # Load the conditioning data.
        cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
        assert len(cond_data.conditionings) == 1
        flux_conditioning = cond_data.conditionings[0]
        assert isinstance(flux_conditioning, FLUXConditioningInfo)
        flux_conditioning = flux_conditioning.to(dtype=inference_dtype)
        t5_embeddings = flux_conditioning.t5_embeds
        clip_embeddings = flux_conditioning.clip_embeds

        # Load the input latents, if provided.
        init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
        if init_latents is not None:
            init_latents = init_latents.to(device=TorchDevice.choose_torch_device(), dtype=inference_dtype)

        # Prepare input noise.
        noise = get_noise(
            num_samples=1,
            height=self.height,
            width=self.width,
            device=TorchDevice.choose_torch_device(),
            dtype=inference_dtype,
            seed=self.seed,
        )

        transformer_info = context.models.load(self.transformer.transformer)
        is_schnell = "schnell" in transformer_info.config.config_path

        # Calculate the timestep schedule.
        image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
        timesteps = get_schedule(
            num_steps=self.num_steps,
            image_seq_len=image_seq_len,
            shift=not is_schnell,
        )

        # Clip the timesteps schedule based on denoising_start and denoising_end.
        timesteps = clip_timestep_schedule(timesteps, self.denoising_start, self.denoising_end)

        # Prepare input latent image.
        if init_latents is not None:
            # If init_latents is provided, we are doing image-to-image.

            if is_schnell:
                context.logger.warning(
                    "Running image-to-image with a FLUX schnell model. This is not recommended. The results are likely "
                    "to be poor. Consider using a FLUX dev model instead."
                )

            # Noise the orig_latents by the appropriate amount for the first timestep.
            t_0 = timesteps[0]
            x = t_0 * noise + (1.0 - t_0) * init_latents
        else:
            # init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
            if self.denoising_start > 1e-5:
                raise ValueError("denoising_start should be 0 when initial latents are not provided.")

            x = noise

        # If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
        # denoising steps.
        if len(timesteps) <= 1:
            return x

        inpaint_mask = self._prep_inpaint_mask(context, x)

        b, _c, h, w = x.shape
        img_ids = generate_img_ids(h=h, w=w, batch_size=b, device=x.device, dtype=x.dtype)

        bs, t5_seq_len, _ = t5_embeddings.shape
        txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

        # Pack all latent tensors.
        init_latents = pack(init_latents) if init_latents is not None else None
        inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
        noise = pack(noise)
        x = pack(x)

        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
        assert image_seq_len == x.shape[1]

        # Prepare inpaint extension.
        inpaint_extension: InpaintExtension | None = None
        if inpaint_mask is not None:
            assert init_latents is not None
            inpaint_extension = InpaintExtension(
                init_latents=init_latents,
                inpaint_mask=inpaint_mask,
                noise=noise,
            )

        with transformer_info as transformer:
            assert isinstance(transformer, Flux)

            x = denoise(
                model=transformer,
                img=x,
                img_ids=img_ids,
                txt=t5_embeddings,
                txt_ids=txt_ids,
                vec=clip_embeddings,
                timesteps=timesteps,
                step_callback=self._build_step_callback(context),
                guidance=self.guidance,
                inpaint_extension=inpaint_extension,
            )

        x = unpack(x.float(), self.height, self.width)
        return x

    def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
        """Prepare the inpaint mask.

        - Loads the mask
        - Resizes if necessary
        - Casts to same device/dtype as latents
        - Expands mask to the same shape as latents so that they line up after 'packing'

        Args:
            context (InvocationContext): The invocation context, for loading the inpaint mask.
            latents (torch.Tensor): A latent image tensor. In 'unpacked' format. Used to determine the target shape,
                device, and dtype for the inpaint mask.

        Returns:
            torch.Tensor | None: Inpaint mask.
        """
        if self.denoise_mask is None:
            return None

        mask = context.tensors.load(self.denoise_mask.mask_name)

        _, _, latent_height, latent_width = latents.shape
        mask = tv_resize(
            img=mask,
            size=[latent_height, latent_width],
            interpolation=tv_transforms.InterpolationMode.BILINEAR,
            antialias=False,
        )

        mask = mask.to(device=latents.device, dtype=latents.dtype)

        # Expand the inpaint mask to the same shape as `latents` so that when we 'pack' `mask` it lines up with
        # `latents`.
        return mask.expand_as(latents)

    def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
        def step_callback(state: PipelineIntermediateState) -> None:
            state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
            context.util.flux_step_callback(state)

        return step_callback
```
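Two numeric details in this file reward a closer look: `image_seq_len = h * w // 4` anticipates the 2x2 patch "packing" of latents into a token sequence, and image-to-image starts by linearly blending noise into the init latents at the first (possibly clipped) timestep. A small self-contained sketch of both ideas; the `rearrange`-based packing here mirrors the reference FLUX implementation, which is an assumption since `pack` itself is not shown in this diff:

```python
import torch
from einops import rearrange

# Unpacked latent image: batch=1, 16 channels, 128x128 latent pixels
# (a 1024x1024 image at the FLUX VAE's 8x downsampling).
x = torch.randn(1, 16, 128, 128)

# "Packing" groups each 2x2 patch of latent pixels into one token, so the
# sequence length is (128 // 2) * (128 // 2) = 128 * 128 // 4.
packed = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
assert packed.shape == (1, 128 * 128 // 4, 16 * 2 * 2)

# Image-to-image: noise the init latents to match the first timestep of the
# clipped schedule. At t_0 = 1.0 this is pure noise; at t_0 = 0.3 the start
# point is 30% noise, 70% original image.
init_latents = torch.randn_like(x)
noise = torch.randn_like(x)
t_0 = 0.3  # e.g. after clipping the schedule with a high denoising_start
start = t_0 * noise + (1.0 - t_0) * init_latents
```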
invokeai/app/invocations/flux_text_encoder.py (path inferred from the class name):

```diff
@@ -40,7 +40,10 @@ class FluxTextEncoderInvocation(BaseInvocation):

     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
-        t5_embeddings, clip_embeddings = self._encode_prompt(context)
+        # Note: The T5 and CLIP encoding are done in separate functions to ensure that all model references are locally
+        # scoped. This ensures that the T5 model can be freed and gc'd before loading the CLIP model (if necessary).
+        t5_embeddings = self._t5_encode(context)
+        clip_embeddings = self._clip_encode(context)
         conditioning_data = ConditioningFieldData(
             conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
         )
@@ -48,12 +51,7 @@ class FluxTextEncoderInvocation(BaseInvocation):
         conditioning_name = context.conditioning.save(conditioning_data)
         return FluxConditioningOutput.build(conditioning_name)

-    def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
-        # Load CLIP.
-        clip_tokenizer_info = context.models.load(self.clip.tokenizer)
-        clip_text_encoder_info = context.models.load(self.clip.text_encoder)
-
-        # Load T5.
+    def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
         t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
         t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)

@@ -70,6 +68,15 @@ class FluxTextEncoderInvocation(BaseInvocation):

         prompt_embeds = t5_encoder(prompt)

+        assert isinstance(prompt_embeds, torch.Tensor)
+        return prompt_embeds
+
+    def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
+        clip_tokenizer_info = context.models.load(self.clip.tokenizer)
+        clip_text_encoder_info = context.models.load(self.clip.text_encoder)
+
+        prompt = [self.prompt]
+
         with (
             clip_text_encoder_info as clip_text_encoder,
             clip_tokenizer_info as clip_tokenizer,
@@ -81,6 +88,5 @@ class FluxTextEncoderInvocation(BaseInvocation):

         pooled_prompt_embeds = clip_encoder(prompt)

-        assert isinstance(prompt_embeds, torch.Tensor)
         assert isinstance(pooled_prompt_embeds, torch.Tensor)
-        return prompt_embeds, pooled_prompt_embeds
+        return pooled_prompt_embeds
```
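The new comment spells out the motivation: holding a reference to the T5 model while loading CLIP keeps both alive at once. A minimal, runnable sketch of the pattern, with a stand-in model class rather than the real model manager (everything here is illustrative):

```python
import gc


class FakeModel:
    """Stand-in for a large text encoder; the payload mimics big weights."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.weights = bytearray(64 * 1024 * 1024)  # pretend 64 MB of weights

    def encode(self, prompt: str) -> int:
        return hash((self.name, prompt))  # stand-in for an embedding tensor


def encode_with(name: str, prompt: str) -> int:
    # The model reference is local to this function, so once it returns, the
    # weights are unreachable and can be collected before the next model is
    # loaded. Keeping both models referenced in one function would hold their
    # peak memory simultaneously.
    model = FakeModel(name)
    return model.encode(prompt)


t5_embeds = encode_with("t5-xxl", "a photo of a cat")
gc.collect()  # CPython frees on refcount alone; this just sweeps any cycles
clip_embeds = encode_with("clip-l", "a photo of a cat")
```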
invokeai/app/invocations/flux_text_to_image.py (entire file removed, 172 lines; path inferred from the invocation it defines, since the file header did not survive extraction):

```python
import torch
from einops import rearrange
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    FluxConditioningField,
    Input,
    InputField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import TransformerField, VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.sampling import denoise, get_noise, get_schedule, prepare_latent_img_patches, unpack
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_text_to_image",
    title="FLUX Text to Image",
    tags=["image", "flux"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Text-to-image generation using a FLUX model."""

    transformer: TransformerField = InputField(
        description=FieldDescriptions.flux_model,
        input=Input.Connection,
        title="Transformer",
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )
    positive_text_conditioning: FluxConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    num_steps: int = InputField(
        default=4, description="Number of diffusion steps. Recommend values are schnell: 4, dev: 50."
    )
    guidance: float = InputField(
        default=4.0,
        description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
    )
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the conditioning data.
        cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
        assert len(cond_data.conditionings) == 1
        flux_conditioning = cond_data.conditionings[0]
        assert isinstance(flux_conditioning, FLUXConditioningInfo)

        latents = self._run_diffusion(context, flux_conditioning.clip_embeds, flux_conditioning.t5_embeds)
        image = self._run_vae_decoding(context, latents)
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)

    def _run_diffusion(
        self,
        context: InvocationContext,
        clip_embeddings: torch.Tensor,
        t5_embeddings: torch.Tensor,
    ):
        transformer_info = context.models.load(self.transformer.transformer)
        inference_dtype = torch.bfloat16

        # Prepare input noise.
        x = get_noise(
            num_samples=1,
            height=self.height,
            width=self.width,
            device=TorchDevice.choose_torch_device(),
            dtype=inference_dtype,
            seed=self.seed,
        )

        img, img_ids = prepare_latent_img_patches(x)

        is_schnell = "schnell" in transformer_info.config.config_path

        timesteps = get_schedule(
            num_steps=self.num_steps,
            image_seq_len=img.shape[1],
            shift=not is_schnell,
        )

        bs, t5_seq_len, _ = t5_embeddings.shape
        txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
        # if the cache is not empty.
        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)

        with transformer_info as transformer:
            assert isinstance(transformer, Flux)

            def step_callback() -> None:
                if context.util.is_canceled():
                    raise CanceledException

                # TODO: Make this look like the image before re-enabling
                # latent_image = unpack(img.float(), self.height, self.width)
                # latent_image = latent_image.squeeze()  # Remove unnecessary dimensions
                # flattened_tensor = latent_image.reshape(-1)  # Flatten to shape [48*128*128]

                # # Create a new tensor of the required shape [255, 255, 3]
                # latent_image = flattened_tensor[: 255 * 255 * 3].reshape(255, 255, 3)  # Reshape to RGB format

                # # Convert to a NumPy array and then to a PIL Image
                # image = Image.fromarray(latent_image.cpu().numpy().astype(np.uint8))

                # (width, height) = image.size
                # width *= 8
                # height *= 8

                # dataURL = image_to_dataURL(image, image_format="JPEG")

                # # TODO: move this whole function to invocation context to properly reference these variables
                # context._services.events.emit_invocation_denoise_progress(
                #     context._data.queue_item,
                #     context._data.invocation,
                #     state,
                #     ProgressImage(dataURL=dataURL, width=width, height=height),
                # )

            x = denoise(
                model=transformer,
                img=img,
                img_ids=img_ids,
                txt=t5_embeddings,
                txt_ids=txt_ids,
                vec=clip_embeddings,
                timesteps=timesteps,
                step_callback=step_callback,
                guidance=self.guidance,
            )

        x = unpack(x.float(), self.height, self.width)

        return x

    def _run_vae_decoding(
        self,
        context: InvocationContext,
        latents: torch.Tensor,
    ) -> Image.Image:
        vae_info = context.models.load(self.vae.vae)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(dtype=TorchDevice.choose_torch_dtype())
            img = vae.decode(latents)

        img = img.clamp(-1, 1)
        img = rearrange(img[0], "c h w -> h w c")
        img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())

        return img_pil
```
invokeai/app/invocations/flux_vae_decode.py (new file, 60 lines)
```python
import torch
from einops import rearrange
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    Input,
    InputField,
    LatentsField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_vae_decode",
    title="FLUX Latents to Image",
    tags=["latents", "image", "vae", "l2i", "flux"],
    category="latents",
    version="1.0.0",
)
class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates an image from latents."""

    latents: LatentsField = InputField(
        description=FieldDescriptions.latents,
        input=Input.Connection,
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )

    def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype())
            img = vae.decode(latents)

        img = img.clamp(-1, 1)
        img = rearrange(img[0], "c h w -> h w c")  # noqa: F821
        img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())
        return img_pil

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.tensors.load(self.latents.latents_name)
        vae_info = context.models.load(self.vae.vae)
        image = self._vae_decode(vae_info=vae_info, latents=latents)

        TorchDevice.empty_cache()
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)
```
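The decode path maps the VAE's [-1, 1] float output to 8-bit RGB via `127.5 * (img + 1.0)`. A quick standalone check of that mapping on a dummy tensor, with no FLUX weights involved:

```python
import torch
from einops import rearrange
from PIL import Image

# Pretend VAE output: one channels-first float image in [-1, 1].
img = torch.rand(1, 3, 64, 64) * 2.0 - 1.0

img = img.clamp(-1, 1)
img = rearrange(img[0], "c h w -> h w c")         # HWC layout for PIL
arr = (127.5 * (img + 1.0)).byte().cpu().numpy()  # [-1, 1] -> [0, 255] uint8
assert arr.min() >= 0 and arr.max() <= 255

Image.fromarray(arr).save("decoded_dummy.png")
```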
invokeai/app/invocations/flux_vae_encode.py (new file, 67 lines)
```python
import einops
import torch

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    ImageField,
    Input,
    InputField,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_vae_encode",
    title="FLUX Image to Latents",
    tags=["latents", "image", "vae", "i2l", "flux"],
    category="latents",
    version="1.0.0",
)
class FluxVaeEncodeInvocation(BaseInvocation):
    """Encodes an image into latents."""

    image: ImageField = InputField(
        description="The image to encode.",
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )

    @staticmethod
    def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
        # TODO(ryand): Expose seed parameter at the invocation level.
        # TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
        # There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
        # should be used for VAE encode sampling.
        generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            image_tensor = image_tensor.to(
                device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
            )
            latents = vae.encode(image_tensor, sample=True, generator=generator)
        return latents

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        image = context.images.get_pil(self.image.image_name)

        vae_info = context.models.load(self.vae.vae)

        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

        latents = latents.to("cpu")
        name = context.tensors.save(tensor=latents)
        return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
```
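The TODO about device-consistent random tensors points at a standard trick: sample on the CPU with a seeded generator, then move to the target device, so the same seed yields the same latents on CUDA, MPS, and CPU. A minimal sketch of such a utility; `seeded_noise` is a hypothetical helper, not the `get_noise` the TODO references:

```python
import torch


def seeded_noise(shape: tuple[int, ...], seed: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
    """Generate reproducible noise that is identical regardless of target device.

    Sampling happens on the CPU with a seeded generator; only afterwards is the
    tensor moved and cast, so CUDA, MPS, and CPU runs agree at fp32.
    """
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator, device="cpu", dtype=torch.float32).to(
        device=device, dtype=dtype
    )


# Same seed and shape give the same values, up to the dtype cast.
a = seeded_noise((1, 16, 128, 128), seed=42, device=torch.device("cpu"), dtype=torch.float32)
b = seeded_noise((1, 16, 128, 128), seed=42, device=torch.device("cpu"), dtype=torch.bfloat16)
assert torch.allclose(a, b.float(), atol=5e-2)
```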
invokeai/app/invocations/mask.py (path inferred from the class names):

```diff
@@ -126,7 +126,7 @@ class ImageMaskToTensorInvocation(BaseInvocation, WithMetadata):
     title="Tensor Mask to Image",
     tags=["mask"],
     category="mask",
-    version="1.0.0",
+    version="1.1.0",
 )
 class MaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Convert a mask tensor to an image."""
@@ -135,6 +135,11 @@ class MaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

     def invoke(self, context: InvocationContext) -> ImageOutput:
         mask = context.tensors.load(self.mask.tensor_name)
+
+        # Squeeze the channel dimension if it exists.
+        if mask.dim() == 3:
+            mask = mask.squeeze(0)
+
         # Ensure that the mask is binary.
         if mask.dtype != torch.bool:
             mask = mask > 0.5
```
@@ -157,7 +157,7 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
    title="Flux Main Model",
    tags=["model", "flux"],
    category="model",
    version="1.0.3",
    version="1.0.4",
    classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
@@ -169,23 +169,35 @@ class FluxModelLoaderInvocation(BaseInvocation):
        input=Input.Direct,
    )

    t5_encoder: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder,
        ui_type=UIType.T5EncoderModel,
    t5_encoder_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
    )

    clip_embed_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPEmbedModel,
        input=Input.Direct,
        title="CLIP Embed",
    )

    vae_model: ModelIdentifierField = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
    )

    def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
        model_key = self.model.key
        for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
            if not context.models.exists(key):
                raise ValueError(f"Unknown model: {key}")

        transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})

        tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        if not context.models.exists(model_key):
            raise ValueError(f"Unknown model: {model_key}")
        transformer = self._get_model(context, SubModelType.Transformer)
        tokenizer = self._get_model(context, SubModelType.Tokenizer)
        tokenizer2 = self._get_model(context, SubModelType.Tokenizer2)
        clip_encoder = self._get_model(context, SubModelType.TextEncoder)
        t5_encoder = self._get_model(context, SubModelType.TextEncoder2)
        vae = self._get_model(context, SubModelType.VAE)
        transformer_config = context.models.get_config(transformer)
        assert isinstance(transformer_config, CheckpointConfigBase)
@@ -197,52 +209,6 @@ class FluxModelLoaderInvocation(BaseInvocation):
            max_seq_len=max_seq_lengths[transformer_config.config_path],
        )

    def _get_model(self, context: InvocationContext, submodel: SubModelType) -> ModelIdentifierField:
        match submodel:
            case SubModelType.Transformer:
                return self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
            case SubModelType.VAE:
                return self._pull_model_from_mm(
                    context,
                    SubModelType.VAE,
                    "FLUX.1-schnell_ae",
                    ModelType.VAE,
                    BaseModelType.Flux,
                )
            case submodel if submodel in [SubModelType.Tokenizer, SubModelType.TextEncoder]:
                return self._pull_model_from_mm(
                    context,
                    submodel,
                    "clip-vit-large-patch14",
                    ModelType.CLIPEmbed,
                    BaseModelType.Any,
                )
            case submodel if submodel in [SubModelType.Tokenizer2, SubModelType.TextEncoder2]:
                return self._pull_model_from_mm(
                    context,
                    submodel,
                    self.t5_encoder.name,
                    ModelType.T5Encoder,
                    BaseModelType.Any,
                )
            case _:
                raise Exception(f"{submodel.value} is not a supported submodule for a flux model")

    def _pull_model_from_mm(
        self,
        context: InvocationContext,
        submodel: SubModelType,
        name: str,
        type: ModelType,
        base: BaseModelType,
    ):
        if models := context.models.search_by_attrs(name=name, base=base, type=type):
            if len(models) != 1:
                raise Exception(f"Multiple models detected for selected model with name {name}")
            return ModelIdentifierField.from_config(models[0]).model_copy(update={"submodel_type": submodel})
        else:
            raise ValueError(f"Please install the {base}:{type} model named {name} via starter models")


@invocation(
    "main_model_loader",
@@ -88,7 +88,8 @@ class QueueItemEventBase(QueueEventBase):

    item_id: int = Field(description="The ID of the queue item")
    batch_id: str = Field(description="The ID of the queue batch")
    origin: str | None = Field(default=None, description="The origin of the batch")
    origin: str | None = Field(default=None, description="The origin of the queue item")
    destination: str | None = Field(default=None, description="The destination of the queue item")


class InvocationEventBase(QueueItemEventBase):
@@ -114,6 +115,7 @@ class InvocationStartedEvent(InvocationEventBase):
            item_id=queue_item.item_id,
            batch_id=queue_item.batch_id,
            origin=queue_item.origin,
            destination=queue_item.destination,
            session_id=queue_item.session_id,
            invocation=invocation,
            invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -148,6 +150,7 @@ class InvocationDenoiseProgressEvent(InvocationEventBase):
            item_id=queue_item.item_id,
            batch_id=queue_item.batch_id,
            origin=queue_item.origin,
            destination=queue_item.destination,
            session_id=queue_item.session_id,
            invocation=invocation,
            invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -186,6 +189,7 @@ class InvocationCompleteEvent(InvocationEventBase):
            item_id=queue_item.item_id,
            batch_id=queue_item.batch_id,
            origin=queue_item.origin,
            destination=queue_item.destination,
            session_id=queue_item.session_id,
            invocation=invocation,
            invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -219,6 +223,7 @@ class InvocationErrorEvent(InvocationEventBase):
            item_id=queue_item.item_id,
            batch_id=queue_item.batch_id,
            origin=queue_item.origin,
            destination=queue_item.destination,
            session_id=queue_item.session_id,
            invocation=invocation,
            invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
@@ -257,6 +262,7 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
            item_id=queue_item.item_id,
            batch_id=queue_item.batch_id,
            origin=queue_item.origin,
            destination=queue_item.destination,
            session_id=queue_item.session_id,
            status=queue_item.status,
            error_type=queue_item.error_type,
@@ -103,7 +103,7 @@ class HFModelSource(StringLikeSource):
        if self.variant:
            base += f":{self.variant or ''}"
        if self.subfolder:
            base += f":{self.subfolder}"
            base += f"::{self.subfolder.as_posix()}"
        return base

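For illustration of the separator scheme this change settles on (the repo id, variant, and subfolder below are invented): a variant is appended with a single colon, while a subfolder now gets a double colon.

```python
# Hypothetical HFModelSource values, for format illustration only:
#   repo_id="org/repo", variant="fp16", subfolder=Path("vae")
# str(source) before this change: "org/repo:fp16:vae"
# str(source) after this change:  "org/repo:fp16::vae"
```
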
@@ -6,7 +6,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
    Batch,
    BatchStatus,
    CancelByBatchIDsResult,
    CancelByOriginResult,
    CancelByDestinationResult,
    CancelByQueueIDResult,
    ClearResult,
    EnqueueBatchResult,
@@ -97,8 +97,8 @@ class SessionQueueBase(ABC):
        pass

    @abstractmethod
    def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
        """Cancels all queue items with the given batch origin"""
    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
        """Cancels all queue items with the given batch destination"""
        pass

    @abstractmethod
@@ -77,7 +77,14 @@ BatchDataCollection: TypeAlias = list[list[BatchDatum]]

class Batch(BaseModel):
    batch_id: str = Field(default_factory=uuid_string, description="The ID of the batch")
    origin: str | None = Field(default=None, description="The origin of this batch.")
    origin: str | None = Field(
        default=None,
        description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
    )
    destination: str | None = Field(
        default=None,
        description="The destination of this queue item. This data is used by the frontend to determine how to handle results.",
    )
    data: Optional[BatchDataCollection] = Field(default=None, description="The batch data collection.")
    graph: Graph = Field(description="The graph to initialize the session with")
    workflow: Optional[WorkflowWithoutID] = Field(
@@ -196,7 +203,14 @@ class SessionQueueItemWithoutGraph(BaseModel):
    status: QUEUE_ITEM_STATUS = Field(default="pending", description="The status of this queue item")
    priority: int = Field(default=0, description="The priority of this queue item")
    batch_id: str = Field(description="The ID of the batch associated with this queue item")
    origin: str | None = Field(default=None, description="The origin of this queue item.")
    origin: str | None = Field(
        default=None,
        description="The origin of this queue item. This data is used by the frontend to determine how to handle results.",
    )
    destination: str | None = Field(
        default=None,
        description="The destination of this queue item. This data is used by the frontend to determine how to handle results.",
    )
    session_id: str = Field(
        description="The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed."
    )
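A sketch of how a caller might populate the new fields. The `Batch` fields and `cancel_by_destination` are from this diff; the enqueue call and queue id are schematic assumptions.

```python
batch = Batch(
    graph=graph,  # the session graph to execute (assumed to be built elsewhere)
    origin="canvas",  # where this work came from
    destination="gallery",  # how the frontend should route the results
)
queue.enqueue_batch(queue_id="default", batch=batch, prepend=False)  # schematic call

# Cancel every pending/in-progress item routed to that destination:
queue.cancel_by_destination(queue_id="default", destination="gallery")
```
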
@@ -297,6 +311,7 @@ class BatchStatus(BaseModel):
    queue_id: str = Field(..., description="The ID of the queue")
    batch_id: str = Field(..., description="The ID of the batch")
    origin: str | None = Field(..., description="The origin of the batch")
    destination: str | None = Field(..., description="The destination of the batch")
    pending: int = Field(..., description="Number of queue items with status 'pending'")
    in_progress: int = Field(..., description="Number of queue items with status 'in_progress'")
    completed: int = Field(..., description="Number of queue items with status 'complete'")
@@ -331,10 +346,10 @@ class CancelByBatchIDsResult(BaseModel):
    canceled: int = Field(..., description="Number of queue items canceled")


class CancelByOriginResult(BaseModel):
    """Result of canceling by list of batch ids"""
class CancelByDestinationResult(CancelByBatchIDsResult):
    """Result of canceling by a destination"""

    canceled: int = Field(..., description="Number of queue items canceled")
    pass


class CancelByQueueIDResult(CancelByBatchIDsResult):
@@ -443,6 +458,7 @@ class SessionQueueValueToInsert(NamedTuple):
    priority: int  # priority
    workflow: Optional[str]  # workflow json
    origin: str | None
    destination: str | None


ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
@@ -464,6 +480,7 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
            priority,  # priority
            json.dumps(workflow, default=to_jsonable_python) if workflow else None,  # workflow (json)
            batch.origin,  # origin
            batch.destination,  # destination
        )
    )
    return values_to_insert
@@ -10,7 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
    Batch,
    BatchStatus,
    CancelByBatchIDsResult,
    CancelByOriginResult,
    CancelByDestinationResult,
    CancelByQueueIDResult,
    ClearResult,
    EnqueueBatchResult,
@@ -128,8 +128,8 @@ class SqliteSessionQueue(SessionQueueBase):

        self.__cursor.executemany(
            """--sql
            INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            values_to_insert,
        )
@@ -426,19 +426,19 @@ class SqliteSessionQueue(SessionQueueBase):
            self.__lock.release()
        return CancelByBatchIDsResult(canceled=count)

    def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
        try:
            current_queue_item = self.get_current(queue_id)
            self.__lock.acquire()
            where = """--sql
                WHERE
                    queue_id == ?
                    AND origin == ?
                    AND destination == ?
                    AND status != 'canceled'
                    AND status != 'completed'
                    AND status != 'failed'
                """
            params = (queue_id, origin)
            params = (queue_id, destination)
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
@@ -457,14 +457,14 @@ class SqliteSessionQueue(SessionQueueBase):
                params,
            )
            self.__conn.commit()
            if current_queue_item is not None and current_queue_item.origin == origin:
            if current_queue_item is not None and current_queue_item.destination == destination:
                self._set_queue_item_status(current_queue_item.item_id, "canceled")
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelByOriginResult(canceled=count)
        return CancelByDestinationResult(canceled=count)

    def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
        try:
@@ -579,7 +579,8 @@ class SqliteSessionQueue(SessionQueueBase):
                session_id,
                batch_id,
                queue_id,
                origin
                origin,
                destination
            FROM session_queue
            WHERE queue_id = ?
            """
@@ -659,7 +660,7 @@ class SqliteSessionQueue(SessionQueueBase):
            self.__lock.acquire()
            self.__cursor.execute(
                """--sql
                SELECT status, count(*), origin
                SELECT status, count(*), origin, destination
                FROM session_queue
                WHERE
                    queue_id = ?
@@ -672,6 +673,7 @@ class SqliteSessionQueue(SessionQueueBase):
            total = sum(row[1] for row in result)
            counts: dict[str, int] = {row[0]: row[1] for row in result}
            origin = result[0]["origin"] if result else None
            destination = result[0]["destination"] if result else None
        except Exception:
            self.__conn.rollback()
            raise
@@ -681,6 +683,7 @@ class SqliteSessionQueue(SessionQueueBase):
        return BatchStatus(
            batch_id=batch_id,
            origin=origin,
            destination=destination,
            queue_id=queue_id,
            pending=counts.get("pending", 0),
            in_progress=counts.get("in_progress", 0),
@@ -14,7 +14,7 @@ from invokeai.app.services.image_records.image_records_common import ImageCatego
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
    AnyModel,
    AnyModelConfig,
@@ -557,6 +557,24 @@ class UtilInterface(InvocationContextInterface):
            is_canceled=self.is_canceled,
        )

    def flux_step_callback(self, intermediate_state: PipelineIntermediateState) -> None:
        """
        The step callback emits a progress event with the current step, the total number of
        steps, a preview image, and some other internal metadata.

        This should be called after each denoising step.

        Args:
            intermediate_state: The intermediate state of the diffusion pipeline.
        """

        flux_step_callback(
            context_data=self._data,
            intermediate_state=intermediate_state,
            events=self._services.events,
            is_canceled=self.is_canceled,
        )


class InvocationContext:
    """Provides access to various services and data for the current invocation.
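A sketch of how an invocation can wire this new callback into the FLUX denoise loop added below in `invokeai/backend/flux/denoise.py`. The model and conditioning tensors are assumed to be prepared by the invocation; the variable names are illustrative.

```python
from invokeai.backend.flux.denoise import denoise

# Inside a hypothetical FLUX denoise invocation's invoke() method:
latents = denoise(
    model=transformer,  # the loaded Flux transformer
    img=img,
    img_ids=img_ids,
    txt=txt,
    txt_ids=txt_ids,
    vec=vec,
    timesteps=timesteps,
    # Emits a progress event (with a preview image) after every step:
    step_callback=context.util.flux_step_callback,
    guidance=guidance,
    inpaint_extension=None,
)
```
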
@@ -10,9 +10,11 @@ class Migration15Callback:
    def _add_origin_col(self, cursor: sqlite3.Cursor) -> None:
        """
        - Adds `origin` column to the session queue table.
        - Adds `destination` column to the session queue table.
        """

        cursor.execute("ALTER TABLE session_queue ADD COLUMN origin TEXT;")
        cursor.execute("ALTER TABLE session_queue ADD COLUMN destination TEXT;")


def build_migration_15() -> Migration:
@@ -21,6 +23,7 @@ def build_migration_15() -> Migration:

    This migration does the following:
    - Adds `origin` column to the session queue table.
    - Adds `destination` column to the session queue table.
    """
    migration_15 = Migration(
        from_version=14,
@@ -0,0 +1,407 @@
{
  "name": "FLUX Image to Image",
  "author": "InvokeAI",
  "description": "A simple image-to-image workflow using a FLUX dev model.",
  "version": "1.0.4",
  "contact": "",
  "tags": "image2image, flux, image-to-image",
  "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend using FLUX dev models for image-to-image workflows. The image-to-image performance with FLUX schnell models is poor.",
  "exposedFields": [
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "t5_encoder_model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "clip_embed_model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "vae_model"
    },
    {
      "nodeId": "ace0258f-67d7-4eee-a218-6fff27065214",
      "fieldName": "denoising_start"
    },
    {
      "nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "fieldName": "prompt"
    },
    {
      "nodeId": "ace0258f-67d7-4eee-a218-6fff27065214",
      "fieldName": "num_steps"
    }
  ],
  "meta": {
    "version": "3.0.0",
    "category": "default"
  },
  "nodes": [
    {
      "id": "2981a67c-480f-4237-9384-26b68dbf912b",
      "type": "invocation",
      "data": {
        "id": "2981a67c-480f-4237-9384-26b68dbf912b",
        "type": "flux_vae_encode",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "inputs": {
          "image": {
            "name": "image",
            "label": "",
            "value": {
              "image_name": "8a5c62aa-9335-45d2-9c71-89af9fc1f8d4.png"
            }
          },
          "vae": {
            "name": "vae",
            "label": ""
          }
        }
      },
      "position": {
        "x": 732.7680166609682,
        "y": -24.37398171806909
      }
    },
    {
      "id": "ace0258f-67d7-4eee-a218-6fff27065214",
      "type": "invocation",
      "data": {
        "id": "ace0258f-67d7-4eee-a218-6fff27065214",
        "type": "flux_denoise",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "latents": {
            "name": "latents",
            "label": ""
          },
          "denoise_mask": {
            "name": "denoise_mask",
            "label": ""
          },
          "denoising_start": {
            "name": "denoising_start",
            "label": "",
            "value": 0.04
          },
          "denoising_end": {
            "name": "denoising_end",
            "label": "",
            "value": 1
          },
          "transformer": {
            "name": "transformer",
            "label": ""
          },
          "positive_text_conditioning": {
            "name": "positive_text_conditioning",
            "label": ""
          },
          "width": {
            "name": "width",
            "label": "",
            "value": 1024
          },
          "height": {
            "name": "height",
            "label": "",
            "value": 1024
          },
          "num_steps": {
            "name": "num_steps",
            "label": "Steps (Recommend 30 for Dev, 4 for Schnell)",
            "value": 30
          },
          "guidance": {
            "name": "guidance",
            "label": "",
            "value": 4
          },
          "seed": {
            "name": "seed",
            "label": "",
            "value": 0
          }
        }
      },
      "position": {
        "x": 1182.8836633018684,
        "y": -251.38882958913183
      }
    },
    {
      "id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "type": "invocation",
      "data": {
        "id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
        "type": "flux_vae_decode",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": false,
        "useCache": true,
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "latents": {
            "name": "latents",
            "label": ""
          },
          "vae": {
            "name": "vae",
            "label": ""
          }
        }
      },
      "position": {
        "x": 1575.5797431839133,
        "y": -209.00150975507415
      }
    },
    {
      "id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "type": "invocation",
      "data": {
        "id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
        "type": "flux_model_loader",
        "version": "1.0.4",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": false,
        "inputs": {
          "model": {
            "name": "model",
            "label": "Model (dev variant recommended for Image-to-Image)"
          },
          "t5_encoder_model": {
            "name": "t5_encoder_model",
            "label": ""
          },
          "clip_embed_model": {
            "name": "clip_embed_model",
            "label": "",
            "value": {
              "key": "fa23a584-b623-415d-832a-21b5098ff1a1",
              "hash": "blake3:17c19f0ef941c3b7609a9c94a659ca5364de0be364a91d4179f0e39ba17c3b70",
              "name": "clip-vit-large-patch14",
              "base": "any",
              "type": "clip_embed"
            }
          },
          "vae_model": {
            "name": "vae_model",
            "label": "",
            "value": {
              "key": "74fc82ba-c0a8-479d-a890-2126f82da758",
              "hash": "blake3:ce21cb76364aa6e2421311cf4a4b5eb052a76c4f1cd207b50703d8978198a068",
              "name": "FLUX.1-schnell_ae",
              "base": "flux",
              "type": "vae"
            }
          }
        }
      },
      "position": {
        "x": 328.1809894659957,
        "y": -90.2241133566946
      }
    },
    {
      "id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "type": "invocation",
      "data": {
        "id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
        "type": "flux_text_encoder",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "inputs": {
          "clip": {
            "name": "clip",
            "label": ""
          },
          "t5_encoder": {
            "name": "t5_encoder",
            "label": ""
          },
          "t5_max_seq_len": {
            "name": "t5_max_seq_len",
            "label": "T5 Max Seq Len",
            "value": 256
          },
          "prompt": {
            "name": "prompt",
            "label": "",
            "value": "a cat wearing a birthday hat"
          }
        }
      },
      "position": {
        "x": 745.8823365057267,
        "y": -299.60249175851914
      }
    },
    {
      "id": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "type": "invocation",
      "data": {
        "id": "4754c534-a5f3-4ad0-9382-7887985e668c",
        "type": "rand_int",
        "version": "1.0.1",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": false,
        "inputs": {
          "low": {
            "name": "low",
            "label": "",
            "value": 0
          },
          "high": {
            "name": "high",
            "label": "",
            "value": 2147483647
          }
        }
      },
      "position": {
        "x": 725.834098928012,
        "y": 496.2710031089931
      }
    }
  ],
  "edges": [
    {
      "id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912bheight-ace0258f-67d7-4eee-a218-6fff27065214height",
      "type": "default",
      "source": "2981a67c-480f-4237-9384-26b68dbf912b",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "height",
      "targetHandle": "height"
    },
    {
      "id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912bwidth-ace0258f-67d7-4eee-a218-6fff27065214width",
      "type": "default",
      "source": "2981a67c-480f-4237-9384-26b68dbf912b",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "width",
      "targetHandle": "width"
    },
    {
      "id": "reactflow__edge-2981a67c-480f-4237-9384-26b68dbf912blatents-ace0258f-67d7-4eee-a218-6fff27065214latents",
      "type": "default",
      "source": "2981a67c-480f-4237-9384-26b68dbf912b",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "latents",
      "targetHandle": "latents"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-2981a67c-480f-4237-9384-26b68dbf912bvae",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "2981a67c-480f-4237-9384-26b68dbf912b",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-ace0258f-67d7-4eee-a218-6fff27065214latents-7e5172eb-48c1-44db-a770-8fd83e1435d1latents",
      "type": "default",
      "source": "ace0258f-67d7-4eee-a218-6fff27065214",
      "target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "sourceHandle": "latents",
      "targetHandle": "latents"
    },
    {
      "id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-ace0258f-67d7-4eee-a218-6fff27065214seed",
      "type": "default",
      "source": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "value",
      "targetHandle": "seed"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90transformer-ace0258f-67d7-4eee-a218-6fff27065214transformer",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "transformer",
      "targetHandle": "transformer"
    },
    {
      "id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-ace0258f-67d7-4eee-a218-6fff27065214positive_text_conditioning",
      "type": "default",
      "source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "target": "ace0258f-67d7-4eee-a218-6fff27065214",
      "sourceHandle": "conditioning",
      "targetHandle": "positive_text_conditioning"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-7e5172eb-48c1-44db-a770-8fd83e1435d1vae",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90max_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "max_seq_len",
      "targetHandle": "t5_max_seq_len"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90t5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "t5_encoder",
      "targetHandle": "t5_encoder"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90clip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "clip",
      "targetHandle": "clip"
    }
  ]
}
@@ -1,27 +1,35 @@
{
  "name": "FLUX Text to Image",
  "author": "InvokeAI",
  "description": "A simple text-to-image workflow using FLUX dev or schnell models. Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
  "version": "1.0.0",
  "description": "A simple text-to-image workflow using FLUX dev or schnell models.",
  "version": "1.0.4",
  "contact": "",
  "tags": "text2image, flux",
  "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
  "exposedFields": [
    {
      "nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "t5_encoder_model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "clip_embed_model"
    },
    {
      "nodeId": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "fieldName": "vae_model"
    },
    {
      "nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "fieldName": "prompt"
    },
    {
      "nodeId": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "nodeId": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "fieldName": "num_steps"
    },
    {
      "nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "fieldName": "t5_encoder"
    }
  ],
  "meta": {
@@ -30,12 +38,127 @@
  },
  "nodes": [
    {
      "id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "id": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "type": "invocation",
      "data": {
        "id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
        "id": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
        "type": "flux_denoise",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "latents": {
            "name": "latents",
            "label": ""
          },
          "denoise_mask": {
            "name": "denoise_mask",
            "label": ""
          },
          "denoising_start": {
            "name": "denoising_start",
            "label": "",
            "value": 0
          },
          "denoising_end": {
            "name": "denoising_end",
            "label": "",
            "value": 1
          },
          "transformer": {
            "name": "transformer",
            "label": ""
          },
          "positive_text_conditioning": {
            "name": "positive_text_conditioning",
            "label": ""
          },
          "width": {
            "name": "width",
            "label": "",
            "value": 1024
          },
          "height": {
            "name": "height",
            "label": "",
            "value": 1024
          },
          "num_steps": {
            "name": "num_steps",
            "label": "Steps (Recommend 30 for Dev, 4 for Schnell)",
            "value": 30
          },
          "guidance": {
            "name": "guidance",
            "label": "",
            "value": 4
          },
          "seed": {
            "name": "seed",
            "label": "",
            "value": 0
          }
        }
      },
      "position": {
        "x": 1186.1868226120378,
        "y": -214.9459927686657
      }
    },
    {
      "id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "type": "invocation",
      "data": {
        "id": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
        "type": "flux_vae_decode",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": false,
        "useCache": true,
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "latents": {
            "name": "latents",
            "label": ""
          },
          "vae": {
            "name": "vae",
            "label": ""
          }
        }
      },
      "position": {
        "x": 1575.5797431839133,
        "y": -209.00150975507415
      }
    },
    {
      "id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "type": "invocation",
      "data": {
        "id": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
        "type": "flux_model_loader",
        "version": "1.0.3",
        "version": "1.0.4",
        "label": "",
        "notes": "",
        "isOpen": true,
@@ -44,31 +167,25 @@
        "inputs": {
          "model": {
            "name": "model",
            "label": "Model (Starter Models can be found in Model Manager)",
            "value": {
              "key": "f04a7a2f-c74d-4538-8d5e-879a53501662",
              "hash": "random:4875da7a9508444ffa706f61961c260d0c6729f6181a86b31fad06df1277b850",
              "name": "FLUX Dev (Quantized)",
              "base": "flux",
              "type": "main"
            }
            "label": ""
          },
          "t5_encoder": {
            "name": "t5_encoder",
            "label": "T5 Encoder (Starter Models can be found in Model Manager)",
            "value": {
              "key": "20dcd9ec-5fbb-4012-8401-049e707da5e5",
              "hash": "random:f986be43ff3502169e4adbdcee158afb0e0a65a1edc4cab16ae59963630cfd8f",
              "name": "t5_bnb_int8_quantized_encoder",
              "base": "any",
              "type": "t5_encoder"
            }
          "t5_encoder_model": {
            "name": "t5_encoder_model",
            "label": ""
          },
          "clip_embed_model": {
            "name": "clip_embed_model",
            "label": ""
          },
          "vae_model": {
            "name": "vae_model",
            "label": ""
          }
        }
      },
      "position": {
        "x": 337.09365228062825,
        "y": 40.63469521079861
        "x": 381.1882713063478,
        "y": -95.89663532854017
      }
    },
    {
@@ -105,8 +222,8 @@
        }
      },
      "position": {
        "x": 824.1970602278849,
        "y": 146.98251001061735
        "x": 778.4899149328337,
        "y": -100.36469216659502
      }
    },
    {
@@ -135,132 +252,75 @@
        }
      },
      "position": {
        "x": 822.9899179655476,
        "y": 360.9657214885052
      }
    },
    {
      "id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "type": "invocation",
      "data": {
        "id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
        "type": "flux_text_to_image",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": false,
        "useCache": true,
        "inputs": {
          "board": {
            "name": "board",
            "label": ""
          },
          "metadata": {
            "name": "metadata",
            "label": ""
          },
          "transformer": {
            "name": "transformer",
            "label": ""
          },
          "vae": {
            "name": "vae",
            "label": ""
          },
          "positive_text_conditioning": {
            "name": "positive_text_conditioning",
            "label": ""
          },
          "width": {
            "name": "width",
            "label": "",
            "value": 1024
          },
          "height": {
            "name": "height",
            "label": "",
            "value": 1024
          },
          "num_steps": {
            "name": "num_steps",
            "label": "Steps (Recommend 30 for Dev, 4 for Schnell)",
            "value": 30
          },
          "guidance": {
            "name": "guidance",
            "label": "",
            "value": 4
          },
          "seed": {
            "name": "seed",
            "label": "",
            "value": 0
          }
        }
      },
      "position": {
        "x": 1216.3900791301849,
        "y": 5.500841807102248
        "x": 800.9667463219505,
        "y": 285.8297267547506
      }
    }
  ],
  "edges": [
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33amax_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90transformer-4fe24f07-f906-4f55-ab2c-9beee56ef5bdtransformer",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "sourceHandle": "transformer",
      "targetHandle": "transformer"
    },
    {
      "id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-4fe24f07-f906-4f55-ab2c-9beee56ef5bdpositive_text_conditioning",
      "type": "default",
      "source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "target": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "sourceHandle": "conditioning",
      "targetHandle": "positive_text_conditioning"
    },
    {
      "id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-4fe24f07-f906-4f55-ab2c-9beee56ef5bdseed",
      "type": "default",
      "source": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "target": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "sourceHandle": "value",
      "targetHandle": "seed"
    },
    {
      "id": "reactflow__edge-4fe24f07-f906-4f55-ab2c-9beee56ef5bdlatents-7e5172eb-48c1-44db-a770-8fd83e1435d1latents",
      "type": "default",
      "source": "4fe24f07-f906-4f55-ab2c-9beee56ef5bd",
      "target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "sourceHandle": "latents",
      "targetHandle": "latents"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90vae-7e5172eb-48c1-44db-a770-8fd83e1435d1vae",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "7e5172eb-48c1-44db-a770-8fd83e1435d1",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90max_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
      "type": "default",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "max_seq_len",
      "targetHandle": "t5_max_seq_len"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33avae-159bdf1b-79e7-4174-b86e-d40e646964c8vae",
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90t5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33atransformer-159bdf1b-79e7-4174-b86e-d40e646964c8transformer",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "transformer",
      "targetHandle": "transformer"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33at5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "t5_encoder",
      "targetHandle": "t5_encoder"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33aclip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
      "id": "reactflow__edge-f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90clip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "source": "f8d9d7c8-9ed7-4bd7-9e42-ab0e89bfac90",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "clip",
      "targetHandle": "clip"
    },
    {
      "id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-159bdf1b-79e7-4174-b86e-d40e646964c8positive_text_conditioning",
      "type": "default",
      "source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "conditioning",
      "targetHandle": "positive_text_conditioning"
    },
    {
      "id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-159bdf1b-79e7-4174-b86e-d40e646964c8seed",
      "type": "default",
      "source": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "value",
      "targetHandle": "seed"
    }
  ]
}
@@ -38,6 +38,25 @@ SD1_5_LATENT_RGB_FACTORS = [
    [-0.1307, -0.1874, -0.7445],  # L4
]

FLUX_LATENT_RGB_FACTORS = [
    [-0.0412, 0.0149, 0.0521],
    [0.0056, 0.0291, 0.0768],
    [0.0342, -0.0681, -0.0427],
    [-0.0258, 0.0092, 0.0463],
    [0.0863, 0.0784, 0.0547],
    [-0.0017, 0.0402, 0.0158],
    [0.0501, 0.1058, 0.1152],
    [-0.0209, -0.0218, -0.0329],
    [-0.0314, 0.0083, 0.0896],
    [0.0851, 0.0665, -0.0472],
    [-0.0534, 0.0238, -0.0024],
    [0.0452, -0.0026, 0.0048],
    [0.0892, 0.0831, 0.0881],
    [-0.1117, -0.0304, -0.0789],
    [0.0027, -0.0479, -0.0043],
    [-0.1146, -0.0827, -0.0598],
]


def sample_to_lowres_estimated_image(
    samples: torch.Tensor, latent_rgb_factors: torch.Tensor, smooth_matrix: Optional[torch.Tensor] = None
@@ -94,3 +113,32 @@ def stable_diffusion_step_callback(
        intermediate_state,
        ProgressImage(dataURL=dataURL, width=width, height=height),
    )


def flux_step_callback(
    context_data: "InvocationContextData",
    intermediate_state: PipelineIntermediateState,
    events: "EventServiceBase",
    is_canceled: Callable[[], bool],
) -> None:
    if is_canceled():
        raise CanceledException
    sample = intermediate_state.latents
    latent_rgb_factors = torch.tensor(FLUX_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
    latent_image_perm = sample.permute(1, 2, 0).to(dtype=sample.dtype, device=sample.device)
    latent_image = latent_image_perm @ latent_rgb_factors
    latents_ubyte = (
        ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF)  # scale from [-1, 1] to [0, 1], then to [0, 255]
    ).to(device="cpu", dtype=torch.uint8)
    image = Image.fromarray(latents_ubyte.cpu().numpy())
    (width, height) = image.size
    width *= 8
    height *= 8
    dataURL = image_to_dataURL(image, image_format="JPEG")

    events.emit_invocation_denoise_progress(
        context_data.queue_item,
        context_data.invocation,
        intermediate_state,
        ProgressImage(dataURL=dataURL, width=width, height=height),
    )
invokeai/backend/flux/denoise.py (new file, 56 lines)
@@ -0,0 +1,56 @@
from typing import Callable

import torch
from tqdm import tqdm

from invokeai.backend.flux.inpaint_extension import InpaintExtension
from invokeai.backend.flux.model import Flux
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState


def denoise(
    model: Flux,
    # model input
    img: torch.Tensor,
    img_ids: torch.Tensor,
    txt: torch.Tensor,
    txt_ids: torch.Tensor,
    vec: torch.Tensor,
    # sampling parameters
    timesteps: list[float],
    step_callback: Callable[[PipelineIntermediateState], None],
    guidance: float,
    inpaint_extension: InpaintExtension | None,
):
    step = 0
    # guidance_vec is ignored for schnell.
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
        pred = model(
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            timesteps=t_vec,
            guidance=guidance_vec,
        )
        preview_img = img - t_curr * pred
        img = img + (t_prev - t_curr) * pred

        if inpaint_extension is not None:
            img = inpaint_extension.merge_intermediate_latents_with_init_latents(img, t_prev)

        step_callback(
            PipelineIntermediateState(
                step=step,
                order=1,
                total_steps=len(timesteps),
                timestep=int(t_curr),
                latents=preview_img,
            ),
        )
        step += 1

    return img
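The `preview_img` and `img` updates are a plain Euler step under the rectified-flow parameterization, assuming the model output `pred` approximates the flow velocity:

$$x_t = (1 - t)\,x_0 + t\,\epsilon, \qquad \mathrm{pred} \approx \frac{dx_t}{dt} = \epsilon - x_0$$

$$\hat{x}_0 = x_t - t_{\mathrm{curr}}\,\mathrm{pred} \;\;(\texttt{preview\_img}), \qquad x_{t_{\mathrm{prev}}} = x_t + (t_{\mathrm{prev}} - t_{\mathrm{curr}})\,\mathrm{pred}$$

So each step both advances the sample toward $t = 0$ and produces a one-shot estimate of the clean latents for the progress preview.
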
invokeai/backend/flux/inpaint_extension.py (new file, 35 lines)
@@ -0,0 +1,35 @@
import torch


class InpaintExtension:
    """A class for managing inpainting with FLUX."""

    def __init__(self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor, noise: torch.Tensor):
        """Initialize InpaintExtension.

        Args:
            init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0). In 'packed' format.
            inpaint_mask (torch.Tensor): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1 will be
                re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
                inpainted region with the background. In 'packed' format.
            noise (torch.Tensor): The noise tensor used to noise the init_latents. In 'packed' format.
        """
        assert init_latents.shape == inpaint_mask.shape == noise.shape
        self._init_latents = init_latents
        self._inpaint_mask = inpaint_mask
        self._noise = noise

    def merge_intermediate_latents_with_init_latents(
        self, intermediate_latents: torch.Tensor, timestep: float
    ) -> torch.Tensor:
        """Merge the intermediate latents with the initial latents for the current timestep using the inpaint mask. I.e.
        update the intermediate latents to keep the regions that are not being inpainted on the correct noise
        trajectory.

        This function should be called after each denoising step.
        """
        # Noise the init latents for the current timestep.
        noised_init_latents = self._noise * timestep + (1.0 - timestep) * self._init_latents

        # Merge the intermediate latents with the noised_init_latents using the inpaint_mask.
        return intermediate_latents * self._inpaint_mask + noised_init_latents * (1.0 - self._inpaint_mask)
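A minimal usage sketch. The latent shape is an assumption for a 512x512 pixel image; `pack()` and `get_noise()` come from the new `sampling_utils.py` further down in this changeset.

```python
import torch

from invokeai.backend.flux.inpaint_extension import InpaintExtension
from invokeai.backend.flux.sampling_utils import get_noise, pack

# Assumed setup: a (1, 16, 64, 64) latent image for a 512x512 pixel image.
init_latents = torch.randn(1, 16, 64, 64)
noise = get_noise(1, 512, 512, device=torch.device("cpu"), dtype=torch.float32, seed=0)
inpaint_mask = torch.ones(1, 16, 64, 64)  # 1 = re-generate, 0 = keep

# All three tensors must be in 'packed' format, hence the pack() calls.
inpaint_ext = InpaintExtension(
    init_latents=pack(init_latents),
    inpaint_mask=pack(inpaint_mask),
    noise=pack(noise),
)
# inpaint_ext is then passed to denoise(..., inpaint_extension=inpaint_ext),
# which re-anchors the unmasked region after every step.
```
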
@@ -258,16 +258,17 @@ class Decoder(nn.Module):


class DiagonalGaussian(nn.Module):
    def __init__(self, sample: bool = True, chunk_dim: int = 1):
    def __init__(self, chunk_dim: int = 1):
        super().__init__()
        self.sample = sample
        self.chunk_dim = chunk_dim

    def forward(self, z: Tensor) -> Tensor:
    def forward(self, z: Tensor, sample: bool = True, generator: torch.Generator | None = None) -> Tensor:
        mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
        if self.sample:
        if sample:
            std = torch.exp(0.5 * logvar)
            return mean + std * torch.randn_like(mean)
            # Unfortunately, torch.randn_like(...) does not accept a generator argument at the time of writing, so we
            # have to use torch.randn(...) instead.
            return mean + std * torch.randn(size=mean.size(), generator=generator, dtype=mean.dtype, device=mean.device)
        else:
            return mean

@@ -297,8 +298,21 @@ class AutoEncoder(nn.Module):
        self.scale_factor = params.scale_factor
        self.shift_factor = params.shift_factor

    def encode(self, x: Tensor) -> Tensor:
        z = self.reg(self.encoder(x))
    def encode(self, x: Tensor, sample: bool = True, generator: torch.Generator | None = None) -> Tensor:
        """Run VAE encoding on input tensor x.

        Args:
            x (Tensor): Input image tensor. Shape: (batch_size, in_channels, height, width).
            sample (bool, optional): If True, sample from the encoded distribution, else, return the distribution mean.
                Defaults to True.
            generator (torch.Generator | None, optional): Optional random number generator for reproducibility.
                Defaults to None.

        Returns:
            Tensor: Encoded latent tensor. Shape: (batch_size, z_channels, latent_height, latent_width).
        """

        z = self.reg(self.encoder(x), sample=sample, generator=generator)
        z = self.scale_factor * (z - self.shift_factor)
        return z

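With `sample` and `generator` threaded through, encoding becomes reproducible. A minimal sketch, assuming `ae` is a loaded `AutoEncoder` on the CPU and `image_tensor` is a `(1, 3, H, W)` image in `[-1, 1]`:

```python
import torch

generator = torch.Generator(device="cpu").manual_seed(42)
latents_a = ae.encode(image_tensor, sample=True, generator=generator)

generator = torch.Generator(device="cpu").manual_seed(42)
latents_b = ae.encode(image_tensor, sample=True, generator=generator)

assert torch.equal(latents_a, latents_b)  # same seed, same sampled latents
```
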
@@ -1,176 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

import math
from typing import Callable

import torch
from einops import rearrange, repeat
from torch import Tensor
from tqdm import tqdm

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.conditioner import HFEncoder


def get_noise(
    num_samples: int,
    height: int,
    width: int,
    device: torch.device,
    dtype: torch.dtype,
    seed: int,
):
    # We always generate noise on the same device and dtype then cast to ensure consistency across devices/dtypes.
    rand_device = "cpu"
    rand_dtype = torch.float16
    return torch.randn(
        num_samples,
        16,
        # allow for packing
        2 * math.ceil(height / 16),
        2 * math.ceil(width / 16),
        device=rand_device,
        dtype=rand_dtype,
        generator=torch.Generator(device=rand_device).manual_seed(seed),
    ).to(device=device, dtype=dtype)


def prepare(t5: HFEncoder, clip: HFEncoder, img: Tensor, prompt: str | list[str]) -> dict[str, Tensor]:
    bs, c, h, w = img.shape
    if bs == 1 and not isinstance(prompt, str):
        bs = len(prompt)

    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    img_ids = torch.zeros(h // 2, w // 2, 3)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    if isinstance(prompt, str):
        prompt = [prompt]
    txt = t5(prompt)
    if txt.shape[0] == 1 and bs > 1:
        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
    txt_ids = torch.zeros(bs, txt.shape[1], 3)

    vec = clip(prompt)
    if vec.shape[0] == 1 and bs > 1:
        vec = repeat(vec, "1 ... -> bs ...", bs=bs)

    return {
        "img": img,
        "img_ids": img_ids.to(img.device),
        "txt": txt.to(img.device),
        "txt_ids": txt_ids.to(img.device),
        "vec": vec.to(img.device),
    }


def time_shift(mu: float, sigma: float, t: Tensor):
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)


def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1
    return lambda x: m * x + b


def get_schedule(
    num_steps: int,
    image_seq_len: int,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
    shift: bool = True,
) -> list[float]:
    # extra step for zero
    timesteps = torch.linspace(1, 0, num_steps + 1)

    # shifting the schedule to favor high timesteps for higher signal images
    if shift:
        # eastimate mu based on linear estimation between two points
        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
        timesteps = time_shift(mu, 1.0, timesteps)

    return timesteps.tolist()


def denoise(
    model: Flux,
    # model input
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    vec: Tensor,
    # sampling parameters
    timesteps: list[float],
    step_callback: Callable[[], None],
    guidance: float = 4.0,
):
    dtype = model.txt_in.bias.dtype

    # TODO(ryand): This shouldn't be necessary if we manage the dtypes properly in the caller.
    img = img.to(dtype=dtype)
    img_ids = img_ids.to(dtype=dtype)
    txt = txt.to(dtype=dtype)
    txt_ids = txt_ids.to(dtype=dtype)
    vec = vec.to(dtype=dtype)

    # this is ignored for schnell
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
        pred = model(
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            timesteps=t_vec,
            guidance=guidance_vec,
        )

        img = img + (t_prev - t_curr) * pred
        step_callback()

    return img


def unpack(x: Tensor, height: int, width: int) -> Tensor:
    return rearrange(
        x,
        "b (h w) (c ph pw) -> b c (h ph) (w pw)",
        h=math.ceil(height / 16),
        w=math.ceil(width / 16),
        ph=2,
        pw=2,
    )


def prepare_latent_img_patches(latent_img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Convert an input image in latent space to patches for diffusion.

    This implementation was extracted from:
    https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/sampling.py#L32

    Returns:
        tuple[Tensor, Tensor]: (img, img_ids), as defined in the original flux repo.
    """
    bs, c, h, w = latent_img.shape

    # Pixel unshuffle with a scale of 2, and flatten the height/width dimensions to get an array of patches.
    img = rearrange(latent_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    # Generate patch position ids.
    img_ids = torch.zeros(h // 2, w // 2, 3, device=img.device)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=img.device)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=img.device)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    return img, img_ids
invokeai/backend/flux/sampling_utils.py (new file, 135 lines)
@@ -0,0 +1,135 @@
# Initially pulled from https://github.com/black-forest-labs/flux

import math
from typing import Callable

import torch
from einops import rearrange, repeat


def get_noise(
    num_samples: int,
    height: int,
    width: int,
    device: torch.device,
    dtype: torch.dtype,
    seed: int,
):
    # We always generate noise on the same device and dtype then cast to ensure consistency across devices/dtypes.
    rand_device = "cpu"
    rand_dtype = torch.float16
    return torch.randn(
        num_samples,
        16,
        # allow for packing
        2 * math.ceil(height / 16),
        2 * math.ceil(width / 16),
        device=rand_device,
        dtype=rand_dtype,
        generator=torch.Generator(device=rand_device).manual_seed(seed),
    ).to(device=device, dtype=dtype)


def time_shift(mu: float, sigma: float, t: torch.Tensor) -> torch.Tensor:
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)


def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1
    return lambda x: m * x + b


def get_schedule(
    num_steps: int,
    image_seq_len: int,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
    shift: bool = True,
) -> list[float]:
    # extra step for zero
    timesteps = torch.linspace(1, 0, num_steps + 1)

    # shifting the schedule to favor high timesteps for higher signal images
    if shift:
        # estimate mu based on linear estimation between two points
        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
        timesteps = time_shift(mu, 1.0, timesteps)

    return timesteps.tolist()


def _find_last_index_ge_val(timesteps: list[float], val: float, eps: float = 1e-6) -> int:
    """Find the last index in timesteps that is >= val.

    We use epsilon-close equality to avoid potential floating point errors.
    """
    idx = len(list(filter(lambda t: t >= (val - eps), timesteps))) - 1
    assert idx >= 0
    return idx
|
||||
|
||||
def clip_timestep_schedule(timesteps: list[float], denoising_start: float, denoising_end: float) -> list[float]:
|
||||
"""Clip the timestep schedule to the denoising range.
|
||||
|
||||
Args:
|
||||
timesteps (list[float]): The original timestep schedule: [1.0, ..., 0.0].
|
||||
denoising_start (float): A value in [0, 1] specifying the start of the denoising process. E.g. a value of 0.2
|
||||
would mean that the denoising process start at the last timestep in the schedule >= 0.8.
|
||||
denoising_end (float): A value in [0, 1] specifying the end of the denoising process. E.g. a value of 0.8 would
|
||||
mean that the denoising process end at the last timestep in the schedule >= 0.2.
|
||||
|
||||
Returns:
|
||||
list[float]: The clipped timestep schedule.
|
||||
"""
|
||||
assert 0.0 <= denoising_start <= 1.0
|
||||
assert 0.0 <= denoising_end <= 1.0
|
||||
assert denoising_start <= denoising_end
|
||||
|
||||
t_start_val = 1.0 - denoising_start
|
||||
t_end_val = 1.0 - denoising_end
|
||||
|
||||
t_start_idx = _find_last_index_ge_val(timesteps, t_start_val)
|
||||
t_end_idx = _find_last_index_ge_val(timesteps, t_end_val)
|
||||
|
||||
clipped_timesteps = timesteps[t_start_idx : t_end_idx + 1]
|
||||
|
||||
return clipped_timesteps
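
For example, clipping a 4-step schedule for an image-to-image pass that skips the first half of denoising (a sketch):

```python
schedule = [1.0, 0.75, 0.5, 0.25, 0.0]
clipped = clip_timestep_schedule(schedule, denoising_start=0.5, denoising_end=1.0)
print(clipped)  # [0.5, 0.25, 0.0] -- starts at the last timestep >= 1.0 - 0.5
```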

def unpack(x: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """Unpack flat array of patch embeddings to latent image."""
    return rearrange(
        x,
        "b (h w) (c ph pw) -> b c (h ph) (w pw)",
        h=math.ceil(height / 16),
        w=math.ceil(width / 16),
        ph=2,
        pw=2,
    )


def pack(x: torch.Tensor) -> torch.Tensor:
    """Pack latent image to flattened array of patch embeddings."""
    # Pixel unshuffle with a scale of 2, and flatten the height/width dimensions to get an array of patches.
    return rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)


def generate_img_ids(h: int, w: int, batch_size: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
    """Generate tensor of image position ids.

    Args:
        h (int): Height of image in latent space.
        w (int): Width of image in latent space.
        batch_size (int): Batch size.
        device (torch.device): Device.
        dtype (torch.dtype): dtype.

    Returns:
        torch.Tensor: Image position ids.
    """
    img_ids = torch.zeros(h // 2, w // 2, 3, device=device, dtype=dtype)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=device, dtype=dtype)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=device, dtype=dtype)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)
    return img_ids
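
pack and unpack are exact inverses when the latent dimensions are even (a sketch; height/width are in image space, hence the division by 16):

```python
latent = torch.randn(1, 16, 128, 128)  # latent for a hypothetical 1024x1024 image
packed = pack(latent)                  # shape (1, 4096, 64)
restored = unpack(packed, height=1024, width=1024)
assert torch.equal(restored, latent)
```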

@@ -66,12 +66,14 @@ class ModelLoader(ModelLoaderBase):
        return (model_base / config.path).resolve()

    def _load_and_cache(self, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> ModelLockerBase:
        stats_name = ":".join([config.base, config.type, config.name, (submodel_type or "")])
        try:
            return self._ram_cache.get(config.key, submodel_type)
            return self._ram_cache.get(config.key, submodel_type, stats_name=stats_name)
        except IndexError:
            pass

        config.path = str(self._get_model_path(config))
        self._ram_cache.make_room(self.get_size_fs(config, Path(config.path), submodel_type))
        loaded_model = self._load_model(config, submodel_type)

        self._ram_cache.put(
@@ -83,7 +85,7 @@ class ModelLoader(ModelLoaderBase):
        return self._ram_cache.get(
            key=config.key,
            submodel_type=submodel_type,
            stats_name=":".join([config.base, config.type, config.name, (submodel_type or "")]),
            stats_name=stats_name,
        )

    def get_size_fs(
@@ -128,7 +128,24 @@ class ModelCacheBase(ABC, Generic[T]):
    @property
    @abstractmethod
    def max_cache_size(self) -> float:
        """Return true if the cache is configured to lazily offload models in VRAM."""
        """Return the maximum size the RAM cache can grow to."""
        pass

    @max_cache_size.setter
    @abstractmethod
    def max_cache_size(self, value: float) -> None:
        """Set the cap on the RAM cache size."""

    @property
    @abstractmethod
    def max_vram_cache_size(self) -> float:
        """Return the maximum size the VRAM cache can grow to."""
        pass

    @max_vram_cache_size.setter
    @abstractmethod
    def max_vram_cache_size(self, value: float) -> None:
        """Set the maximum size the VRAM cache can grow to."""
        pass

    @abstractmethod
@@ -193,15 +210,6 @@ class ModelCacheBase(ABC, Generic[T]):
        """
        pass

    @abstractmethod
    def exists(
        self,
        key: str,
        submodel_type: Optional[SubModelType] = None,
    ) -> bool:
        """Return true if the model identified by key and submodel_type is in the cache."""
        pass

    @abstractmethod
    def cache_size(self) -> int:
        """Get the total size of the models currently cached."""

@@ -1,22 +1,6 @@
# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Development team
# TODO: Add Stalker's proper name to copyright
"""
Manage a RAM cache of diffusion/transformer models for fast switching.
They are moved between GPU VRAM and CPU RAM as necessary. If the cache
grows larger than a preset maximum, then the least recently used
model will be cleared and (re)loaded from disk when next needed.

The cache returns context manager generators designed to load the
model into the GPU within the context, and unload outside the
context. Use like this:

    cache = ModelCache(max_cache_size=7.5)
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1,
        cache.get_model('stabilityai/stable-diffusion-2') as SD2:
            do_something_in_GPU(SD1,SD2)

"""
""" """

import gc
import math
@@ -40,53 +24,74 @@ from invokeai.backend.model_manager.load.model_util import calc_model_size_by_da
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger

# Maximum size of the cache, in gigs
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0

# amount of GPU memory to hold in reserve for use by generations (GB)
DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75

# actual size of a gig
GIG = 1073741824
# Size of a GB in bytes.
GB = 2**30

# Size of a MB in bytes.
MB = 2**20
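
The renamed constants are numerically unchanged; a quick sanity check:

```python
assert GB == 1073741824  # 2**30, identical to the old GIG
assert MB == 1048576     # 2**20
```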


class ModelCache(ModelCacheBase[AnyModel]):
    """Implementation of ModelCacheBase."""
    """A cache for managing models in memory.

    The cache is based on two levels of model storage:
    - execution_device: The device where most models are executed (typically "cuda", "mps", or "cpu").
    - storage_device: The device where models are offloaded when not in active use (typically "cpu").

    The model cache is based on the following assumptions:
    - storage_device_mem_size > execution_device_mem_size
    - disk_to_storage_device_transfer_time >> storage_device_to_execution_device_transfer_time

    A copy of all models in the cache is always kept on the storage_device. A subset of the models also have a copy on
    the execution_device.

    Models are moved between the storage_device and the execution_device as necessary. Cache size limits are enforced
    on both the storage_device and the execution_device. The execution_device cache uses a smallest-first offload
    policy. The storage_device cache uses a least-recently-used (LRU) offload policy.

    Note: Neither of these offload policies has really been compared against alternatives. It's likely that different
    policies would be better, although the optimal policies are likely heavily dependent on usage patterns and HW
    configuration.

    The cache returns context manager generators designed to load the model into the execution device (often GPU) within
    the context, and unload outside the context.

    Example usage:
    ```
    cache = ModelCache(max_cache_size=7.5, max_vram_cache_size=6.0)
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1:
        do_something_on_gpu(SD1)
    ```
    """

    def __init__(
        self,
        max_cache_size: float = DEFAULT_MAX_CACHE_SIZE,
        max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE,
        max_cache_size: float,
        max_vram_cache_size: float,
        execution_device: torch.device = torch.device("cuda"),
        storage_device: torch.device = torch.device("cpu"),
        precision: torch.dtype = torch.float16,
        sequential_offload: bool = False,
        lazy_offloading: bool = True,
        sha_chunksize: int = 16777216,
        log_memory_usage: bool = False,
        logger: Optional[Logger] = None,
    ):
        """
        Initialize the model RAM cache.

        :param max_cache_size: Maximum size of the RAM cache [6.0 GB]
        :param max_cache_size: Maximum size of the storage_device cache in GBs.
        :param max_vram_cache_size: Maximum size of the execution_device cache in GBs.
        :param execution_device: Torch device to load active model into [torch.device('cuda')]
        :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
        :param precision: Precision for loaded models [torch.float16]
        :param lazy_offloading: Keep model in VRAM until another model needs to be loaded
        :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially
        :param log_memory_usage: If True, a memory snapshot will be captured before and after every model cache
            operation, and the result will be logged (at debug level). There is a time cost to capturing the memory
            snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's
            behaviour.
        :param logger: InvokeAILogger to use (otherwise creates one)
        """
        # allow lazy offloading only when vram cache enabled
        self._lazy_offloading = lazy_offloading and max_vram_cache_size > 0
        self._precision: torch.dtype = precision
        self._max_cache_size: float = max_cache_size
        self._max_vram_cache_size: float = max_vram_cache_size
        self._execution_device: torch.device = execution_device
@@ -128,6 +133,16 @@ class ModelCache(ModelCacheBase[AnyModel]):
        """Set the cap on cache size."""
        self._max_cache_size = value

    @property
    def max_vram_cache_size(self) -> float:
        """Return the cap on vram cache size."""
        return self._max_vram_cache_size

    @max_vram_cache_size.setter
    def max_vram_cache_size(self, value: float) -> None:
        """Set the cap on vram cache size."""
        self._max_vram_cache_size = value

    @property
    def stats(self) -> Optional[CacheStats]:
        """Return collected CacheStats object."""
@@ -145,15 +160,6 @@ class ModelCache(ModelCacheBase[AnyModel]):
            total += cache_record.size
        return total

    def exists(
        self,
        key: str,
        submodel_type: Optional[SubModelType] = None,
    ) -> bool:
        """Return true if the model identified by key and submodel_type is in the cache."""
        key = self._make_cache_key(key, submodel_type)
        return key in self._cached_models

    def put(
        self,
        key: str,
@@ -203,7 +209,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
        # more stats
        if self.stats:
            stats_name = stats_name or key
            self.stats.cache_size = int(self._max_cache_size * GIG)
            self.stats.cache_size = int(self._max_cache_size * GB)
            self.stats.high_watermark = max(self.stats.high_watermark, self.cache_size())
            self.stats.in_cache = len(self._cached_models)
            self.stats.loaded_model_sizes[stats_name] = max(
@@ -231,10 +237,13 @@ class ModelCache(ModelCacheBase[AnyModel]):
        return model_key

    def offload_unlocked_models(self, size_required: int) -> None:
        """Move any unused models from VRAM."""
        reserved = self._max_vram_cache_size * GIG
        """Offload models from the execution_device to make room for size_required.

        :param size_required: The amount of space to clear in the execution_device cache, in bytes.
        """
        reserved = self._max_vram_cache_size * GB
        vram_in_use = torch.cuda.memory_allocated() + size_required
        self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM needed for models; max allowed={(reserved/GIG):.2f}GB")
        self.logger.debug(f"{(vram_in_use/GB):.2f}GB VRAM needed for models; max allowed={(reserved/GB):.2f}GB")
        for _, cache_entry in sorted(self._cached_models.items(), key=lambda x: x[1].size):
            if vram_in_use <= reserved:
                break
@@ -245,7 +254,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
            cache_entry.loaded = False
            vram_in_use = torch.cuda.memory_allocated() + size_required
            self.logger.debug(
                f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GIG):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GIG):.2f}GB"
                f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GB):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GB):.2f}GB"
            )

        TorchDevice.empty_cache()
@@ -303,7 +312,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
        self.logger.debug(
            f"Moved model '{cache_entry.key}' from {source_device} to"
            f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s."
            f"Estimated model size: {(cache_entry.size/GIG):.3f} GB."
            f"Estimated model size: {(cache_entry.size/GB):.3f} GB."
            f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
        )

@@ -326,14 +335,14 @@ class ModelCache(ModelCacheBase[AnyModel]):
            f"Moving model '{cache_entry.key}' from {source_device} to"
            f" {target_device} caused an unexpected change in VRAM usage. The model's"
            " estimated size may be incorrect. Estimated model size:"
            f" {(cache_entry.size/GIG):.3f} GB.\n"
            f" {(cache_entry.size/GB):.3f} GB.\n"
            f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
        )

    def print_cuda_stats(self) -> None:
        """Log CUDA diagnostics."""
        vram = "%4.2fG" % (torch.cuda.memory_allocated() / GIG)
        ram = "%4.2fG" % (self.cache_size() / GIG)
        vram = "%4.2fG" % (torch.cuda.memory_allocated() / GB)
        ram = "%4.2fG" % (self.cache_size() / GB)

        in_ram_models = 0
        in_vram_models = 0
@@ -353,17 +362,20 @@ class ModelCache(ModelCacheBase[AnyModel]):
        )

    def make_room(self, size: int) -> None:
        """Make enough room in the cache to accommodate a new model of indicated size."""
        # calculate how much memory this model will require
        # multiplier = 2 if self.precision==torch.float32 else 1
        """Make enough room in the cache to accommodate a new model of indicated size.

        Note: This function deletes all of the cache's internal references to a model in order to free it. If there are
        external references to the model, there's nothing that the cache can do about it, and those models will not be
        garbage-collected.
        """
        bytes_needed = size
        maximum_size = self.max_cache_size * GIG  # stored in GB, convert to bytes
        maximum_size = self.max_cache_size * GB  # stored in GB, convert to bytes
        current_size = self.cache_size()

        if current_size + bytes_needed > maximum_size:
            self.logger.debug(
                f"Max cache size exceeded: {(current_size/GIG):.2f}/{self.max_cache_size:.2f} GB, need an additional"
                f" {(bytes_needed/GIG):.2f} GB"
                f"Max cache size exceeded: {(current_size/GB):.2f}/{self.max_cache_size:.2f} GB, need an additional"
                f" {(bytes_needed/GB):.2f} GB"
            )

        self.logger.debug(f"Before making_room: cached_models={len(self._cached_models)}")
@@ -380,7 +392,7 @@ class ModelCache(ModelCacheBase[AnyModel]):

            if not cache_entry.locked:
                self.logger.debug(
                    f"Removing {model_key} from RAM cache to free at least {(size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
                    f"Removing {model_key} from RAM cache to free at least {(size/GB):.2f} GB (-{(cache_entry.size/GB):.2f} GB)"
                )
                current_size -= cache_entry.size
                models_cleared += 1
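
A sketch of the caveat in the make_room docstring: dropping the cache's internal reference frees a model only if no external references remain.

```python
import gc

cache = {"model-key": object()}   # stand-in for a cached model entry
external_ref = cache["model-key"]
del cache["model-key"]
gc.collect()
# The object is still reachable via external_ref, so it is not collected;
# the cache can only delete its own references.
```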

@@ -32,6 +32,9 @@ from invokeai.backend.model_manager.config import (
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.model_manager.util.model_util import (
    convert_bundle_to_flux_transformer_checkpoint,
)
from invokeai.backend.util.silence_warnings import SilenceWarnings

try:
@@ -190,6 +193,13 @@ class FluxCheckpointModel(ModelLoader):
        with SilenceWarnings():
            model = Flux(params[config.config_path])
            sd = load_file(model_path)
            if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
                sd = convert_bundle_to_flux_transformer_checkpoint(sd)
            new_sd_size = sum([ten.nelement() * torch.bfloat16.itemsize for ten in sd.values()])
            self._ram_cache.make_room(new_sd_size)
            for k in sd.keys():
                # We need to cast to bfloat16 due to it being the only currently supported dtype for inference
                sd[k] = sd[k].to(torch.bfloat16)
            model.load_state_dict(sd, assign=True)
        return model

@@ -230,5 +240,7 @@ class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
            model = Flux(params[config.config_path])
            model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
            sd = load_file(model_path)
            if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
                sd = convert_bundle_to_flux_transformer_checkpoint(sd)
            model.load_state_dict(sd, assign=True)
        return model

@@ -108,6 +108,8 @@ class ModelProbe(object):
        "CLIPVisionModelWithProjection": ModelType.CLIPVision,
        "T2IAdapter": ModelType.T2IAdapter,
        "CLIPModel": ModelType.CLIPEmbed,
        "CLIPTextModel": ModelType.CLIPEmbed,
        "T5EncoderModel": ModelType.T5Encoder,
    }

    @classmethod
@@ -224,7 +226,18 @@ class ModelProbe(object):
        ckpt = ckpt.get("state_dict", ckpt)

        for key in [str(k) for k in ckpt.keys()]:
            if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.", "double_blocks.")):
            if key.startswith(
                (
                    "cond_stage_model.",
                    "first_stage_model.",
                    "model.diffusion_model.",
                    # FLUX models in the official BFL format contain keys with the "double_blocks." prefix.
                    "double_blocks.",
                    # Some FLUX checkpoint files contain transformer keys prefixed with "model.diffusion_model".
                    # This prefix is typically used to distinguish between multiple models bundled in a single file.
                    "model.diffusion_model.double_blocks.",
                )
            ):
                # Keys starting with double_blocks are associated with Flux models
                return ModelType.Main
            elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
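
The heuristic above can be summarized as a small predicate (a hypothetical helper, not part of the diff):

```python
def _looks_like_flux(state_dict: dict) -> bool:
    # FLUX transformers expose "double_blocks." keys, optionally behind the
    # "model.diffusion_model." prefix used by multi-model bundle files.
    return any(
        k.startswith(("double_blocks.", "model.diffusion_model.double_blocks."))
        for k in state_dict
    )
```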

@@ -283,9 +296,16 @@ class ModelProbe(object):
        if (folder_path / "image_encoder.txt").exists():
            return ModelType.IPAdapter

        i = folder_path / "model_index.json"
        c = folder_path / "config.json"
        config_path = i if i.exists() else c if c.exists() else None
        config_path = None
        for p in [
            folder_path / "model_index.json",  # pipeline
            folder_path / "config.json",  # most diffusers
            folder_path / "text_encoder_2" / "config.json",  # T5 text encoder
            folder_path / "text_encoder" / "config.json",  # T5 CLIP
        ]:
            if p.exists():
                config_path = p
                break

        if config_path:
            with open(config_path, "r") as file:

@@ -328,7 +348,10 @@ class ModelProbe(object):
        # TODO: Decide between dev/schnell
        checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
        state_dict = checkpoint.get("state_dict") or checkpoint
        if "guidance_in.out_layer.weight" in state_dict:
        if (
            "guidance_in.out_layer.weight" in state_dict
            or "model.diffusion_model.guidance_in.out_layer.weight" in state_dict
        ):
            # For flux, this is a key in invokeai.backend.flux.util.params
            # Due to model type and format being the descriminator for model configs this
            # is used rather than attempting to support flux with separate model types and format
@@ -336,7 +359,7 @@ class ModelProbe(object):
            config_file = "flux-dev"
        else:
            # For flux, this is a key in invokeai.backend.flux.util.params
            # Due to model type and format being the descriminator for model configs this
            # Due to model type and format being the discriminator for model configs this
            # is used rather than attempting to support flux with separate model types and format
            # If changed in the future, please fix me
            config_file = "flux-schnell"

@@ -443,7 +466,10 @@ class CheckpointProbeBase(ProbeBase):

    def get_format(self) -> ModelFormat:
        state_dict = self.checkpoint.get("state_dict") or self.checkpoint
        if "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict:
        if (
            "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict
            or "model.diffusion_model.double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict
        ):
            return ModelFormat.BnbQuantizednf4b
        return ModelFormat("checkpoint")

@@ -470,7 +496,10 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
    def get_base_type(self) -> BaseModelType:
        checkpoint = self.checkpoint
        state_dict = self.checkpoint.get("state_dict") or checkpoint
        if "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict:
        if (
            "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict
            or "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in state_dict
        ):
            return BaseModelType.Flux
        key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
        if key_name in state_dict and state_dict[key_name].shape[-1] == 768:

@@ -747,8 +776,27 @@ class TextualInversionFolderProbe(FolderProbeBase):


class T5EncoderFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        return BaseModelType.Any

    def get_format(self) -> ModelFormat:
        return ModelFormat.T5Encoder
        path = self.model_path / "text_encoder_2"
        if (path / "model.safetensors.index.json").exists():
            return ModelFormat.T5Encoder
        files = list(path.glob("*.safetensors"))
        if len(files) == 0:
            raise InvalidModelConfigException(f"{self.model_path.as_posix()}: no .safetensors files found")

        # shortcut: look for the quantization in the name
        if any(x for x in files if "llm_int8" in x.as_posix()):
            return ModelFormat.BnbQuantizedLlmInt8b

        # more reliable path: probe contents for a 'SCB' key
        ckpt = read_checkpoint_meta(files[0], scan=True)
        if any("SCB" in x for x in ckpt.keys()):
            return ModelFormat.BnbQuantizedLlmInt8b

        raise InvalidModelConfigException(f"{self.model_path.as_posix()}: unknown model format")


class ONNXFolderProbe(PipelineFolderProbe):
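
The 'SCB' probe in T5EncoderFolderProbe works because bitsandbytes LLM.int8 layers serialize a per-layer SCB (scale) tensor alongside the weights; a minimal sketch of the same check (the key in the comment is hypothetical):

```python
def _is_llm_int8_checkpoint(keys: list[str]) -> bool:
    # e.g. "encoder.block.0.layer.0.SelfAttention.q.SCB"
    return any("SCB" in k for k in keys)
```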

@@ -133,3 +133,29 @@ def lora_token_vector_length(checkpoint: Dict[str, torch.Tensor]) -> Optional[in
            break

    return lora_token_vector_length


def convert_bundle_to_flux_transformer_checkpoint(
    transformer_state_dict: dict[str, torch.Tensor],
) -> dict[str, torch.Tensor]:
    original_state_dict: dict[str, torch.Tensor] = {}
    keys_to_remove: list[str] = []

    for k, v in transformer_state_dict.items():
        if not k.startswith("model.diffusion_model"):
            keys_to_remove.append(k)  # This can be removed in the future if we only want to delete transformer keys
            continue
        if k.endswith("scale"):
            # Scale math must be done at bfloat16 due to our current flux model
            # support limitations at inference time
            v = v.to(dtype=torch.bfloat16)
        new_key = k.replace("model.diffusion_model.", "")
        original_state_dict[new_key] = v
        keys_to_remove.append(k)

    # Remove processed keys from the original dictionary, leaving others in case
    # other model state dicts need to be pulled
    for k in keys_to_remove:
        del transformer_state_dict[k]

    return original_state_dict
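
For example (a sketch with hypothetical keys): the conversion strips the bundle prefix from transformer keys and, per the inline comment above, currently deletes all processed keys from the input dict, including non-transformer ones.

```python
import torch

bundle = {
    "model.diffusion_model.double_blocks.0.img_attn.qkv.weight": torch.zeros(1),
    "first_stage_model.decoder.conv_in.weight": torch.zeros(1),  # e.g. a bundled VAE key
}
transformer_sd = convert_bundle_to_flux_transformer_checkpoint(bundle)
print(list(transformer_sd))  # ['double_blocks.0.img_attn.qkv.weight']
print(list(bundle))          # [] -- both keys were removed from the input
```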

@@ -54,8 +54,10 @@ class InvokeLinear8bitLt(bnb.nn.Linear8bitLt):

        # See `bnb.nn.Linear8bitLt._save_to_state_dict()` for the serialization logic of SCB and weight_format.
        scb = state_dict.pop(prefix + "SCB", None)
        # weight_format is unused, but we pop it so we can validate that there are no unexpected keys.
        _weight_format = state_dict.pop(prefix + "weight_format", None)

        # Currently, we only support weight_format=0.
        weight_format = state_dict.pop(prefix + "weight_format", None)
        assert weight_format == 0

        # TODO(ryand): Technically, we should be using `strict`, `missing_keys`, `unexpected_keys`, and `error_msgs`
        # rather than raising an exception to correctly implement this API.
@@ -89,6 +91,14 @@ class InvokeLinear8bitLt(bnb.nn.Linear8bitLt):
        )
        self.bias = bias if bias is None else torch.nn.Parameter(bias)

        # Reset the state. The persisted fields are based on the initialization behaviour in
        # `bnb.nn.Linear8bitLt.__init__()`.
        new_state = bnb.MatmulLtState()
        new_state.threshold = self.state.threshold
        new_state.has_fp16_weights = False
        new_state.use_pool = self.state.use_pool
        self.state = new_state


def _convert_linear_layers_to_llm_8bit(
    module: torch.nn.Module, ignore_modules: set[str], outlier_threshold: float, prefix: str = ""

@@ -43,6 +43,11 @@ class FLUXConditioningInfo:
    clip_embeds: torch.Tensor
    t5_embeds: torch.Tensor

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        self.clip_embeds = self.clip_embeds.to(device=device, dtype=dtype)
        self.t5_embeds = self.t5_embeds.to(device=device, dtype=dtype)
        return self


@dataclass
class ConditioningFieldData:

@@ -3,10 +3,9 @@ Initialization file for invokeai.backend.util
"""

from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.util.util import GIG, Chdir, directory_size
from invokeai.backend.util.util import Chdir, directory_size

__all__ = [
    "GIG",
    "directory_size",
    "Chdir",
    "InvokeAILogger",

@@ -7,9 +7,6 @@ from pathlib import Path

from PIL import Image

# actual size of a gig
GIG = 1073741824


def slugify(value: str, allow_unicode: bool = False) -> str:
    """

@@ -16,6 +16,14 @@ module.exports = {
    'no-promise-executor-return': 'error',
    // https://eslint.org/docs/latest/rules/require-await
    'require-await': 'error',
    'no-restricted-properties': [
      'error',
      {
        object: 'crypto',
        property: 'randomUUID',
        message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
      },
    ],
  },
  overrides: [
    /**

@@ -11,6 +11,8 @@ const config: KnipConfig = {
    'src/features/nodes/types/v2/**',
    // TODO(psyche): maybe we can clean up these utils after canvas v2 release
    'src/features/controlLayers/konva/util.ts',
    // TODO(psyche): restore HRF functionality?
    'src/features/hrf/**',
  ],
  ignoreBinaries: ['only-allow'],
  paths: {

@@ -58,7 +58,7 @@
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.0.20",
    "@invoke-ai/ui-library": "^0.0.32",
    "@invoke-ai/ui-library": "^0.0.34",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",
@@ -92,7 +92,7 @@
    "react-i18next": "^14.1.3",
    "react-icons": "^5.2.1",
    "react-redux": "9.1.2",
    "react-resizable-panels": "^2.0.23",
    "react-resizable-panels": "^2.1.2",
    "react-use": "^17.5.1",
    "react-virtuoso": "^4.9.0",
    "reactflow": "^11.11.4",
@@ -136,6 +136,7 @@
    "@vitest/coverage-v8": "^1.5.0",
    "@vitest/ui": "^1.5.0",
    "concurrently": "^8.2.2",
    "csstype": "^3.1.3",
    "dpdm": "^3.14.0",
    "eslint": "^8.57.0",
    "eslint-plugin-i18next": "^6.0.9",

invokeai/frontend/web/pnpm-lock.yaml (generated, 65 lines changed)
@@ -24,8 +24,8 @@ dependencies:
    specifier: ^5.0.20
    version: 5.0.20
  '@invoke-ai/ui-library':
    specifier: ^0.0.32
    version: 0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
    specifier: ^0.0.34
    version: 0.0.34(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
  '@nanostores/react':
    specifier: ^0.7.3
    version: 0.7.3(nanostores@0.11.2)(react@18.3.1)
@@ -126,8 +126,8 @@ dependencies:
    specifier: 9.1.2
    version: 9.1.2(@types/react@18.3.3)(react@18.3.1)(redux@5.0.1)
  react-resizable-panels:
    specifier: ^2.0.23
    version: 2.0.23(react-dom@18.3.1)(react@18.3.1)
    specifier: ^2.1.2
    version: 2.1.2(react-dom@18.3.1)(react@18.3.1)
  react-use:
    specifier: ^17.5.1
    version: 17.5.1(react-dom@18.3.1)(react@18.3.1)
@@ -238,6 +238,9 @@ devDependencies:
  concurrently:
    specifier: ^8.2.2
    version: 8.2.2
  csstype:
    specifier: ^3.1.3
    version: 3.1.3
  dpdm:
    specifier: ^3.14.0
    version: 3.14.0
@@ -2053,7 +2056,7 @@ packages:
    dependencies:
      '@chakra-ui/dom-utils': 2.1.0
      react: 18.3.1
      react-focus-lock: 2.13.0(@types/react@18.3.3)(react@18.3.1)
      react-focus-lock: 2.13.2(@types/react@18.3.3)(react@18.3.1)
    transitivePeerDependencies:
      - '@types/react'
    dev: false
@@ -2250,7 +2253,7 @@ packages:
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
      react-remove-scroll: 2.5.10(@types/react@18.3.3)(react@18.3.1)
      react-remove-scroll: 2.6.0(@types/react@18.3.3)(react@18.3.1)
    transitivePeerDependencies:
      - '@types/react'
    dev: false
@@ -3571,8 +3574,8 @@ packages:
      prettier: 3.3.3
    dev: true

  /@invoke-ai/ui-library@0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-JxAoblrDu/cZ4ha9KO4ry5OWvyLUE1Dj28i+ciMaDNUpC/cN+IyiTbUBoFoPaoN5JP8Zpd/MYCcmF2qsziHDzg==}
  /@invoke-ai/ui-library@0.0.34(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-iDSjFQV2U4LfQ8+UdZ9Uy6J1iKKTSsXM0uhkWrwcIghbgN5QwY3ABVLhqJrSWVTwp7puEDhe/lRQ9QhTZBkVzw==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
@@ -10008,8 +10011,8 @@ packages:
    resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==}
    dev: false

  /react-focus-lock@2.13.0(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-w7aIcTwZwNzUp2fYQDMICy+khFwVmKmOrLF8kNsPS+dz4Oi/oxoVJ2wCMVvX6rWGriM/+mYaTyp1MRmkcs2amw==}
  /react-focus-lock@2.13.2(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-T/7bsofxYqnod2xadvuwjGKHOoL5GH7/EIPI5UyEvaU/c2CcphvGI371opFtuY/SYdbMsNiuF4HsHQ50nA/TKQ==}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
@@ -10144,25 +10147,6 @@ packages:
      tslib: 2.7.0
    dev: false

  /react-remove-scroll@2.5.10(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-m3zvBRANPBw3qxVVjEIPEQinkcwlFZ4qyomuWVpNJdv4c6MvHfXV0C3L9Jx5rr3HeBHKNRX+1jreB5QloDIJjA==}
    engines: {node: '>=10'}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
    peerDependenciesMeta:
      '@types/react':
        optional: true
    dependencies:
      '@types/react': 18.3.3
      react: 18.3.1
      react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
      react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
      tslib: 2.7.0
      use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-remove-scroll@2.5.5(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==}
    engines: {node: '>=10'}
@@ -10182,8 +10166,27 @@ packages:
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-resizable-panels@2.0.23(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-8ZKTwTU11t/FYwiwhMdtZYYyFxic5U5ysRu2YwfkAgDbUJXFvnWSJqhnzkSlW+mnDoNAzDCrJhdOSXBPA76wug==}
  /react-remove-scroll@2.6.0(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==}
    engines: {node: '>=10'}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
    peerDependenciesMeta:
      '@types/react':
        optional: true
    dependencies:
      '@types/react': 18.3.3
      react: 18.3.1
      react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
      react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
      tslib: 2.7.0
      use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-resizable-panels@2.1.2(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-Ku2Bo7JvE8RpHhl4X1uhkdeT9auPBoxAOlGTqomDUUrBAX2mVGuHYZTcWvlnJSgx0QyHIxHECgGB5XVPUbUOkQ==}
    peerDependencies:
      react: ^16.14.0 || ^17.0.0 || ^18.0.0
      react-dom: ^16.14.0 || ^17.0.0 || ^18.0.0

@@ -127,7 +127,14 @@
    "bulkDownloadRequestedDesc": "Dein Download wird vorbereitet. Dies kann ein paar Momente dauern.",
    "bulkDownloadRequestFailed": "Problem beim Download vorbereiten",
    "bulkDownloadFailed": "Download fehlgeschlagen",
    "alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen"
    "alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen",
    "selectForCompare": "Zum Vergleichen auswählen",
    "compareImage": "Bilder vergleichen",
    "exitSearch": "Suche beenden",
    "newestFirst": "Neueste zuerst",
    "oldestFirst": "Älteste zuerst",
    "openInViewer": "Im Viewer öffnen",
    "swapImages": "Bilder tauschen"
  },
  "hotkeys": {
    "keyboardShortcuts": "Tastenkürzel",
@@ -631,7 +638,8 @@
    "archived": "Archiviert",
    "noBoards": "Kein {boardType}} Ordner",
    "hideBoards": "Ordner verstecken",
    "viewBoards": "Ordner ansehen"
    "viewBoards": "Ordner ansehen",
    "deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt."
  },
  "controlnet": {
    "showAdvanced": "Zeige Erweitert",
@@ -781,7 +789,9 @@
    "batchFieldValues": "Stapelverarbeitungswerte",
    "batchQueued": "Stapelverarbeitung eingereiht",
    "graphQueued": "Graph eingereiht",
    "graphFailedToQueue": "Fehler beim Einreihen des Graphen"
    "graphFailedToQueue": "Fehler beim Einreihen des Graphen",
    "generations_one": "Generation",
    "generations_other": "Generationen"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",
@@ -1146,5 +1156,10 @@
    "noMatchingTriggers": "Keine passenden Trigger",
    "addPromptTrigger": "Prompt-Trigger hinzufügen",
    "compatibleEmbeddings": "Kompatible Einbettungen"
  },
  "ui": {
    "tabs": {
      "queue": "Warteschlange"
    }
  }
}

@@ -93,6 +93,7 @@
    "copy": "Copy",
    "copyError": "$t(gallery.copy) Error",
    "on": "On",
    "off": "Off",
    "or": "or",
    "checkpoint": "Checkpoint",
    "communityLabel": "Community",
@@ -134,6 +135,7 @@
    "nodes": "Workflows",
    "notInstalled": "Not $t(common.installed)",
    "openInNewTab": "Open in New Tab",
    "openInViewer": "Open in Viewer",
    "orderBy": "Order By",
    "outpaint": "outpaint",
    "outputs": "Outputs",
@@ -164,10 +166,10 @@
    "alpha": "Alpha",
    "selected": "Selected",
    "tab": "Tab",
    "viewing": "Viewing",
    "viewingDesc": "Review images in a large gallery view",
    "editing": "Editing",
    "editingDesc": "Edit on the Control Layers canvas",
    "view": "View",
    "viewDesc": "Review images in a large gallery view",
    "edit": "Edit",
    "editDesc": "Edit on the Canvas",
    "comparing": "Comparing",
    "comparingDesc": "Comparing two images",
    "enabled": "Enabled",
@@ -328,9 +330,13 @@
    "completedIn": "Completed in",
    "batch": "Batch",
    "origin": "Origin",
    "originCanvas": "Canvas",
    "originWorkflows": "Workflows",
    "originOther": "Other",
    "destination": "Destination",
    "upscaling": "Upscaling",
    "canvas": "Canvas",
    "generation": "Generation",
    "workflows": "Workflows",
    "other": "Other",
    "gallery": "Gallery",
    "batchFieldValues": "Batch Field Values",
    "item": "Item",
    "session": "Session",
@@ -369,6 +375,7 @@
    "useCache": "Use Cache"
  },
  "gallery": {
    "gallery": "Gallery",
    "alwaysShowImageSizeBadge": "Always Show Image Size Badge",
    "assets": "Assets",
    "autoAssignBoardOnClick": "Auto-Assign Board on Click",
@@ -381,11 +388,11 @@
    "deleteImage_one": "Delete Image",
    "deleteImage_other": "Delete {{count}} Images",
    "deleteImagePermanent": "Deleted images cannot be restored.",
    "displayBoardSearch": "Display Board Search",
    "displaySearch": "Display Search",
    "displayBoardSearch": "Board Search",
    "displaySearch": "Image Search",
    "download": "Download",
    "exitBoardSearch": "Exit Board Search",
    "exitSearch": "Exit Search",
    "exitSearch": "Exit Image Search",
    "featuresWillReset": "If you delete this image, those features will immediately be reset.",
    "galleryImageSize": "Image Size",
    "gallerySettings": "Gallery Settings",
@@ -431,7 +438,8 @@
    "compareHelp1": "Hold <Kbd>Alt</Kbd> while clicking a gallery image or using the arrow keys to change the compare image.",
    "compareHelp2": "Press <Kbd>M</Kbd> to cycle through comparison modes.",
    "compareHelp3": "Press <Kbd>C</Kbd> to swap the compared images.",
    "compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit."
    "compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit.",
    "toggleMiniViewer": "Toggle Mini Viewer"
  },
  "hotkeys": {
    "searchHotkeys": "Search Hotkeys",
@@ -702,6 +710,8 @@
    "availableModels": "Available Models",
    "baseModel": "Base Model",
    "cancel": "Cancel",
    "clipEmbed": "CLIP Embed",
    "clipVision": "CLIP Vision",
    "config": "Config",
    "convert": "Convert",
    "convertingModelBegin": "Converting Model. Please wait.",
@@ -789,6 +799,7 @@
    "settings": "Settings",
    "simpleModelPlaceholder": "URL or path to a local file or diffusers folder",
    "source": "Source",
    "spandrelImageToImage": "Image to Image (Spandrel)",
    "starterModels": "Starter Models",
    "starterModelsInModelManager": "Starter Models can be found in Model Manager",
    "syncModels": "Sync Models",
@@ -797,6 +808,7 @@
    "loraTriggerPhrases": "LoRA Trigger Phrases",
    "mainModelTriggerPhrases": "Main Model Trigger Phrases",
    "typePhraseHere": "Type phrase here",
    "t5Encoder": "T5 Encoder",
    "upcastAttention": "Upcast Attention",
    "uploadImage": "Upload Image",
    "urlOrLocalPath": "URL or Local Path",
@@ -1006,6 +1018,8 @@
    "noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
    "incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
    "noModelSelected": "No model selected",
    "canvasManagerNotLoaded": "Canvas Manager not loaded",
    "canvasBusy": "Canvas is busy",
    "noPrompts": "No prompts generated",
    "noNodesInGraph": "No nodes in graph",
    "systemDisconnected": "System disconnected",
@@ -1037,12 +1051,11 @@
    "scaledHeight": "Scaled H",
    "scaledWidth": "Scaled W",
    "scheduler": "Scheduler",
    "seamlessXAxis": "Seamless Tiling X Axis",
    "seamlessYAxis": "Seamless Tiling Y Axis",
    "seamlessXAxis": "Seamless X Axis",
    "seamlessYAxis": "Seamless Y Axis",
    "seed": "Seed",
    "imageActions": "Image Actions",
    "sendToImg2Img": "Send to Image to Image",
    "sendToUnifiedCanvas": "Send To Unified Canvas",
    "sendToCanvas": "Send To Canvas",
    "sendToUpscale": "Send To Upscale",
    "showOptionsPanel": "Show Side Panel (O or T)",
    "shuffle": "Shuffle Seed",
@@ -1183,8 +1196,8 @@
    "problemSavingMaskDesc": "Unable to export mask",
    "prunedQueue": "Pruned Queue",
    "resetInitialImage": "Reset Initial Image",
    "sentToImageToImage": "Sent To Image To Image",
    "sentToUnifiedCanvas": "Sent to Unified Canvas",
    "sentToCanvas": "Sent to Canvas",
    "sentToUpscale": "Sent to Upscale",
    "serverError": "Server Error",
    "sessionRef": "Session: {{sessionId}}",
    "setAsCanvasInitialImage": "Set as canvas initial image",
@@ -1646,6 +1659,20 @@
    "storeNotInitialized": "Store is not initialized"
  },
  "controlLayers": {
    "bookmark": "Bookmark for Quick Switch",
    "fitBboxToLayers": "Fit Bbox To Layers",
    "removeBookmark": "Remove Bookmark",
    "saveCanvasToGallery": "Save Canvas to Gallery",
    "saveBboxToGallery": "Save Bbox to Gallery",
    "sendBboxToRegionalIPAdapter": "Send Bbox to Regional IP Adapter",
    "sendBboxToGlobalIPAdapter": "Send Bbox to Global IP Adapter",
    "sendBboxToControlLayer": "Send Bbox to Control Layer",
    "sendBboxToRasterLayer": "Send Bbox to Raster Layer",
    "savedToGalleryOk": "Saved to Gallery",
    "savedToGalleryError": "Error saving to gallery",
    "mergeVisible": "Merge Visible",
    "mergeVisibleOk": "Merged visible layers",
    "mergeVisibleError": "Error merging visible layers",
    "clearHistory": "Clear History",
    "generateMode": "Generate",
    "generateModeDesc": "Create individual images. Generated images are added directly to the gallery.",
@@ -1657,6 +1684,7 @@
    "clearCaches": "Clear Caches",
    "recalculateRects": "Recalculate Rects",
    "clipToBbox": "Clip Strokes to Bbox",
    "compositeMaskedRegions": "Composite Masked Regions",
    "addLayer": "Add Layer",
    "duplicate": "Duplicate",
    "moveToFront": "Move to Front",
@@ -1675,35 +1703,49 @@
    "deletePrompt": "Delete Prompt",
    "resetRegion": "Reset Region",
    "debugLayers": "Debug Layers",
    "showHUD": "Show HUD",
    "rectangle": "Rectangle",
    "maskFill": "Mask Fill",
    "addPositivePrompt": "Add $t(common.positivePrompt)",
    "addNegativePrompt": "Add $t(common.negativePrompt)",
    "addIPAdapter": "Add $t(common.ipAdapter)",
    "addRasterLayer": "Add $t(controlLayers.rasterLayer)",
    "addControlLayer": "Add $t(controlLayers.controlLayer)",
    "addInpaintMask": "Add $t(controlLayers.inpaintMask)",
    "addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
    "regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
    "raster": "Raster",
    "rasterLayer_one": "Raster Layer",
    "controlLayer_one": "Control Layer",
    "inpaintMask_one": "Inpaint Mask",
    "regionalGuidance_one": "Regional Guidance",
    "ipAdapter_one": "IP Adapter",
    "rasterLayer_other": "Raster Layers",
    "controlLayer_other": "Control Layers",
    "inpaintMask_other": "Inpaint Masks",
    "regionalGuidance_other": "Regional Guidance",
    "ipAdapter_other": "IP Adapters",
    "rasterLayer": "Raster Layer",
    "controlLayer": "Control Layer",
    "inpaintMask": "Inpaint Mask",
    "regionalGuidance": "Regional Guidance",
    "ipAdapter": "IP Adapter",
    "sendingToCanvas": "Sending to Canvas",
    "sendingToGallery": "Sending to Gallery",
    "sendToGallery": "Send To Gallery",
    "sendToGalleryDesc": "Generations will be sent to the gallery.",
    "sendToCanvas": "Send To Canvas",
    "sendToCanvasDesc": "Generations will be staged onto the canvas.",
    "rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
    "controlLayer_withCount_one": "$t(controlLayers.controlLayer)",
    "inpaintMask_withCount_one": "$t(controlLayers.inpaintMask)",
    "regionalGuidance_withCount_one": "$t(controlLayers.regionalGuidance)",
    "ipAdapter_withCount_one": "$t(controlLayers.ipAdapter)",
    "rasterLayer_withCount_other": "Raster Layers",
    "controlLayer_withCount_other": "Control Layers",
    "inpaintMask_withCount_other": "Inpaint Masks",
    "regionalGuidance_withCount_other": "Regional Guidance",
    "ipAdapter_withCount_other": "IP Adapters",
    "opacity": "Opacity",
    "regionalGuidance_withCount_hidden": "Regional Guidance ({{count}} hidden)",
    "controlAdapters_withCount_hidden": "Control Adapters ({{count}} hidden)",
    "controlLayers_withCount_hidden": "Control Layers ({{count}} hidden)",
    "rasterLayers_withCount_hidden": "Raster Layers ({{count}} hidden)",
    "ipAdapters_withCount_hidden": "IP Adapters ({{count}} hidden)",
    "globalIPAdapters_withCount_hidden": "Global IP Adapters ({{count}} hidden)",
    "inpaintMasks_withCount_hidden": "Inpaint Masks ({{count}} hidden)",
    "regionalGuidance_withCount_visible": "Regional Guidance ({{count}})",
    "controlAdapters_withCount_visible": "Control Adapters ({{count}})",
    "controlLayers_withCount_visible": "Control Layers ({{count}})",
    "rasterLayers_withCount_visible": "Raster Layers ({{count}})",
    "ipAdapters_withCount_visible": "IP Adapters ({{count}})",
    "globalIPAdapters_withCount_visible": "Global IP Adapters ({{count}})",
    "inpaintMasks_withCount_visible": "Inpaint Masks ({{count}})",
    "globalControlAdapter": "Global $t(controlnet.controlAdapter_one)",
    "globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
@@ -1716,8 +1758,10 @@
    "clearProcessor": "Clear Processor",
    "resetProcessor": "Reset Processor to Defaults",
    "noLayersAdded": "No Layers Added",
    "layers_one": "Layer",
    "layers_other": "Layers",
    "layer_one": "Layer",
    "layer_other": "Layers",
    "layer_withCount_one": "Layer ({{count}})",
    "layer_withCount_other": "Layers ({{count}})",
    "objects_zero": "empty",
    "objects_one": "{{count}} object",
    "objects_other": "{{count}} objects",
@@ -1737,6 +1781,7 @@
    "flipHorizontal": "Flip Horizontal",
    "flipVertical": "Flip Vertical",
    "fill": {
      "fillColor": "Fill Color",
      "fillStyle": "Fill Style",
      "solid": "Solid",
      "grid": "Grid",
@@ -1752,16 +1797,54 @@
      "bbox": "Bbox",
      "move": "Move",
      "view": "View",
      "transform": "Transform",
      "colorPicker": "Color Picker"
    },
    "filter": {
      "filter": "Filter",
      "filters": "Filters",
      "filterType": "Filter Type",
      "preview": "Preview",
      "autoProcess": "Auto Process",
      "reset": "Reset",
      "process": "Process",
      "apply": "Apply",
      "cancel": "Cancel",
      "spandrel": {
        "label": "Image-to-Image Model",
        "description": "Run an image-to-image model on the selected layer.",
        "paramModel": "Model",
        "paramAutoScale": "Auto Scale",
        "paramAutoScaleDesc": "The selected model will be run until the target scale is reached.",
        "paramScale": "Target Scale"
      }
    },
    "transform": {
      "transform": "Transform",
      "fitToBbox": "Fit to Bbox",
      "reset": "Reset",
      "apply": "Apply",
      "cancel": "Cancel"
    },
    "settings": {
      "snapToGrid": {
        "label": "Snap to Grid",
        "on": "On",
        "off": "Off"
      }
    },
    "HUD": {
      "bbox": "Bbox",
      "scaledBbox": "Scaled Bbox",
      "autoSave": "Auto Save",
      "entityStatus": {
        "selectedEntity": "Selected Entity",
        "selectedEntityIs": "Selected Entity is",
        "isFiltering": "is filtering",
        "isTransforming": "is transforming",
        "isLocked": "is locked",
        "isHidden": "is hidden",
        "isDisabled": "is disabled",
        "enabled": "Enabled"
      }
    }
  },
  "upscaling": {
@@ -1848,7 +1931,9 @@
    "queue": "Queue",
    "queueTab": "$t(ui.tabs.queue) $t(common.tab)",
    "upscaling": "Upscaling",
    "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
    "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
    "gallery": "Gallery",
    "galleryTab": "$t(ui.tabs.gallery) $t(common.tab)"
    }
  },
  "system": {

@@ -86,15 +86,15 @@
    "loadMore": "Cargar más",
    "noImagesInGallery": "No hay imágenes para mostrar",
    "deleteImage_one": "Eliminar Imagen",
    "deleteImage_many": "",
    "deleteImage_other": "",
    "deleteImage_many": "Eliminar {{count}} Imágenes",
    "deleteImage_other": "Eliminar {{count}} Imágenes",
    "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
    "assets": "Activos",
    "autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
  },
  "hotkeys": {
    "keyboardShortcuts": "Atajos de teclado",
    "appHotkeys": "Atajos de applicación",
    "appHotkeys": "Atajos de aplicación",
    "generalHotkeys": "Atajos generales",
    "galleryHotkeys": "Atajos de galería",
    "unifiedCanvasHotkeys": "Atajos de lienzo unificado",
@@ -535,7 +535,7 @@
    "bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
    "deleteBoardAndImages": "Borrar el panel y las imágenes",
    "loading": "Cargando...",
    "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
    "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
    "move": "Mover",
    "menuItemAutoAdd": "Agregar automáticamente a este panel",
    "searchBoard": "Buscando paneles…",
@@ -549,7 +549,13 @@
    "imagesWithCount_other": "{{count}} imágenes",
    "assetsWithCount_one": "{{count}} activo",
    "assetsWithCount_many": "{{count}} activos",
    "assetsWithCount_other": "{{count}} activos"
    "assetsWithCount_other": "{{count}} activos",
    "hideBoards": "Ocultar Paneles",
    "addPrivateBoard": "Agregar un tablero privado",
    "addSharedBoard": "Agregar Panel Compartido",
    "boards": "Paneles",
    "archiveBoard": "Archivar Panel",
    "archived": "Archivado"
  },
  "accordions": {
    "compositing": {

@@ -496,7 +496,9 @@
    "main": "Principali",
    "noModelsInstalledDesc1": "Installa i modelli con",
    "ipAdapters": "Adattatori IP",
    "noMatchingModels": "Nessun modello corrispondente"
    "noMatchingModels": "Nessun modello corrispondente",
    "starterModelsInModelManager": "I modelli iniziali possono essere trovati in Gestione Modelli",
    "spandrelImageToImage": "Immagine a immagine (Spandrel)"
  },
  "parameters": {
    "images": "Immagini",
@@ -510,7 +512,7 @@
    "perlinNoise": "Rumore Perlin",
    "type": "Tipo",
    "strength": "Forza",
    "upscaling": "Ampliamento",
    "upscaling": "Amplia",
    "scale": "Scala",
    "imageFit": "Adatta l'immagine iniziale alle dimensioni di output",
    "scaleBeforeProcessing": "Scala prima dell'elaborazione",
@@ -593,7 +595,7 @@
    "globalPositivePromptPlaceholder": "Prompt positivo globale",
    "globalNegativePromptPlaceholder": "Prompt negativo globale",
    "processImage": "Elabora Immagine",
    "sendToUpscale": "Invia a Ampliare",
    "sendToUpscale": "Invia a Amplia",
    "postProcessing": "Post-elaborazione (Shift + U)"
  },
  "settings": {
@@ -1420,7 +1422,7 @@
    "paramUpscaleMethod": {
      "heading": "Metodo di ampliamento",
      "paragraphs": [
        "Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
        "Metodo utilizzato per ampliare l'immagine per la correzione ad alta risoluzione."
      ]
    },
    "patchmatchDownScaleSize": {
@@ -1528,7 +1530,7 @@
    },
    "upscaleModel": {
      "paragraphs": [
        "Il modello di ampliamento (Upscale), scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
        "Il modello di ampliamento, scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
      ],
      "heading": "Modello di ampliamento"
    },
@@ -1720,26 +1722,27 @@
    "modelsTab": "$t(ui.tabs.models) $t(common.tab)",
    "queue": "Coda",
    "queueTab": "$t(ui.tabs.queue) $t(common.tab)",
    "upscaling": "Ampliamento",
    "upscaling": "Amplia",
    "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
    }
  },
  "upscaling": {
    "creativity": "Creatività",
    "structure": "Struttura",
    "upscaleModel": "Modello di Ampliamento",
    "upscaleModel": "Modello di ampliamento",
    "scale": "Scala",
    "missingModelsWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare i modelli richiesti:",
    "mainModelDesc": "Modello principale (architettura SD1.5 o SDXL)",
    "tileControlNetModelDesc": "Modello Tile ControlNet per l'architettura del modello principale scelto",
    "upscaleModelDesc": "Modello per l'ampliamento (da immagine a immagine)",
    "upscaleModelDesc": "Modello per l'ampliamento (immagine a immagine)",
    "missingUpscaleInitialImage": "Immagine iniziale mancante per l'ampliamento",
    "missingUpscaleModel": "Modello per l’ampliamento mancante",
    "missingTileControlNetModel": "Nessun modello ControlNet Tile valido installato",
    "postProcessingModel": "Modello di post-elaborazione",
    "postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
    "exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
    "exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata."
    "exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
    "upscale": "Amplia"
  },
  "upsell": {
    "inviteTeammates": "Invita collaboratori",
@@ -1789,6 +1792,7 @@
    "positivePromptColumn": "'prompt' o 'positive_prompt'",
    "noTemplates": "Nessun modello",
    "acceptedColumnsKeys": "Colonne/chiavi accettate:",
    "templateActions": "Azioni modello"
    "templateActions": "Azioni modello",
    "promptTemplateCleared": "Modello di prompt cancellato"
  }
}
@@ -501,7 +501,8 @@
    "noModelsInstalled": "Нет установленных моделей",
    "noModelsInstalledDesc1": "Установите модели с помощью",
    "noMatchingModels": "Нет подходящих моделей",
-   "ipAdapters": "IP адаптеры"
+   "ipAdapters": "IP адаптеры",
+   "starterModelsInModelManager": "Стартовые модели можно найти в Менеджере моделей"
  },
  "parameters": {
    "images": "Изображения",
@@ -1758,7 +1759,8 @@
    "postProcessingModel": "Модель постобработки",
    "tileControlNetModelDesc": "Модель ControlNet для выбранной архитектуры основной модели",
    "missingModelsWarning": "Зайдите в <LinkComponent>Менеджер моделей</LinkComponent> чтоб установить необходимые модели:",
-   "postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img)."
+   "postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img).",
+   "upscale": "Увеличить"
  },
  "stylePresets": {
    "noMatchingTemplates": "Нет подходящих шаблонов",
@@ -1804,7 +1806,8 @@
    "noTemplates": "Нет шаблонов",
    "promptTemplatesDesc2": "Используйте строку-заполнитель <Pre>{{placeholder}}</Pre>, чтобы указать место, куда должен быть включен ваш запрос в шаблоне.",
    "searchByName": "Поиск по имени",
-   "shared": "Общий"
+   "shared": "Общий",
+   "promptTemplateCleared": "Шаблон запроса создан"
  },
  "upsell": {
    "inviteTeammates": "Пригласите членов команды",

@@ -154,7 +154,8 @@
    "displaySearch": "显示搜索",
    "stretchToFit": "拉伸以适应",
    "exitCompare": "退出对比",
-   "compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。"
+   "compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。",
+   "go": "运行"
  },
  "hotkeys": {
    "keyboardShortcuts": "快捷键",
@@ -494,7 +495,9 @@
    "huggingFacePlaceholder": "所有者或模型名称",
    "huggingFaceRepoID": "HuggingFace仓库ID",
    "loraTriggerPhrases": "LoRA 触发词",
-   "ipAdapters": "IP适配器"
+   "ipAdapters": "IP适配器",
+   "spandrelImageToImage": "图生图(Spandrel)",
+   "starterModelsInModelManager": "您可以在模型管理器中找到初始模型"
  },
  "parameters": {
    "images": "图像",
@@ -695,7 +698,9 @@
    "outOfMemoryErrorDesc": "您当前的生成设置已超出系统处理能力.请调整设置后再次尝试.",
    "parametersSet": "参数已恢复",
    "errorCopied": "错误信息已复制",
-   "modelImportCanceled": "模型导入已取消"
+   "modelImportCanceled": "模型导入已取消",
+   "importFailed": "导入失败",
+   "importSuccessful": "导入成功"
  },
  "unifiedCanvas": {
    "layer": "图层",
@@ -1705,12 +1710,55 @@
    "missingModelsWarning": "请访问<LinkComponent>模型管理器</LinkComponent> 安装所需的模型:",
    "mainModelDesc": "主模型(SD1.5或SDXL架构)",
    "exceedsMaxSize": "放大设置超出了最大尺寸限制",
-   "exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择."
+   "exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择.",
+   "upscale": "放大"
  },
  "upsell": {
    "inviteTeammates": "邀请团队成员",
    "professional": "专业",
    "professionalUpsell": "可在 Invoke 的专业版中使用.点击此处或访问 invoke.com/pricing 了解更多详情.",
    "shareAccess": "共享访问权限"
  },
  "stylePresets": {
+   "positivePrompt": "正向提示词",
+   "preview": "预览",
+   "deleteImage": "删除图像",
+   "deleteTemplate": "删除模版",
+   "deleteTemplate2": "您确定要删除这个模板吗?请注意,删除后无法恢复.",
+   "importTemplates": "导入提示模板,支持CSV或JSON格式",
+   "insertPlaceholder": "插入一个占位符",
+   "myTemplates": "我的模版",
+   "name": "名称",
+   "type": "类型",
+   "unableToDeleteTemplate": "无法删除提示模板",
+   "updatePromptTemplate": "更新提示词模版",
+   "exportPromptTemplates": "导出我的提示模板为CSV格式",
+   "exportDownloaded": "导出已下载",
+   "noMatchingTemplates": "无匹配的模版",
+   "promptTemplatesDesc1": "提示模板可以帮助您在编写提示时添加预设的文本内容.",
+   "promptTemplatesDesc3": "如果您没有使用占位符,那么模板的内容将会被添加到您提示的末尾.",
+   "searchByName": "按名称搜索",
+   "shared": "已分享",
+   "sharedTemplates": "已分享的模版",
+   "templateActions": "模版操作",
+   "templateDeleted": "提示模版已删除",
+   "toggleViewMode": "切换显示模式",
+   "uploadImage": "上传图像",
+   "active": "激活",
+   "choosePromptTemplate": "选择提示词模板",
+   "clearTemplateSelection": "清除模版选择",
+   "copyTemplate": "拷贝模版",
+   "createPromptTemplate": "创建提示词模版",
+   "defaultTemplates": "默认模版",
+   "editTemplate": "编辑模版",
+   "exportFailed": "无法生成并下载CSV文件",
+   "flatten": "将选定的模板内容合并到当前提示中",
+   "negativePrompt": "反向提示词",
+   "promptTemplateCleared": "提示模板已清除",
+   "useForTemplate": "用于提示词模版",
+   "viewList": "预览模版列表",
+   "viewModeTooltip": "这是您的提示在当前选定的模板下的预览效果。如需编辑提示,请直接在文本框中点击进行修改.",
+   "noTemplates": "无模版",
+   "private": "私密"
+  }
+}

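A note on the *_one / *_many / *_other keys added in the locale hunks above (my explanation, not part of the diff): these are i18next plural suffixes. Passing a count option makes i18next pick the variant matching the locale's plural rules and interpolate {{count}}. A minimal sketch, assuming the keys live under a boards namespace as the surrounding context suggests:

import i18n from 'i18next';

i18n
  .init({
    lng: 'es',
    resources: {
      es: {
        translation: {
          boards: {
            assetsWithCount_one: '{{count}} activo',
            assetsWithCount_many: '{{count}} activos',
            assetsWithCount_other: '{{count}} activos',
          },
        },
      },
    },
  })
  .then(() => {
    i18n.t('boards.assetsWithCount', { count: 1 }); // "1 activo"
    i18n.t('boards.assetsWithCount', { count: 5 }); // "5 activos"
  });
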
@@ -16,6 +16,7 @@ import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicP
 import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
 import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
 import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
+import { activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
 import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
 import SettingsModal from 'features/system/components/SettingsModal/SettingsModal';
 import { configChanged } from 'features/system/store/configSlice';
@@ -43,10 +44,17 @@ interface Props {
     action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
   };
   selectedWorkflowId?: string;
-  destination?: TabName | undefined;
+  selectedStylePresetId?: string;
+  destination?: TabName;
 }

-const App = ({ config = DEFAULT_CONFIG, selectedImage, selectedWorkflowId, destination }: Props) => {
+const App = ({
+  config = DEFAULT_CONFIG,
+  selectedImage,
+  selectedWorkflowId,
+  selectedStylePresetId,
+  destination,
+}: Props) => {
   const language = useAppSelector(selectLanguage);
   const logger = useLogger('system');
   const dispatch = useAppDispatch();
@@ -85,6 +93,12 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage, selectedWorkflowId, desti
     }
   }, [selectedWorkflowId, getAndLoadWorkflow]);

+  useEffect(() => {
+    if (selectedStylePresetId) {
+      dispatch(activeStylePresetIdChanged(selectedStylePresetId));
+    }
+  }, [dispatch, selectedStylePresetId]);
+
   useEffect(() => {
     if (destination) {
       dispatch(setActiveTab(destination));

@@ -45,6 +45,7 @@ interface Props extends PropsWithChildren {
     action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
   };
   selectedWorkflowId?: string;
+  selectedStylePresetId?: string;
   destination?: TabName;
   customStarUi?: CustomStarUi;
   socketOptions?: Partial<ManagerOptions & SocketOptions>;
@@ -66,6 +67,7 @@ const InvokeAIUI = ({
   queueId,
   selectedImage,
   selectedWorkflowId,
+  selectedStylePresetId,
   destination,
   customStarUi,
   socketOptions,
@@ -227,6 +229,7 @@ const InvokeAIUI = ({
           config={config}
           selectedImage={selectedImage}
           selectedWorkflowId={selectedWorkflowId}
+          selectedStylePresetId={selectedStylePresetId}
           destination={destination}
         />
       </AppDndContext>

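Taken together, the two hunks above thread a new selectedStylePresetId prop from the embedding host through InvokeAIUI into App, where the effect dispatches activeStylePresetIdChanged when the prop is provided. A hedged sketch of host-side usage (the surrounding props and values are illustrative assumptions, not from this diff):

import InvokeAIUI from 'app/components/InvokeAIUI';

// Hypothetical host render: preselect a style preset and land on the canvas tab.
const Host = () => (
  <InvokeAIUI
    apiUrl="http://localhost:9090"        // assumed host configuration
    selectedStylePresetId="my-preset-id"  // hypothetical preset id
    destination="canvas"
  />
);
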
@@ -21,10 +21,16 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
       direction,
       shadows: {
         ..._theme.shadows,
+        selected:
+          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+        hoverSelected:
+          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+        hoverUnselected:
+          'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
         selectedForCompare:
-          '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-400)',
+          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
         hoverSelectedForCompare:
-          '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-300)',
+          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
       },
     });
   }, [direction]);

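For context (my note, not from the diff): these named shadows are Chakra UI theme tokens, so components can consume them by name; the SelectionOverlay component removed later in this set did exactly that via its shadow prop before the gallery switched to the data-attribute sx approach shown in IAIDndImage below. A minimal sketch of token consumption:

import { Box } from '@invoke-ai/ui-library';

// Resolves to the 'hoverSelected' entry defined in the theme's shadows map above.
const Example = () => <Box shadow="hoverSelected" />;
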
@@ -1,5 +1,5 @@
 import type { TypedStartListening } from '@reduxjs/toolkit';
-import { createListenerMiddleware } from '@reduxjs/toolkit';
+import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
 import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
 import { addStagingListeners } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
 import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
@@ -9,6 +9,7 @@ import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddlewar
 import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
 import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
 import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
+import { addCancellationsListeners } from 'app/store/middleware/listenerMiddleware/listeners/cancellationsListeners';
 import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
 import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes';
 import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
@@ -40,6 +41,8 @@ export type AppStartListening = TypedStartListening<RootState, AppDispatch>;

 const startAppListening = listenerMiddleware.startListening as AppStartListening;

+export const addAppListener = addListener.withTypes<RootState, AppDispatch>();
+
 /**
  * The RTK listener middleware is a lightweight alternative to sagas/observables.
  *
@@ -119,3 +122,5 @@ addDynamicPromptsListener(startAppListening);

 addSetDefaultSettingsListener(startAppListening);
 // addControlAdapterPreprocessor(startAppListening);
+
+addCancellationsListeners(startAppListening);

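addListener.withTypes is standard Redux Toolkit: it produces a pre-typed action creator whose dispatch registers a listener dynamically at runtime. A sketch of how the exported addAppListener might be used (the store variable, predicate, and state paths are hypothetical, not from this diff):

import { addAppListener } from 'app/store/middleware/listenerMiddleware';

// Dispatching the returned action registers the listener.
store.dispatch(
  addAppListener({
    // Hypothetical trigger: run whenever the active tab changes.
    predicate: (_action, currentState, previousState) => currentState.ui.activeTab !== previousState.ui.activeTab,
    effect: (_action, api) => {
      console.log('active tab is now', api.getState().ui.activeTab);
    },
  })
);
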
@@ -1,37 +1,34 @@
 import { isAnyOf } from '@reduxjs/toolkit';
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
-import {
-  sessionStagingAreaImageAccepted,
-  sessionStagingAreaReset,
-} from 'features/controlLayers/store/canvasSessionSlice';
-import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
+import { canvasReset, rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
+import { stagingAreaImageAccepted, stagingAreaReset } from 'features/controlLayers/store/canvasStagingAreaSlice';
 import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
 import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
 import { imageDTOToImageObject } from 'features/controlLayers/store/types';
 import { toast } from 'features/toast/toast';
 import { t } from 'i18next';
 import { queueApi } from 'services/api/endpoints/queue';
 import { $lastCanvasProgressEvent } from 'services/events/setEventListeners';
 import { assert } from 'tsafe';

 const log = logger('canvas');

+const matchCanvasOrStagingAreaRest = isAnyOf(stagingAreaReset, canvasReset);
+
 export const addStagingListeners = (startAppListening: AppStartListening) => {
   startAppListening({
-    actionCreator: sessionStagingAreaReset,
+    matcher: matchCanvasOrStagingAreaRest,
     effect: async (_, { dispatch }) => {
       try {
         const req = dispatch(
-          queueApi.endpoints.cancelByBatchOrigin.initiate(
-            { origin: 'canvas' },
+          queueApi.endpoints.cancelByBatchDestination.initiate(
+            { destination: 'canvas' },
             { fixedCacheKey: 'cancelByBatchOrigin' }
           )
         );
         const { canceled } = await req.unwrap();
         req.reset();

         $lastCanvasProgressEvent.set(null);

         if (canceled > 0) {
           log.debug(`Canceled ${canceled} canvas batches`);
           toast({
@@ -52,11 +49,11 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
   });

   startAppListening({
-    actionCreator: sessionStagingAreaImageAccepted,
+    actionCreator: stagingAreaImageAccepted,
     effect: (action, api) => {
       const { index } = action.payload;
       const state = api.getState();
-      const stagingAreaImage = state.canvasSession.stagedImages[index];
+      const stagingAreaImage = state.canvasStagingArea.stagedImages[index];

       assert(stagingAreaImage, 'No staged image found to accept');
       const { x, y } = selectCanvasSlice(state).bbox.rect;
@@ -68,8 +65,8 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
         objects: [imageObject],
       };

-      api.dispatch(rasterLayerAdded({ overrides, isSelected: true }));
-      api.dispatch(sessionStagingAreaReset());
+      api.dispatch(rasterLayerAdded({ overrides, isSelected: false }));
+      api.dispatch(stagingAreaReset());
     },
   });
 };

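isAnyOf (a standard Redux Toolkit utility) builds a matcher that fires for any of the given action creators; that is what lets the single listener above cover both a staging-area reset and a full canvas reset. A tiny illustrative sketch (the action names are placeholders):

import { createAction, isAnyOf } from '@reduxjs/toolkit';

const a = createAction('demo/a');
const b = createAction('demo/b');
const matchAOrB = isAnyOf(a, b);

matchAOrB(a());                // true
matchAOrB(b());                // true
matchAOrB({ type: 'demo/c' }); // false
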
@@ -0,0 +1,137 @@
+import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
+import { $lastCanvasProgressEvent } from 'features/controlLayers/store/canvasSlice';
+import { queueApi } from 'services/api/endpoints/queue';
+
+/**
+ * To prevent a race condition where a progress event arrives after a successful cancellation, we need to keep track of
+ * cancellations:
+ * - In the listeners below, we track and update the cancellations object
+ * - When the user queues a batch, we should reset the cancellations, also handled in the listeners below
+ * - When we get a progress event, we should check if the event is cancelled before setting the event
+ *
+ * We have a few ways that cancellations are effected, so we need to track them all:
+ * - by queue item id (in this case, we will compare the session_id and not the item_id)
+ * - by batch id
+ * - by destination
+ * - by clearing the queue
+ */
+type Cancellations = {
+  sessionIds: Set<string>;
+  batchIds: Set<string>;
+  destinations: Set<string>;
+  clearQueue: boolean;
+};
+
+const resetCancellations = (): void => {
+  cancellations.clearQueue = false;
+  cancellations.sessionIds.clear();
+  cancellations.batchIds.clear();
+  cancellations.destinations.clear();
+};
+
+const cancellations: Cancellations = {
+  sessionIds: new Set(),
+  batchIds: new Set(),
+  destinations: new Set(),
+  clearQueue: false,
+} as Readonly<Cancellations>;
+
+/**
+ * Checks if an item is cancelled, used to prevent race conditions with event handling.
+ *
+ * To use this, provide the session_id, batch_id and destination from the event payload.
+ */
+export const getIsCancelled = (item: {
+  session_id: string;
+  batch_id: string;
+  destination?: string | null;
+}): boolean => {
+  if (cancellations.clearQueue) {
+    return true;
+  }
+  if (cancellations.sessionIds.has(item.session_id)) {
+    return true;
+  }
+  if (cancellations.batchIds.has(item.batch_id)) {
+    return true;
+  }
+  if (item.destination && cancellations.destinations.has(item.destination)) {
+    return true;
+  }
+  return false;
+};
+
+export const addCancellationsListeners = (startAppListening: AppStartListening) => {
+  // When we get a cancellation, we may need to clear the last progress event - next few listeners handle those cases.
+  // Maybe we could use the `getIsCancelled` util here, but I think that could introduce _another_ race condition...
+  startAppListening({
+    matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
+    effect: () => {
+      resetCancellations();
+    },
+  });
+
+  startAppListening({
+    matcher: queueApi.endpoints.cancelByBatchDestination.matchFulfilled,
+    effect: (action) => {
+      cancellations.destinations.add(action.meta.arg.originalArgs.destination);
+
+      const event = $lastCanvasProgressEvent.get();
+      if (!event) {
+        return;
+      }
+      const { session_id, batch_id, destination } = event;
+      if (getIsCancelled({ session_id, batch_id, destination })) {
+        $lastCanvasProgressEvent.set(null);
+      }
+    },
+  });
+
+  startAppListening({
+    matcher: queueApi.endpoints.cancelQueueItem.matchFulfilled,
+    effect: (action) => {
+      cancellations.sessionIds.add(action.payload.session_id);
+
+      const event = $lastCanvasProgressEvent.get();
+      if (!event) {
+        return;
+      }
+      const { session_id, batch_id, destination } = event;
+      if (getIsCancelled({ session_id, batch_id, destination })) {
+        $lastCanvasProgressEvent.set(null);
+      }
+    },
+  });
+
+  startAppListening({
+    matcher: queueApi.endpoints.cancelByBatchIds.matchFulfilled,
+    effect: (action) => {
+      for (const batch_id of action.meta.arg.originalArgs.batch_ids) {
+        cancellations.batchIds.add(batch_id);
+      }
+      const event = $lastCanvasProgressEvent.get();
+      if (!event) {
+        return;
+      }
+      const { session_id, batch_id, destination } = event;
+      if (getIsCancelled({ session_id, batch_id, destination })) {
+        $lastCanvasProgressEvent.set(null);
+      }
+    },
+  });
+
+  startAppListening({
+    matcher: queueApi.endpoints.clearQueue.matchFulfilled,
+    effect: () => {
+      cancellations.clearQueue = true;
+      const event = $lastCanvasProgressEvent.get();
+      if (!event) {
+        return;
+      }
+      const { session_id, batch_id, destination } = event;
+      if (getIsCancelled({ session_id, batch_id, destination })) {
+        $lastCanvasProgressEvent.set(null);
+      }
+    },
+  });
+};

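The doc comment in the new file says progress events must be checked against the recorded cancellations before being stored. A hedged sketch of that consumer side (the socket variable, event name, and payload shape are assumptions; the real wiring lives in services/events and is not shown in this diff):

import { getIsCancelled } from 'app/store/middleware/listenerMiddleware/listeners/cancellationsListeners';
import { $lastCanvasProgressEvent } from 'features/controlLayers/store/canvasSlice';

// Hypothetical progress handler: drop events that raced past a cancellation.
socket.on('invocation_progress', (event) => {
  const { session_id, batch_id, destination } = event;
  if (getIsCancelled({ session_id, batch_id, destination })) {
    return;
  }
  $lastCanvasProgressEvent.set(event);
});
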
@@ -4,8 +4,12 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
 import type { SerializableObject } from 'common/types';
 import type { Result } from 'common/util/result';
 import { isErr, withResult, withResultAsync } from 'common/util/result';
+import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
-import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasSessionSlice';
-import { $canvasManager } from 'features/controlLayers/store/canvasSlice';
+import {
+  selectIsStaging,
+  stagingAreaReset,
+  stagingAreaStartedStaging,
+} from 'features/controlLayers/store/canvasStagingAreaSlice';
 import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
 import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
 import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -31,14 +35,14 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)

       let didStartStaging = false;

-      if (!state.canvasSession.isStaging && state.canvasSession.mode === 'compose') {
-        dispatch(sessionStartedStaging());
+      if (!selectIsStaging(state) && state.canvasSettings.sendToCanvas) {
+        dispatch(stagingAreaStartedStaging());
         didStartStaging = true;
       }

       const abortStaging = () => {
-        if (didStartStaging && getState().canvasSession.isStaging) {
-          dispatch(sessionStagingAreaReset());
+        if (didStartStaging && selectIsStaging(getState())) {
+          dispatch(stagingAreaReset());
         }
       };

@@ -70,7 +74,11 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)

       const { g, noise, posCond } = buildGraphResult.value;

-      const prepareBatchResult = withResult(() => prepareLinearUIBatch(state, g, prepend, noise, posCond));
+      const destination = state.canvasSettings.sendToCanvas ? 'canvas' : 'gallery';
+
+      const prepareBatchResult = withResult(() =>
+        prepareLinearUIBatch(state, g, prepend, noise, posCond, 'generation', destination)
+      );

       if (isErr(prepareBatchResult)) {
         log.error({ error: serializeError(prepareBatchResult.error) }, 'Failed to prepare batch');

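Each linear-UI batch is now tagged with an origin ('generation') and a destination ('canvas' or 'gallery'); the staging listener earlier in this set relies on that tag when it calls cancelByBatchDestination. A hedged sketch of the resulting enqueue arg shape (field names inferred from the hunks in this set, not a verbatim API listing):

const batchConfig = {
  batch: {
    graph,                 // the built generation graph (assumed field)
    origin: 'generation',  // which feature queued this batch
    destination: 'canvas', // where its results should land
    runs: 1,
  },
  prepend: false,
};
dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig));
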
@@ -32,6 +32,7 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
         workflow: builtWorkflow,
         runs: state.params.iterations,
         origin: 'workflows',
+        destination: 'gallery',
       },
       prepend: action.payload.prepend,
     };

@@ -1,6 +1,5 @@
 import { enqueueRequested } from 'app/store/actions';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
-import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
 import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
 import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
 import { queueApi } from 'services/api/endpoints/queue';
@@ -11,12 +10,11 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
       enqueueRequested.match(action) && action.payload.tabName === 'upscaling',
     effect: async (action, { getState, dispatch }) => {
       const state = getState();
-      const { shouldShowProgressInViewer } = state.ui;
       const { prepend } = action.payload;

       const { g, noise, posCond } = await buildMultidiffusionUpscaleGraph(state);

-      const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond);
+      const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');

       const req = dispatch(
         queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
@@ -25,9 +23,6 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
       );
       try {
         await req.unwrap();
-        if (shouldShowProgressInViewer) {
-          dispatch(isImageViewerOpenChanged(true));
-        }
       } finally {
         req.reset();
       }

@@ -17,7 +17,7 @@ import type { ImageDTO } from 'services/api/types';

 const log = logger('gallery');

-//TODO(psyche): handle image deletion (canvas sessions?)
+//TODO(psyche): handle image deletion (canvas staging area?)

 // Some utils to delete images from different parts of the app
 const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {

@@ -1,6 +1,7 @@
 import { createAction } from '@reduxjs/toolkit';
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
+import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
 import {
   controlLayerAdded,
   ipaImageChanged,
@@ -12,7 +13,7 @@ import type { CanvasControlLayerState, CanvasRasterLayerState } from 'features/c
 import { imageDTOToImageObject } from 'features/controlLayers/store/types';
 import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
 import { isValidDrop } from 'features/dnd/util/isValidDrop';
-import { imageToCompareChanged, isImageViewerOpenChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
+import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
 import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
 import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
 import { imagesApi } from 'services/api/endpoints/images';
@@ -103,11 +104,14 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
       activeData.payloadType === 'IMAGE_DTO' &&
       activeData.payload.imageDTO
     ) {
+      const state = getState();
       const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
-      const { x, y } = selectCanvasSlice(getState()).bbox.rect;
+      const { x, y } = selectCanvasSlice(state).bbox.rect;
+      const defaultControlAdapter = selectDefaultControlAdapter(state);
       const overrides: Partial<CanvasControlLayerState> = {
         objects: [imageObject],
         position: { x, y },
+        controlAdapter: defaultControlAdapter,
       };
       dispatch(controlLayerAdded({ overrides, isSelected: true }));
       return;
@@ -142,7 +146,6 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
     ) {
       const { imageDTO } = activeData.payload;
       dispatch(imageToCompareChanged(imageDTO));
-      dispatch(isImageViewerOpenChanged(true));
       return;
     }

@@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
 import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasSlice';
+import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
 import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
 import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
 import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
 import { toast } from 'features/toast/toast';
@@ -44,6 +45,8 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
       if (!autoAddBoardId || autoAddBoardId === 'none') {
         const title = postUploadAction.title || DEFAULT_UPLOADED_TOAST.title;
         toast({ ...DEFAULT_UPLOADED_TOAST, title });
+        dispatch(boardIdSelected({ boardId: 'none' }));
+        dispatch(galleryViewChanged('assets'));
       } else {
         // Add this image to the board
         dispatch(
@@ -67,6 +70,8 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
           ...DEFAULT_UPLOADED_TOAST,
           description,
         });
+        dispatch(boardIdSelected({ boardId: autoAddBoardId }));
+        dispatch(galleryViewChanged('assets'));
       }
       return;
     }

@@ -13,7 +13,7 @@ import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
 import { modelChanged, refinerModelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
 import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
 import { getEntityIdentifier } from 'features/controlLayers/store/types';
-import { calculateNewSize } from 'features/parameters/components/DocumentSize/calculateNewSize';
+import { calculateNewSize } from 'features/parameters/components/Bbox/calculateNewSize';
 import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
 import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas';
 import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';

@@ -6,12 +6,14 @@ import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
 import type { SerializableObject } from 'common/types';
 import { deepClone } from 'common/util/deepClone';
 import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
-import { canvasSessionPersistConfig, canvasSessionSlice } from 'features/controlLayers/store/canvasSessionSlice';
 import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
 import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
+import {
+  canvasStagingAreaPersistConfig,
+  canvasStagingAreaSlice,
+} from 'features/controlLayers/store/canvasStagingAreaSlice';
 import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
 import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
 import { toolPersistConfig, toolSlice } from 'features/controlLayers/store/toolSlice';
 import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
 import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
 import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
@@ -63,9 +65,8 @@ const allReducers = {
   [upscaleSlice.name]: upscaleSlice.reducer,
   [stylePresetSlice.name]: stylePresetSlice.reducer,
   [paramsSlice.name]: paramsSlice.reducer,
   [toolSlice.name]: toolSlice.reducer,
   [canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
-  [canvasSessionSlice.name]: canvasSessionSlice.reducer,
+  [canvasStagingAreaSlice.name]: canvasStagingAreaSlice.reducer,
   [lorasSlice.name]: lorasSlice.reducer,
 };

@@ -109,9 +110,8 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
   [upscalePersistConfig.name]: upscalePersistConfig,
   [stylePresetPersistConfig.name]: stylePresetPersistConfig,
   [paramsPersistConfig.name]: paramsPersistConfig,
   [toolPersistConfig.name]: toolPersistConfig,
   [canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
-  [canvasSessionPersistConfig.name]: canvasSessionPersistConfig,
+  [canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
   [lorasPersistConfig.name]: lorasPersistConfig,
 };

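A note on the pattern above (mine, not from the diff): each slice is registered twice under the same slice name - once in the reducers map and, if it should survive reloads, once in the persist-config map. A hedged sketch of what registering a hypothetical new slice would look like under that convention:

// Hypothetical slice following the same two-map registration pattern:
import { mySlice, mySlicePersistConfig } from 'features/myFeature/store/mySlice';

const allReducers = {
  // ...existing slices...
  [mySlice.name]: mySlice.reducer,
};

const persistConfigs = {
  // ...existing persist configs...
  [mySlicePersistConfig.name]: mySlicePersistConfig,
};
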
@@ -6,18 +6,51 @@ import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
 import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
 import ImageContextMenu from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
 import type { MouseEvent, ReactElement, ReactNode, SyntheticEvent } from 'react';
-import { memo, useCallback, useMemo, useState } from 'react';
+import { memo, useCallback, useMemo } from 'react';
 import { PiImageBold, PiUploadSimpleBold } from 'react-icons/pi';
 import type { ImageDTO, PostUploadAction } from 'services/api/types';

 import IAIDraggable from './IAIDraggable';
 import IAIDroppable from './IAIDroppable';
-import SelectionOverlay from './SelectionOverlay';

 const defaultUploadElement = <Icon as={PiUploadSimpleBold} boxSize={16} />;

 const defaultNoContentFallback = <IAINoContentFallback icon={PiImageBold} />;

+const sx: SystemStyleObject = {
+  '.gallery-image-container::before': {
+    content: '""',
+    display: 'inline-block',
+    position: 'absolute',
+    top: 0,
+    left: 0,
+    right: 0,
+    bottom: 0,
+    pointerEvents: 'none',
+    borderRadius: 'base',
+  },
+  '&[data-selected="selected"]>.gallery-image-container::before': {
+    boxShadow:
+      'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+  },
+  '&[data-selected="selectedForCompare"]>.gallery-image-container::before': {
+    boxShadow:
+      'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
+  },
+  '&:hover>.gallery-image-container::before': {
+    boxShadow:
+      'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
+  },
+  '&:hover[data-selected="selected"]>.gallery-image-container::before': {
+    boxShadow:
+      'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
+  },
+  '&:hover[data-selected="selectedForCompare"]>.gallery-image-container::before': {
+    boxShadow:
+      'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
+  },
+};

 type IAIDndImageProps = FlexProps & {
   imageDTO: ImageDTO | undefined;
   onError?: (event: SyntheticEvent<HTMLImageElement>) => void;
@@ -75,13 +108,11 @@ const IAIDndImage = (props: IAIDndImageProps) => {
   ...rest
 } = props;

-  const [isHovered, setIsHovered] = useState(false);
   const handleMouseOver = useCallback(
     (e: MouseEvent<HTMLDivElement>) => {
       if (onMouseOver) {
         onMouseOver(e);
       }
-      setIsHovered(true);
     },
     [onMouseOver]
   );
@@ -90,7 +121,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
       if (onMouseOut) {
         onMouseOut(e);
       }
-      setIsHovered(false);
     },
     [onMouseOut]
   );
@@ -141,10 +171,13 @@ const IAIDndImage = (props: IAIDndImageProps) => {
       minH={minSize ? minSize : undefined}
       userSelect="none"
       cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
+      sx={withHoverOverlay ? sx : undefined}
+      data-selected={isSelectedForCompare ? 'selectedForCompare' : isSelected ? 'selected' : undefined}
       {...rest}
     >
       {imageDTO && (
         <Flex
+          className="gallery-image-container"
           w="full"
           h="full"
           position={fitContainer ? 'absolute' : 'relative'}
@@ -167,11 +200,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
           data-testid={dataTestId}
         />
         {withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
-        <SelectionOverlay
-          isSelected={isSelected}
-          isSelectedForCompare={isSelectedForCompare}
-          isHovered={withHoverOverlay ? isHovered : false}
-        />
       </Flex>
     )}
     {!imageDTO && !isUploadDisabled && (

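My note on the refactor above (not from the diff): the imperative isHovered state plus the SelectionOverlay component is replaced by pure CSS - a data-selected attribute and :hover selectors drive a ::before pseudo-element's box-shadow, so hover feedback no longer triggers React re-renders. A condensed sketch of the idea:

import { Box } from '@invoke-ai/ui-library';

// Hover/selection styling handled entirely in CSS via attribute selectors.
const Thumb = ({ isSelected }: { isSelected: boolean }) => (
  <Box
    data-selected={isSelected ? 'selected' : undefined}
    sx={{
      '&[data-selected="selected"]': { boxShadow: 'inset 0 0 0 3px blue' },
      '&:hover': { boxShadow: 'inset 0 0 0 2px lightblue' },
    }}
  />
);
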
@@ -1,46 +0,0 @@
-import { Box } from '@invoke-ai/ui-library';
-import { memo, useMemo } from 'react';
-
-type Props = {
-  isSelected: boolean;
-  isSelectedForCompare: boolean;
-  isHovered: boolean;
-};
-const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props) => {
-  const shadow = useMemo(() => {
-    if (isSelectedForCompare && isHovered) {
-      return 'hoverSelectedForCompare';
-    }
-    if (isSelectedForCompare && !isHovered) {
-      return 'selectedForCompare';
-    }
-    if (isSelected && isHovered) {
-      return 'hoverSelected';
-    }
-    if (isSelected && !isHovered) {
-      return 'selected';
-    }
-    if (!isSelected && isHovered) {
-      return 'hoverUnselected';
-    }
-    return undefined;
-  }, [isHovered, isSelected, isSelectedForCompare]);
-  return (
-    <Box
-      className="selection-box"
-      position="absolute"
-      top={0}
-      insetInlineEnd={0}
-      bottom={0}
-      insetInlineStart={0}
-      borderRadius="base"
-      opacity={isSelected || isSelectedForCompare ? 1 : 0.7}
-      transitionProperty="common"
-      transitionDuration="0.1s"
-      pointerEvents="none"
-      shadow={shadow}
-    />
-  );
-};
-
-export default memo(SelectionOverlay);

@@ -1,52 +1,74 @@
 import { useStore } from '@nanostores/react';
 import type { WritableAtom } from 'nanostores';
-import { useCallback, useMemo, useState } from 'react';
+import { atom } from 'nanostores';
+import { useCallback, useState } from 'react';

-export const useBoolean = (initialValue: boolean) => {
-  const [isTrue, set] = useState(initialValue);
-  const setTrue = useCallback(() => set(true), []);
-  const setFalse = useCallback(() => set(false), []);
-  const toggle = useCallback(() => set((v) => !v), []);
+type UseBoolean = {
+  isTrue: boolean;
+  setTrue: () => void;
+  setFalse: () => void;
+  set: (value: boolean) => void;
+  toggle: () => void;
+};

-  const api = useMemo(
-    () => ({
+/**
+ * Creates a hook to manage a boolean state. The boolean is stored in a nanostores atom.
+ * Returns a tuple containing the hook and the atom. Use this for global boolean state.
+ * @param initialValue Initial value of the boolean
+ */
+export const buildUseBoolean = (initialValue: boolean): [() => UseBoolean, WritableAtom<boolean>] => {
+  const $boolean = atom(initialValue);
+
+  const setTrue = () => {
+    $boolean.set(true);
+  };
+  const setFalse = () => {
+    $boolean.set(false);
+  };
+  const set = (value: boolean) => {
+    $boolean.set(value);
+  };
+  const toggle = () => {
+    $boolean.set(!$boolean.get());
+  };
+
+  const useBoolean = () => {
+    const isTrue = useStore($boolean);
+
+    return {
+      isTrue,
+      set,
       setTrue,
       setFalse,
-      set,
       toggle,
-    }),
-    [isTrue, set, setTrue, setFalse, toggle]
-  );
+    };
+  };

-  return api;
+  return [useBoolean, $boolean] as const;
 };

-export const buildUseBoolean = ($boolean: WritableAtom<boolean>) => {
-  return () => {
-    const setTrue = useCallback(() => {
-      $boolean.set(true);
-    }, []);
-    const setFalse = useCallback(() => {
-      $boolean.set(false);
-    }, []);
-    const set = useCallback((value: boolean) => {
-      $boolean.set(value);
-    }, []);
-    const toggle = useCallback(() => {
-      $boolean.set(!$boolean.get());
-    }, []);
+/**
+ * Hook to manage a boolean state. Use this for a local boolean state.
+ * @param initialValue Initial value of the boolean
+ */
+export const useBoolean = (initialValue: boolean) => {
+  const [isTrue, set] = useState(initialValue);

-    const api = useMemo(
-      () => ({
-        setTrue,
-        setFalse,
-        set,
-        toggle,
-        $boolean,
-      }),
-      [set, setFalse, setTrue, toggle]
-    );
+  const setTrue = useCallback(() => {
+    set(true);
+  }, [set]);
+  const setFalse = useCallback(() => {
+    set(false);
+  }, [set]);
+  const toggle = useCallback(() => {
+    set((val) => !val);
+  }, [set]);

-    return api;
+  return {
+    isTrue,
+    setTrue,
+    setFalse,
+    set,
+    toggle,
+  };
 };

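A usage sketch of the two resulting APIs (the hook name below mirrors useIsSavingCanvas, which appears in the CanvasContextMenuItems file later in this set; treat the wiring as illustrative):

// Global: one shared atom, many subscribers.
const [useIsSavingCanvas, $isSavingCanvas] = buildUseBoolean(false);
$isSavingCanvas.set(true);                 // update from non-React code
// inside a component: const isSaving = useIsSavingCanvas(); isSaving.isTrue

// Local: plain component state.
// inside a component: const dialog = useBoolean(false); dialog.setTrue();
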
@@ -1,7 +1,7 @@
 import { useAppDispatch } from 'app/store/storeHooks';
 import { addScope, removeScope, setScopes } from 'common/hooks/interactionScopes';
-import { useClearQueue } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
 import { useCancelCurrentQueueItem } from 'features/queue/hooks/useCancelCurrentQueueItem';
+import { useClearQueue } from 'features/queue/hooks/useClearQueue';
 import { useQueueBack } from 'features/queue/hooks/useQueueBack';
 import { useQueueFront } from 'features/queue/hooks/useQueueFront';
 import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
@@ -114,4 +114,13 @@ export const useGlobalHotkeys = () => {
     },
     [dispatch, isModelManagerEnabled]
   );

+  useHotkeys(
+    isModelManagerEnabled ? '6' : '5',
+    () => {
+      dispatch(setActiveTab('gallery'));
+      setScopes([]);
+    },
+    [dispatch, isModelManagerEnabled]
+  );
 };

@@ -2,6 +2,7 @@ import { useStore } from '@nanostores/react';
 import { $isConnected } from 'app/hooks/useSocketIO';
 import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { useAppSelector } from 'app/store/storeHooks';
+import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
 import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
 import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
 import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
@@ -17,6 +18,7 @@ import { selectSystemSlice } from 'features/system/store/systemSlice';
 import { selectActiveTab } from 'features/ui/store/uiSelectors';
 import i18n from 'i18next';
 import { forEach, upperFirst } from 'lodash-es';
+import { atom } from 'nanostores';
 import { useMemo } from 'react';
 import { getConnectedEdges } from 'reactflow';

@@ -28,7 +30,7 @@ const LAYER_TYPE_TO_TKEY = {
   control_layer: 'controlLayers.globalControlAdapter',
 } as const;

-const createSelector = (templates: Templates, isConnected: boolean) =>
+const createSelector = (templates: Templates, isConnected: boolean, canvasIsBusy: boolean) =>
   createMemoizedSelector(
     [
       selectSystemSlice,
@@ -117,6 +119,10 @@
         reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
       }
     } else {
+      if (canvasIsBusy) {
+        reasons.push({ content: i18n.t('parameters.invoke.canvasBusy') });
+      }
+
       if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
         reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
       }
@@ -128,7 +134,7 @@
       canvas.controlLayers.entities
         .filter((controlLayer) => controlLayer.isEnabled)
         .forEach((controlLayer, i) => {
-          const layerLiteral = i18n.t('controlLayers.layers_one');
+          const layerLiteral = i18n.t('controlLayers.layer_one');
           const layerNumber = i + 1;
           const layerType = i18n.t(LAYER_TYPE_TO_TKEY['control_layer']);
           const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
@@ -158,7 +164,7 @@
       canvas.ipAdapters.entities
         .filter((entity) => entity.isEnabled)
         .forEach((entity, i) => {
-          const layerLiteral = i18n.t('controlLayers.layers_one');
+          const layerLiteral = i18n.t('controlLayers.layer_one');
           const layerNumber = i + 1;
           const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
           const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
@@ -186,7 +192,7 @@
       canvas.regions.entities
         .filter((entity) => entity.isEnabled)
         .forEach((entity, i) => {
-          const layerLiteral = i18n.t('controlLayers.layers_one');
+          const layerLiteral = i18n.t('controlLayers.layer_one');
           const layerNumber = i + 1;
           const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
           const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
@@ -223,7 +229,7 @@
       canvas.rasterLayers.entities
         .filter((entity) => entity.isEnabled)
         .forEach((entity, i) => {
-          const layerLiteral = i18n.t('controlLayers.layers_one');
+          const layerLiteral = i18n.t('controlLayers.layer_one');
           const layerNumber = i + 1;
           const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
           const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
@@ -240,10 +246,17 @@
       }
   );

+const dummyAtom = atom(true);
+
 export const useIsReadyToEnqueue = () => {
   const templates = useStore($templates);
   const isConnected = useStore($isConnected);
-  const selector = useMemo(() => createSelector(templates, isConnected), [templates, isConnected]);
+  const canvasManager = useCanvasManagerSafe();
+  const canvasIsBusy = useStore(canvasManager?.$isBusy ?? dummyAtom);
+  const selector = useMemo(
+    () => createSelector(templates, isConnected, canvasIsBusy),
+    [templates, isConnected, canvasIsBusy]
+  );
   const value = useAppSelector(selector);
   return value;
 };

@@ -1,6 +1,9 @@
 export const roundDownToMultiple = (num: number, multiple: number): number => {
   return Math.floor(num / multiple) * multiple;
 };
+export const roundUpToMultiple = (num: number, multiple: number): number => {
+  return Math.ceil(num / multiple) * multiple;
+};

 export const roundToMultiple = (num: number, multiple: number): number => {
   return Math.round(num / multiple) * multiple;

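A quick worked check of the new helper (my example, not from the diff); canvas dimensions are typically snapped to a grid such as multiples of 8:

roundUpToMultiple(513, 8);   // Math.ceil(64.125) * 8 = 65 * 8 = 520
roundDownToMultiple(513, 8); // Math.floor(64.125) * 8 = 64 * 8 = 512
roundToMultiple(513, 8);     // Math.round(64.125) * 8 = 64 * 8 = 512
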
@@ -1,52 +1,40 @@
 import { Button, ButtonGroup, Flex } from '@invoke-ai/ui-library';
-import { useAppDispatch } from 'app/store/storeHooks';
 import {
-  controlLayerAdded,
-  inpaintMaskAdded,
-  ipaAdded,
-  rasterLayerAdded,
-  rgAdded,
-} from 'features/controlLayers/store/canvasSlice';
-import { memo, useCallback } from 'react';
+  useAddControlLayer,
+  useAddInpaintMask,
+  useAddIPAdapter,
+  useAddRasterLayer,
+  useAddRegionalGuidance,
+} from 'features/controlLayers/hooks/addLayerHooks';
+import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiPlusBold } from 'react-icons/pi';

 export const CanvasAddEntityButtons = memo(() => {
   const { t } = useTranslation();
-  const dispatch = useAppDispatch();
-  const addInpaintMask = useCallback(() => {
-    dispatch(inpaintMaskAdded({ isSelected: true }));
-  }, [dispatch]);
-  const addRegionalGuidance = useCallback(() => {
-    dispatch(rgAdded({ isSelected: true }));
-  }, [dispatch]);
-  const addRasterLayer = useCallback(() => {
-    dispatch(rasterLayerAdded({ isSelected: true }));
-  }, [dispatch]);
-  const addControlLayer = useCallback(() => {
-    dispatch(controlLayerAdded({ isSelected: true }));
-  }, [dispatch]);
-  const addIPAdapter = useCallback(() => {
-    dispatch(ipaAdded({ isSelected: true }));
-  }, [dispatch]);
+  const addInpaintMask = useAddInpaintMask();
+  const addRegionalGuidance = useAddRegionalGuidance();
+  const addRasterLayer = useAddRasterLayer();
+  const addControlLayer = useAddControlLayer();
+  const addIPAdapter = useAddIPAdapter();

   return (
-    <Flex flexDir="column" w="full" h="full" alignItems="center" justifyContent="center">
-      <ButtonGroup orientation="vertical" isAttached={false}>
+    <Flex flexDir="column" w="full" h="full" alignItems="center">
+      <ButtonGroup position="relative" orientation="vertical" isAttached={false} top="20%">
         <Button variant="ghost" justifyContent="flex-start" leftIcon={<PiPlusBold />} onClick={addInpaintMask}>
-          {t('controlLayers.inpaintMask', { count: 1 })}
+          {t('controlLayers.inpaintMask')}
         </Button>
         <Button variant="ghost" justifyContent="flex-start" leftIcon={<PiPlusBold />} onClick={addRegionalGuidance}>
-          {t('controlLayers.regionalGuidance', { count: 1 })}
+          {t('controlLayers.regionalGuidance')}
         </Button>
         <Button variant="ghost" justifyContent="flex-start" leftIcon={<PiPlusBold />} onClick={addRasterLayer}>
-          {t('controlLayers.rasterLayer', { count: 1 })}
+          {t('controlLayers.rasterLayer')}
        </Button>
         <Button variant="ghost" justifyContent="flex-start" leftIcon={<PiPlusBold />} onClick={addControlLayer}>
-          {t('controlLayers.controlLayer', { count: 1 })}
+          {t('controlLayers.controlLayer')}
         </Button>
         <Button variant="ghost" justifyContent="flex-start" leftIcon={<PiPlusBold />} onClick={addIPAdapter}>
-          {t('controlLayers.ipAdapter', { count: 1 })}
+          {t('controlLayers.globalIPAdapter')}
         </Button>
       </ButtonGroup>
     </Flex>

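The inline useCallback/dispatch pairs are replaced by shared hooks from addLayerHooks. A hedged sketch of what one such hook plausibly looks like (the removed inline callbacks did exactly this, but the real hook implementation is not shown in this diff):

import { useCallback } from 'react';
import { useAppDispatch } from 'app/store/storeHooks';
import { inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';

// Assumed shape of useAddInpaintMask in addLayerHooks:
export const useAddInpaintMask = () => {
  const dispatch = useAppDispatch();
  return useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
};
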
@@ -0,0 +1,49 @@
+import { MenuItem } from '@invoke-ai/ui-library';
+import {
+  useIsSavingCanvas,
+  useSaveBboxAsControlLayer,
+  useSaveBboxAsGlobalIPAdapter,
+  useSaveBboxAsRasterLayer,
+  useSaveBboxAsRegionalGuidanceIPAdapter,
+  useSaveBboxToGallery,
+  useSaveCanvasToGallery,
+} from 'features/controlLayers/hooks/saveCanvasHooks';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiFloppyDiskBold, PiShareFatBold } from 'react-icons/pi';
+
+export const CanvasContextMenuItems = memo(() => {
+  const { t } = useTranslation();
+  const isSaving = useIsSavingCanvas();
+  const saveCanvasToGallery = useSaveCanvasToGallery();
+  const saveBboxToGallery = useSaveBboxToGallery();
+  const saveBboxAsRegionalGuidanceIPAdapter = useSaveBboxAsRegionalGuidanceIPAdapter();
+  const saveBboxAsIPAdapter = useSaveBboxAsGlobalIPAdapter();
+  const saveBboxAsRasterLayer = useSaveBboxAsRasterLayer();
+  const saveBboxAsControlLayer = useSaveBboxAsControlLayer();
+
+  return (
+    <>
+      <MenuItem icon={<PiFloppyDiskBold />} isLoading={isSaving.isTrue} onClick={saveCanvasToGallery}>
+        {t('controlLayers.saveCanvasToGallery')}
+      </MenuItem>
+      <MenuItem icon={<PiFloppyDiskBold />} isLoading={isSaving.isTrue} onClick={saveBboxToGallery}>
+        {t('controlLayers.saveBboxToGallery')}
+      </MenuItem>
+      <MenuItem icon={<PiShareFatBold />} isLoading={isSaving.isTrue} onClick={saveBboxAsIPAdapter}>
+        {t('controlLayers.sendBboxToGlobalIPAdapter')}
+      </MenuItem>
+      <MenuItem icon={<PiShareFatBold />} isLoading={isSaving.isTrue} onClick={saveBboxAsRegionalGuidanceIPAdapter}>
+        {t('controlLayers.sendBboxToRegionalIPAdapter')}
+      </MenuItem>
+      <MenuItem icon={<PiShareFatBold />} isLoading={isSaving.isTrue} onClick={saveBboxAsControlLayer}>
+        {t('controlLayers.sendBboxToControlLayer')}
+      </MenuItem>
+      <MenuItem icon={<PiShareFatBold />} isLoading={isSaving.isTrue} onClick={saveBboxAsRasterLayer}>
+        {t('controlLayers.sendBboxToRasterLayer')}
+      </MenuItem>
+    </>
+  );
+});
+
+CanvasContextMenuItems.displayName = 'CanvasContextMenuItems';

@@ -1,7 +1,6 @@
 import { Flex } from '@invoke-ai/ui-library';
 import IAIDroppable from 'common/components/IAIDroppable';
 import type { AddControlLayerFromImageDropData, AddRasterLayerFromImageDropData } from 'features/dnd/types';
-import { useIsImageViewerOpen } from 'features/gallery/components/ImageViewer/useImageViewer';
 import { memo } from 'react';

 const addRasterLayerFromImageDropData: AddRasterLayerFromImageDropData = {
@@ -15,12 +14,6 @@ const addControlLayerFromImageDropData: AddControlLayerFromImageDropData = {
 };

 export const CanvasDropArea = memo(() => {
-  const isImageViewerOpen = useIsImageViewerOpen();
-
-  if (isImageViewerOpen) {
-    return null;
-  }
-
   return (
     <>
       <Flex position="absolute" top={0} right={0} bottom="50%" left={0} gap={2} pointerEvents="none">

@@ -1,20 +0,0 @@
-import { Flex, Spacer } from '@invoke-ai/ui-library';
-import { EntityListActionBarAddLayerButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuButton';
-import { EntityListActionBarDeleteButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarDeleteButton';
-import { EntityListActionBarSelectedEntityFill } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityFill';
-import { SelectedEntityOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityOpacity';
-import { memo } from 'react';
-
-export const EntityListActionBar = memo(() => {
-  return (
-    <Flex w="full" py={1} px={1} gap={2} alignItems="center">
-      <SelectedEntityOpacity />
-      <Spacer />
-      <EntityListActionBarSelectedEntityFill />
-      <EntityListActionBarAddLayerButton />
-      <EntityListActionBarDeleteButton />
-    </Flex>
-  );
-});
-
-EntityListActionBar.displayName = 'EntityListActionBar';

@@ -1,28 +0,0 @@
import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { CanvasEntityListMenuItems } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuItems';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const EntityListActionBarAddLayerButton = memo(() => {
  const { t } = useTranslation();

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        size="sm"
        tooltip={t('controlLayers.addLayer')}
        aria-label={t('controlLayers.addLayer')}
        icon={<PiPlusBold />}
        variant="ghost"
        data-testid="control-layers-add-layer-menu-button"
      />
      <MenuList>
        <CanvasEntityListMenuItems />
      </MenuList>
    </Menu>
  );
});

EntityListActionBarAddLayerButton.displayName = 'EntityListActionBarAddLayerButton';
@@ -1,54 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const CanvasEntityListMenuItems = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded({ isSelected: true }));
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addIPAdapter = useCallback(() => {
    dispatch(ipaAdded({ isSelected: true }));
  }, [dispatch]);

  return (
    <>
      <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
        {t('controlLayers.inpaintMask', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
        {t('controlLayers.regionalGuidance', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
        {t('controlLayers.rasterLayer', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
        {t('controlLayers.controlLayer', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
        {t('controlLayers.ipAdapter', { count: 1 })}
      </MenuItem>
    </>
  );
});

CanvasEntityListMenuItems.displayName = 'CanvasEntityListMenu';
@@ -1,39 +0,0 @@
import { IconButton, useShiftModifier } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allEntitiesDeleted, entityDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectEntityCount, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';

export const EntityListActionBarDeleteButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const entityCount = useAppSelector(selectEntityCount);
  const shift = useShiftModifier();
  const onClick = useCallback(() => {
    if (shift) {
      dispatch(allEntitiesDeleted());
      return;
    }
    if (!selectedEntityIdentifier) {
      return;
    }
    dispatch(entityDeleted({ entityIdentifier: selectedEntityIdentifier }));
  }, [dispatch, selectedEntityIdentifier, shift]);

  return (
    <IconButton
      onClick={onClick}
      isDisabled={shift ? entityCount === 0 : !selectedEntityIdentifier}
      size="sm"
      variant="ghost"
      aria-label={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      tooltip={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      icon={<PiTrashSimpleFill />}
    />
  );
});

EntityListActionBarDeleteButton.displayName = 'EntityListActionBarDeleteButton';
@@ -0,0 +1,54 @@
import { IconButton, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import {
  useAddControlLayer,
  useAddInpaintMask,
  useAddIPAdapter,
  useAddRasterLayer,
  useAddRegionalGuidance,
} from 'features/controlLayers/hooks/addLayerHooks';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
  const { t } = useTranslation();
  const addInpaintMask = useAddInpaintMask();
  const addRegionalGuidance = useAddRegionalGuidance();
  const addRasterLayer = useAddRasterLayer();
  const addControlLayer = useAddControlLayer();
  const addIPAdapter = useAddIPAdapter();

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        size="sm"
        variant="link"
        alignSelf="stretch"
        tooltip={t('controlLayers.addLayer')}
        aria-label={t('controlLayers.addLayer')}
        icon={<PiPlusBold />}
        data-testid="control-layers-add-layer-menu-button"
      />
      <MenuList>
        <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
          {t('controlLayers.inpaintMask')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
          {t('controlLayers.regionalGuidance')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
          {t('controlLayers.rasterLayer')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
          {t('controlLayers.controlLayer')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
          {t('controlLayers.globalIPAdapter')}
        </MenuItem>
      </MenuList>
    </Menu>
  );
});

EntityListGlobalActionBarAddLayerMenu.displayName = 'EntityListGlobalActionBarAddLayerMenu';
@@ -0,0 +1,26 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListGlobalActionBarAddLayerMenu } from 'features/controlLayers/components/CanvasEntityList/EntityListGlobalActionBarAddLayerMenu';
import { EntityListSelectedEntityActionBarDuplicateButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarDuplicateButton';
import { EntityListSelectedEntityActionBarFill } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFill';
import { EntityListSelectedEntityActionBarFilterButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFilterButton';
import { EntityListSelectedEntityActionBarOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarOpacity';
import { EntityListSelectedEntityActionBarTransformButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarTransformButton';
import { memo } from 'react';

export const EntityListSelectedEntityActionBar = memo(() => {
  return (
    <Flex w="full" gap={2} alignItems="center" ps={1}>
      <EntityListSelectedEntityActionBarOpacity />
      <Spacer />
      <EntityListSelectedEntityActionBarFill />
      <Flex h="full">
        <EntityListSelectedEntityActionBarFilterButton />
        <EntityListSelectedEntityActionBarTransformButton />
        <EntityListSelectedEntityActionBarDuplicateButton />
        <EntityListGlobalActionBarAddLayerMenu />
      </Flex>
    </Flex>
  );
});

EntityListSelectedEntityActionBar.displayName = 'EntityListSelectedEntityActionBar';
@@ -0,0 +1,34 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { entityDuplicated } from 'features/controlLayers/store/canvasSlice';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCopyFill } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarDuplicateButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const onClick = useCallback(() => {
    if (!selectedEntityIdentifier) {
      return;
    }
    dispatch(entityDuplicated({ entityIdentifier: selectedEntityIdentifier }));
  }, [dispatch, selectedEntityIdentifier]);

  return (
    <IconButton
      onClick={onClick}
      isDisabled={!selectedEntityIdentifier}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.duplicate')}
      tooltip={t('controlLayers.duplicate')}
      icon={<PiCopyFill />}
    />
  );
});

EntityListSelectedEntityActionBarDuplicateButton.displayName = 'EntityListSelectedEntityActionBarDuplicateButton';
@@ -9,7 +9,7 @@ import { type FillStyle, isMaskEntityIdentifier, type RgbColor } from 'features/
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

export const EntityListActionBarSelectedEntityFill = memo(() => {
export const EntityListSelectedEntityActionBarFill = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
@@ -67,4 +67,4 @@ export const EntityListActionBarSelectedEntityFill = memo(() => {
  );
});

EntityListActionBarSelectedEntityFill.displayName = 'EntityListActionBarSelectedEntityFill';
EntityListSelectedEntityActionBarFill.displayName = 'EntityListSelectedEntityActionBarFill';
@@ -0,0 +1,37 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityFilter } from 'features/controlLayers/hooks/useEntityFilter';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { isFilterableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShootingStarBold } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarFilterButton = memo(() => {
  const { t } = useTranslation();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const filter = useEntityFilter(selectedEntityIdentifier);

  if (!selectedEntityIdentifier) {
    return null;
  }

  if (!isFilterableEntityIdentifier(selectedEntityIdentifier)) {
    return null;
  }

  return (
    <IconButton
      onClick={filter.start}
      isDisabled={filter.isDisabled}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.filter.filter')}
      tooltip={t('controlLayers.filter.filter')}
      icon={<PiShootingStarBold />}
    />
  );
});

EntityListSelectedEntityActionBarFilterButton.displayName = 'EntityListSelectedEntityActionBarFilterButton';
@@ -22,7 +22,7 @@ import {
  selectEntity,
  selectSelectedEntityIdentifier,
} from 'features/controlLayers/store/selectors';
import { isDrawableEntity } from 'features/controlLayers/store/types';
import { isRenderableEntity } from 'features/controlLayers/store/types';
import { clamp, round } from 'lodash-es';
import type { KeyboardEvent } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
@@ -37,11 +37,11 @@ function formatPct(v: number | string) {
  return `${round(Number(v), 2).toLocaleString()}%`;
}

function mapSliderValueToOpacity(value: number) {
function mapSliderValueToRawValue(value: number) {
  return value / 100;
}

function mapOpacityToSliderValue(opacity: number) {
function mapRawValueToSliderValue(opacity: number) {
  return opacity * 100;
}

@@ -50,14 +50,14 @@ function formatSliderValue(value: number) {
}

const marks = [
  mapOpacityToSliderValue(0),
  mapOpacityToSliderValue(0.25),
  mapOpacityToSliderValue(0.5),
  mapOpacityToSliderValue(0.75),
  mapOpacityToSliderValue(1),
  mapRawValueToSliderValue(0),
  mapRawValueToSliderValue(0.25),
  mapRawValueToSliderValue(0.5),
  mapRawValueToSliderValue(0.75),
  mapRawValueToSliderValue(1),
];

const sliderDefaultValue = mapOpacityToSliderValue(1);
const sliderDefaultValue = mapRawValueToSliderValue(1);

const snapCandidates = marks.slice(1, marks.length - 1);

@@ -70,14 +70,14 @@ const selectOpacity = createSelector(selectCanvasSlice, (canvas) => {
  if (!selectedEntity) {
    return 1; // fallback to 100% opacity
  }
  if (!isDrawableEntity(selectedEntity)) {
  if (!isRenderableEntity(selectedEntity)) {
    return 1; // fallback to 100% opacity
  }
  // Opacity is a float from 0-1, but we want to display it as a percentage
  return selectedEntity.opacity;
});

export const SelectedEntityOpacity = memo(() => {
export const EntityListSelectedEntityActionBarOpacity = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
@@ -95,7 +95,7 @@ export const SelectedEntityOpacity = memo(() => {
      if (!$shift.get()) {
        snappedOpacity = snapToNearest(opacity, snapCandidates, 2);
      }
      const mappedOpacity = mapSliderValueToOpacity(snappedOpacity);
      const mappedOpacity = mapSliderValueToRawValue(snappedOpacity);

      dispatch(entityOpacityChanged({ entityIdentifier: selectedEntityIdentifier, opacity: mappedOpacity }));
    },
@@ -157,7 +157,7 @@ export const SelectedEntityOpacity = memo(() => {
          clampValueOnBlur={false}
          variant="outline"
        >
          <NumberInputField paddingInlineEnd={7} />
          <NumberInputField paddingInlineEnd={7} _focusVisible={{ zIndex: 0 }} />
          <PopoverTrigger>
            <IconButton
              aria-label="open-slider"
@@ -193,4 +193,4 @@ export const SelectedEntityOpacity = memo(() => {
  );
});

SelectedEntityOpacity.displayName = 'SelectedEntityOpacity';
EntityListSelectedEntityActionBarOpacity.displayName = 'EntityListSelectedEntityActionBarOpacity';
@@ -0,0 +1,37 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityTransform } from 'features/controlLayers/hooks/useEntityTransform';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { isTransformableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFrameCornersBold } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarTransformButton = memo(() => {
  const { t } = useTranslation();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const transform = useEntityTransform(selectedEntityIdentifier);

  if (!selectedEntityIdentifier) {
    return null;
  }

  if (!isTransformableEntityIdentifier(selectedEntityIdentifier)) {
    return null;
  }

  return (
    <IconButton
      onClick={transform.start}
      isDisabled={transform.isDisabled}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.transform.transform')}
      tooltip={t('controlLayers.transform.transform')}
      icon={<PiFrameCornersBold />}
    />
  );
});

EntityListSelectedEntityActionBarTransformButton.displayName = 'EntityListSelectedEntityActionBarTransformButton';
@@ -1,29 +0,0 @@
import { Button, ButtonGroup } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectCanvasSessionSlice, sessionModeChanged } from 'features/controlLayers/store/canvasSessionSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

const selectCanvasMode = createSelector(selectCanvasSessionSlice, (canvasSession) => canvasSession.mode);

export const CanvasModeSwitcher = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const mode = useAppSelector(selectCanvasMode);
  const onClickGenerate = useCallback(() => dispatch(sessionModeChanged({ mode: 'generate' })), [dispatch]);
  const onClickCompose = useCallback(() => dispatch(sessionModeChanged({ mode: 'compose' })), [dispatch]);

  return (
    <ButtonGroup variant="outline">
      <Button onClick={onClickGenerate} colorScheme={mode === 'generate' ? 'invokeBlue' : 'base'}>
        {t('controlLayers.generateMode')}
      </Button>
      <Button onClick={onClickCompose} colorScheme={mode === 'compose' ? 'invokeBlue' : 'base'}>
        {t('controlLayers.composeMode')}
      </Button>
    </ButtonGroup>
  );
});

CanvasModeSwitcher.displayName = 'CanvasModeSwitcher';
@@ -2,17 +2,18 @@ import { Divider, Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasAddEntityButtons } from 'features/controlLayers/components/CanvasAddEntityButtons';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList/CanvasEntityList';
import { EntityListActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBar';
import { EntityListSelectedEntityActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBar';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectHasEntities } from 'features/controlLayers/store/selectors';
import { memo } from 'react';

export const CanvasPanelContent = memo(() => {
  const hasEntities = useAppSelector(selectHasEntities);

  return (
    <CanvasManagerProviderGate>
      <Flex flexDir="column" gap={2} w="full" h="full">
        <EntityListActionBar />
        <EntityListSelectedEntityActionBar />
        <Divider py={0} />
        {!hasEntities && <CanvasAddEntityButtons />}
        {hasEntities && <CanvasEntityList />}
@@ -1,145 +0,0 @@
import { Flex, Grid, GridItem, IconButton } from '@invoke-ai/ui-library';
import { memo, useCallback, useState } from 'react';
import {
  PiArrowDownBold,
  PiArrowDownLeftBold,
  PiArrowDownRightBold,
  PiArrowLeftBold,
  PiArrowRightBold,
  PiArrowUpBold,
  PiArrowUpLeftBold,
  PiArrowUpRightBold,
  PiSquareBold,
} from 'react-icons/pi';

type ResizeDirection =
  | 'up-left'
  | 'up'
  | 'up-right'
  | 'left'
  | 'center-out'
  | 'right'
  | 'down-left'
  | 'down'
  | 'down-right';

export const CanvasResizer = memo(() => {
  const [resizeDirection, setResizeDirection] = useState<ResizeDirection>('center-out');

  const setDirUpLeft = useCallback(() => {
    setResizeDirection('up-left');
  }, []);

  const setDirUp = useCallback(() => {
    setResizeDirection('up');
  }, []);

  const setDirUpRight = useCallback(() => {
    setResizeDirection('up-right');
  }, []);

  const setDirLeft = useCallback(() => {
    setResizeDirection('left');
  }, []);

  const setDirCenterOut = useCallback(() => {
    setResizeDirection('center-out');
  }, []);

  const setDirRight = useCallback(() => {
    setResizeDirection('right');
  }, []);

  const setDirDownLeft = useCallback(() => {
    setResizeDirection('down-left');
  }, []);

  const setDirDown = useCallback(() => {
    setResizeDirection('down');
  }, []);

  const setDirDownRight = useCallback(() => {
    setResizeDirection('down-right');
  }, []);

  return (
    <Flex p={2}>
      <Grid gridTemplateRows="1fr 1fr 1fr" gridTemplateColumns="1fr 1fr 1fr" gap={2}>
        <GridItem>
          <IconButton
            onClick={setDirUpLeft}
            aria-label="up-left"
            icon={<PiArrowUpLeftBold />}
            variant={resizeDirection === 'up-left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirUp}
            aria-label="up"
            icon={<PiArrowUpBold />}
            variant={resizeDirection === 'up' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirUpRight}
            aria-label="up-right"
            icon={<PiArrowUpRightBold />}
            variant={resizeDirection === 'up-right' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirLeft}
            aria-label="left"
            icon={<PiArrowLeftBold />}
            variant={resizeDirection === 'left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirCenterOut}
            aria-label="center-out"
            icon={<PiSquareBold />}
            variant={resizeDirection === 'center-out' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirRight}
            aria-label="right"
            icon={<PiArrowRightBold />}
            variant={resizeDirection === 'right' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDownLeft}
            aria-label="down-left"
            icon={<PiArrowDownLeftBold />}
            variant={resizeDirection === 'down-left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDown}
            aria-label="down"
            icon={<PiArrowDownBold />}
            variant={resizeDirection === 'down' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDownRight}
            aria-label="down-right"
            icon={<PiArrowDownRightBold />}
            variant={resizeDirection === 'down-right' ? 'solid' : 'ghost'}
          />
        </GridItem>
      </Grid>
    </Flex>
  );
});

CanvasResizer.displayName = 'CanvasResizer';
@@ -0,0 +1,95 @@
import { useDndContext } from '@dnd-kit/core';
import { Box, Spacer, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useScopeOnFocus } from 'common/hooks/interactionScopes';
import { CanvasPanelContent } from 'features/controlLayers/components/CanvasPanelContent';
import { CanvasSendToToggle } from 'features/controlLayers/components/CanvasSendToToggle';
import { selectSendToCanvas } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectEntityCount } from 'features/controlLayers/store/selectors';
import GalleryPanelContent from 'features/gallery/components/GalleryPanelContent';
import { memo, useCallback, useMemo, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';

export const CanvasRightPanelContent = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
  const [tab, setTab] = useState(0);
  useScopeOnFocus('gallery', ref);

  return (
    <Tabs index={tab} onChange={setTab} w="full" h="full" display="flex" flexDir="column">
      <TabList alignItems="center">
        <PanelTabs setTab={setTab} />
        <Spacer />
        <CanvasSendToToggle />
      </TabList>
      <TabPanels w="full" h="full">
        <TabPanel w="full" h="full" p={0} pt={3}>
          <CanvasPanelContent />
        </TabPanel>
        <TabPanel w="full" h="full" p={0} pt={3}>
          <GalleryPanelContent />
        </TabPanel>
      </TabPanels>
    </Tabs>
  );
});

CanvasRightPanelContent.displayName = 'CanvasRightPanelContent';

const PanelTabs = memo(({ setTab }: { setTab: (val: number) => void }) => {
  const { t } = useTranslation();
  const entityCount = useAppSelector(selectEntityCount);
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const tabTimeout = useRef<number | null>(null);
  const dndCtx = useDndContext();

  const onOnMouseOverLayersTab = useCallback(() => {
    tabTimeout.current = window.setTimeout(() => {
      if (dndCtx.active) {
        setTab(0);
      }
    }, 300);
  }, [dndCtx.active, setTab]);

  const onOnMouseOverGalleryTab = useCallback(() => {
    tabTimeout.current = window.setTimeout(() => {
      if (dndCtx.active) {
        setTab(1);
      }
    }, 300);
  }, [dndCtx.active, setTab]);

  const onMouseOut = useCallback(() => {
    if (tabTimeout.current) {
      clearTimeout(tabTimeout.current);
    }
  }, []);

  const layersTabLabel = useMemo(() => {
    if (entityCount === 0) {
      return t('controlLayers.layer_other');
    }
    return `${t('controlLayers.layer_other')} (${entityCount})`;
  }, [entityCount, t]);

  return (
    <>
      <Tab position="relative" onMouseOver={onOnMouseOverLayersTab} onMouseOut={onMouseOut} w={32}>
        <Box as="span" w="full">
          {layersTabLabel}
        </Box>
        {sendToCanvas && (
          <Box position="absolute" top={2} right={2} h={2} w={2} bg="invokeYellow.300" borderRadius="full" />
        )}
      </Tab>
      <Tab position="relative" onMouseOver={onOnMouseOverGalleryTab} onMouseOut={onMouseOut}>
        {t('gallery.gallery')}
        {!sendToCanvas && (
          <Box position="absolute" top={2} right={2} h={2} w={2} bg="invokeYellow.300" borderRadius="full" />
        )}
      </Tab>
    </>
  );
});

PanelTabs.displayName = 'PanelTabs';
@@ -0,0 +1,77 @@
import {
  Button,
  Flex,
  Icon,
  Popover,
  PopoverArrow,
  PopoverBody,
  PopoverContent,
  PopoverTrigger,
  Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectSendToCanvas, settingsSendToCanvasChanged } from 'features/controlLayers/store/canvasSettingsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretDownBold, PiCheckBold } from 'react-icons/pi';

export const CanvasSendToToggle = memo(() => {
  const { t } = useTranslation();
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const dispatch = useAppDispatch();

  const enableSendToCanvas = useCallback(() => {
    dispatch(settingsSendToCanvasChanged(true));
  }, [dispatch]);

  const disableSendToCanvas = useCallback(() => {
    dispatch(settingsSendToCanvasChanged(false));
  }, [dispatch]);

  return (
    <Popover isLazy>
      <PopoverTrigger>
        <Button
          size="sm"
          variant="link"
          data-testid="toggle-viewer-menu-button"
          pointerEvents="auto"
          rightIcon={<PiCaretDownBold />}
        >
          {sendToCanvas ? t('controlLayers.sendingToCanvas') : t('controlLayers.sendingToGallery')}
        </Button>
      </PopoverTrigger>
      <PopoverContent p={2} pointerEvents="auto">
        <PopoverArrow />
        <PopoverBody>
          <Flex flexDir="column">
            <Button onClick={disableSendToCanvas} variant="ghost" h="auto" w="auto" p={2}>
              <Flex gap={2} w="full">
                <Icon as={PiCheckBold} visibility={!sendToCanvas ? 'visible' : 'hidden'} />
                <Flex flexDir="column" gap={2} alignItems="flex-start">
                  <Text fontWeight="semibold">{t('controlLayers.sendToGallery')}</Text>
                  <Text fontWeight="normal" variant="subtext">
                    {t('controlLayers.sendToGalleryDesc')}
                  </Text>
                </Flex>
              </Flex>
            </Button>
            <Button onClick={enableSendToCanvas} variant="ghost" h="auto" w="auto" p={2}>
              <Flex gap={2} w="full">
                <Icon as={PiCheckBold} visibility={sendToCanvas ? 'visible' : 'hidden'} />
                <Flex flexDir="column" gap={2} alignItems="flex-start">
                  <Text fontWeight="semibold">{t('controlLayers.sendToCanvas')}</Text>
                  <Text fontWeight="normal" variant="subtext">
                    {t('controlLayers.sendToCanvasDesc')}
                  </Text>
                </Flex>
              </Flex>
            </Button>
          </Flex>
        </PopoverBody>
      </PopoverContent>
    </Popover>
  );
});

CanvasSendToToggle.displayName = 'CanvasSendToToggle';
@@ -0,0 +1,104 @@
import { ContextMenu, Flex, MenuList } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useScopeOnFocus } from 'common/hooks/interactionScopes';
import { CanvasContextMenuItems } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuItems';
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
import { Filter } from 'features/controlLayers/components/Filters/Filter';
import { CanvasHUD } from 'features/controlLayers/components/HUD/CanvasHUD';
import { CanvasSelectedEntityStatusAlert } from 'features/controlLayers/components/HUD/CanvasSelectedEntityStatusAlert';
import { InvokeCanvasComponent } from 'features/controlLayers/components/InvokeCanvasComponent';
import { StagingAreaIsStagingGate } from 'features/controlLayers/components/StagingArea/StagingAreaIsStagingGate';
import { StagingAreaToolbar } from 'features/controlLayers/components/StagingArea/StagingAreaToolbar';
import { CanvasToolbar } from 'features/controlLayers/components/Toolbar/CanvasToolbar';
import { Transform } from 'features/controlLayers/components/Transform/Transform';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { TRANSPARENCY_CHECKERBOARD_PATTERN_DATAURL } from 'features/controlLayers/konva/patterns/transparency-checkerboard-pattern';
import { selectDynamicGrid, selectShowHUD } from 'features/controlLayers/store/canvasSettingsSlice';
import { memo, useCallback, useRef } from 'react';

export const CanvasTabContent = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
  const dynamicGrid = useAppSelector(selectDynamicGrid);
  const showHUD = useAppSelector(selectShowHUD);

  const renderMenu = useCallback(() => {
    return (
      <CanvasManagerProviderGate>
        <MenuList>
          <CanvasContextMenuItems />
        </MenuList>
      </CanvasManagerProviderGate>
    );
  }, []);

  useScopeOnFocus('canvas', ref);

  return (
    <Flex
      tabIndex={-1}
      ref={ref}
      borderRadius="base"
      position="relative"
      flexDirection="column"
      height="full"
      width="full"
      gap={2}
      alignItems="center"
      justifyContent="center"
    >
      <CanvasToolbar />
      <ContextMenu<HTMLDivElement> renderMenu={renderMenu}>
        {(ref) => (
          <Flex
            ref={ref}
            position="relative"
            w="full"
            h="full"
            bg={dynamicGrid ? 'base.850' : 'base.900'}
            borderRadius="base"
          >
            {!dynamicGrid && (
              <Flex
                position="absolute"
                borderRadius="base"
                bgImage={TRANSPARENCY_CHECKERBOARD_PATTERN_DATAURL}
                top={0}
                right={0}
                bottom={0}
                left={0}
                opacity={0.1}
              />
            )}
            <InvokeCanvasComponent />
            <CanvasManagerProviderGate>
              {showHUD && (
                <Flex position="absolute" top={1} insetInlineStart={1} pointerEvents="none">
                  <CanvasHUD />
                </Flex>
              )}
              <Flex position="absolute" top={1} insetInlineEnd={1} pointerEvents="none">
                <CanvasSelectedEntityStatusAlert />
              </Flex>
            </CanvasManagerProviderGate>
          </Flex>
        )}
      </ContextMenu>
      <Flex position="absolute" bottom={4} gap={2} align="center" justify="center">
        <CanvasManagerProviderGate>
          <StagingAreaIsStagingGate>
            <StagingAreaToolbar />
          </StagingAreaIsStagingGate>
        </CanvasManagerProviderGate>
      </Flex>
      <Flex position="absolute" bottom={4}>
        <CanvasManagerProviderGate>
          <Filter />
          <Transform />
        </CanvasManagerProviderGate>
      </Flex>
      <CanvasDropArea />
    </Flex>
  );
});

CanvasTabContent.displayName = 'CanvasTabContent';
@@ -1,14 +1,13 @@
import { Spacer } from '@invoke-ai/ui-library';
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { ControlLayerBadges } from 'features/controlLayers/components/ControlLayer/ControlLayerBadges';
import { ControlLayerControlAdapter } from 'features/controlLayers/components/ControlLayer/ControlLayerControlAdapter';
import { EntityLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { ControlLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';
@@ -22,21 +21,20 @@ export const ControlLayer = memo(({ id }: Props) => {

  return (
    <EntityIdentifierContext.Provider value={entityIdentifier}>
      <EntityLayerAdapterGate>
      <ControlLayerAdapterGate>
        <CanvasEntityContainer>
          <CanvasEntityHeader>
            <CanvasEntityPreviewImage />
            <CanvasEntityEditableTitle />
            <Spacer />
            <ControlLayerBadges />
            <CanvasEntityIsLockedToggle />
            <CanvasEntityEnabledToggle />
            <CanvasEntityHeaderCommonActions />
          </CanvasEntityHeader>
          <CanvasEntitySettingsWrapper>
            <ControlLayerControlAdapter />
          </CanvasEntitySettingsWrapper>
        </CanvasEntityContainer>
      </EntityLayerAdapterGate>
      </ControlLayerAdapterGate>
    </EntityIdentifierContext.Provider>
  );
});
@@ -1,21 +1,35 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { createMemoizedAppSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { ControlLayerControlAdapterControlMode } from 'features/controlLayers/components/ControlLayer/ControlLayerControlAdapterControlMode';
import { ControlLayerControlAdapterModel } from 'features/controlLayers/components/ControlLayer/ControlLayerControlAdapterModel';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useControlLayerControlAdapter } from 'features/controlLayers/hooks/useLayerControlAdapter';
import {
  controlLayerBeginEndStepPctChanged,
  controlLayerControlModeChanged,
  controlLayerModelChanged,
  controlLayerWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import type { ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import type { ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';

const useControlLayerControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) => {
  const selectControlAdapter = useMemo(
    () =>
      createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
        const layer = selectEntityOrThrow(canvas, entityIdentifier);
        return layer.controlAdapter;
      }),
    [entityIdentifier]
  );
  const controlAdapter = useAppSelector(selectControlAdapter);
  return controlAdapter;
};

export const ControlLayerControlAdapter = memo(() => {
  const dispatch = useAppDispatch();
  const entityIdentifier = useEntityIdentifierContext('control_layer');
@@ -1,10 +1,7 @@
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useControlNetAndT2IAdapterModels } from 'services/api/hooks/modelsByType';
@@ -17,8 +14,6 @@ type Props = {

export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onChangeModel }: Props) => {
  const { t } = useTranslation();
  const entityIdentifier = useEntityIdentifierContext();
  const canvasManager = useCanvasManager();
  const currentBaseModel = useAppSelector(selectBase);
  const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
  const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);
@@ -29,29 +24,8 @@ export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onCha
        return;
      }
      onChangeModel(modelConfig);

      // When we set the model for the first time, we'll set the default filter settings and open the filter popup

      if (modelKey) {
        // If there is already a model key, this is not the first time we're setting the model
        return;
      }

      // Open the filter popup by setting this entity as the filtering entity
      if (!canvasManager.filter.$adapter.get()) {
        // Update the filter, preferring the model's default
        if (isFilterType(modelConfig.default_settings?.preprocessor)) {
          canvasManager.filter.$config.set(
            IMAGE_FILTERS[modelConfig.default_settings.preprocessor].buildDefaults(modelConfig.base)
          );
        } else {
          canvasManager.filter.$config.set(IMAGE_FILTERS.canny_image_processor.buildDefaults(modelConfig.base));
        }
        canvasManager.filter.initialize(entityIdentifier);
        canvasManager.filter.previewFilter();
      }
    },
    [canvasManager.filter, entityIdentifier, modelKey, onChangeModel]
    [onChangeModel]
  );

  const getIsDisabled = useCallback(
@@ -1,6 +1,7 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -9,6 +10,7 @@ import { PiLightningBold } from 'react-icons/pi';
export const ControlLayerMenuItemsControlToRaster = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const isBusy = useCanvasIsBusy();
  const entityIdentifier = useEntityIdentifierContext('control_layer');

  const convertControlLayerToRasterLayer = useCallback(() => {
@@ -16,7 +18,7 @@ export const ControlLayerMenuItemsControlToRaster = memo(() => {
  }, [dispatch, entityIdentifier]);

  return (
    <MenuItem onClick={convertControlLayerToRasterLayer} icon={<PiLightningBold />}>
    <MenuItem onClick={convertControlLayerToRasterLayer} icon={<PiLightningBold />} isDisabled={isBusy}>
      {t('controlLayers.convertToRasterLayer')}
    </MenuItem>
  );
@@ -1,24 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import type { Meta, StoryObj } from '@storybook/react';
import { CanvasEditor } from 'features/controlLayers/components/ControlLayersEditor';

const meta: Meta<typeof CanvasEditor> = {
  title: 'Feature/ControlLayers',
  tags: ['autodocs'],
  component: CanvasEditor,
};

export default meta;
type Story = StoryObj<typeof CanvasEditor>;

const Component = () => {
  return (
    <Flex w={1500} h={1500}>
      <CanvasEditor />
    </Flex>
  );
};

export const Default: Story = {
  render: Component,
};
@@ -1,51 +0,0 @@
/* eslint-disable i18next/no-literal-string */
import { Flex } from '@invoke-ai/ui-library';
import { useScopeOnFocus } from 'common/hooks/interactionScopes';
import { CanvasDropArea } from 'features/controlLayers/components/CanvasDropArea';
import { ControlLayersToolbar } from 'features/controlLayers/components/ControlLayersToolbar';
import { Filter } from 'features/controlLayers/components/Filters/Filter';
import { StageComponent } from 'features/controlLayers/components/StageComponent';
import { StagingAreaIsStagingGate } from 'features/controlLayers/components/StagingArea/StagingAreaIsStagingGate';
import { StagingAreaToolbar } from 'features/controlLayers/components/StagingArea/StagingAreaToolbar';
import { Transform } from 'features/controlLayers/components/Transform';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { memo, useRef } from 'react';

export const CanvasEditor = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
  useScopeOnFocus('canvas', ref);

  return (
    <Flex
      tabIndex={-1}
      ref={ref}
      borderRadius="base"
      position="relative"
      flexDirection="column"
      height="full"
      width="full"
      gap={2}
      alignItems="center"
      justifyContent="center"
    >
      <ControlLayersToolbar />
      <StageComponent />
      <Flex position="absolute" bottom={8} gap={2} align="center" justify="center">
        <CanvasManagerProviderGate>
          <StagingAreaIsStagingGate>
            <StagingAreaToolbar />
          </StagingAreaIsStagingGate>
        </CanvasManagerProviderGate>
      </Flex>
      <Flex position="absolute" bottom={8}>
        <CanvasManagerProviderGate>
          <Filter />
          <Transform />
        </CanvasManagerProviderGate>
      </Flex>
      <CanvasDropArea />
    </Flex>
  );
});

CanvasEditor.displayName = 'CanvasEditor';
@@ -1,38 +0,0 @@
/* eslint-disable i18next/no-literal-string */
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { CanvasModeSwitcher } from 'features/controlLayers/components/CanvasModeSwitcher';
import { CanvasResetViewButton } from 'features/controlLayers/components/CanvasResetViewButton';
import { CanvasScale } from 'features/controlLayers/components/CanvasScale';
import { CanvasSettingsPopover } from 'features/controlLayers/components/Settings/CanvasSettingsPopover';
import { ToolChooser } from 'features/controlLayers/components/Tool/ToolChooser';
import { ToolFillColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
import { UndoRedoButtonGroup } from 'features/controlLayers/components/UndoRedoButtonGroup';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton';
import { ViewerToggleMenu } from 'features/gallery/components/ImageViewer/ViewerToggleMenu';
import { memo } from 'react';

export const ControlLayersToolbar = memo(() => {
  return (
    <CanvasManagerProviderGate>
      <Flex w="full" gap={2} alignItems="center">
        <ToggleProgressButton />
        <ToolChooser />
        <Spacer />
        <ToolSettings />
        <Spacer />
        <CanvasScale />
        <CanvasResetViewButton />
        <Spacer />
        <ToolFillColorPicker />
        <CanvasModeSwitcher />
        <UndoRedoButtonGroup />
        <CanvasSettingsPopover />
        <ViewerToggleMenu />
      </Flex>
    </CanvasManagerProviderGate>
  );
});

ControlLayersToolbar.displayName = 'ControlLayersToolbar';
@@ -1,37 +1,49 @@
import { Button, ButtonGroup, Flex, Heading } from '@invoke-ai/ui-library';
import { Button, ButtonGroup, Flex, FormControl, FormLabel, Heading, Spacer, Switch } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { FilterSettings } from 'features/controlLayers/components/Filters/FilterSettings';
import { FilterTypeSelect } from 'features/controlLayers/components/Filters/FilterTypeSelect';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import {
  selectAutoProcessFilter,
  settingsAutoProcessFilterToggled,
} from 'features/controlLayers/store/canvasSettingsSlice';
import { type FilterConfig, IMAGE_FILTERS } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';

export const Filter = memo(() => {
const FilterBox = memo(({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
  const { t } = useTranslation();
  const canvasManager = useCanvasManager();
  const config = useStore(canvasManager.filter.$config);
  const isFiltering = useStore(canvasManager.filter.$isFiltering);
  const isProcessing = useStore(canvasManager.filter.$isProcessing);
  const dispatch = useAppDispatch();
  const config = useStore(adapter.filterer.$filterConfig);
  const isProcessing = useStore(adapter.filterer.$isProcessing);
  const hasProcessed = useStore(adapter.filterer.$hasProcessed);
  const autoProcessFilter = useAppSelector(selectAutoProcessFilter);

  const onChangeFilterConfig = useCallback(
    (filterConfig: FilterConfig) => {
      canvasManager.filter.$config.set(filterConfig);
      adapter.filterer.$filterConfig.set(filterConfig);
    },
    [canvasManager.filter.$config]
    [adapter.filterer.$filterConfig]
  );

  const onChangeFilterType = useCallback(
    (filterType: FilterConfig['type']) => {
      canvasManager.filter.$config.set(IMAGE_FILTERS[filterType].buildDefaults());
      adapter.filterer.$filterConfig.set(IMAGE_FILTERS[filterType].buildDefaults());
    },
    [canvasManager.filter.$config]
    [adapter.filterer.$filterConfig]
  );

  if (!isFiltering) {
    return null;
  }
  const onChangeAutoProcessFilter = useCallback(() => {
    dispatch(settingsAutoProcessFilterToggled());
  }, [dispatch]);

  const isValid = useMemo(() => {
    return IMAGE_FILTERS[config.type].validateConfig?.(config as never) ?? true;
  }, [config]);

  return (
    <Flex
@@ -46,36 +58,56 @@ export const Filter = memo(() => {
      transitionProperty="height"
      transitionDuration="normal"
    >
      <Heading size="md" color="base.300" userSelect="none">
        {t('controlLayers.filter.filter')}
      </Heading>
      <Flex w="full">
        <Heading size="md" color="base.300" userSelect="none">
          {t('controlLayers.filter.filter')}
        </Heading>
        <Spacer />
        <FormControl w="min-content">
          <FormLabel m={0}>{t('controlLayers.filter.autoProcess')}</FormLabel>
          <Switch size="sm" isChecked={autoProcessFilter} onChange={onChangeAutoProcessFilter} />
        </FormControl>
      </Flex>
      <FilterTypeSelect filterType={config.type} onChange={onChangeFilterType} />
      <FilterSettings filterConfig={config} onChange={onChangeFilterConfig} />
      <ButtonGroup isAttached={false} size="sm" alignSelf="self-end">
      <ButtonGroup isAttached={false} size="sm" w="full">
        <Button
          variant="ghost"
          leftIcon={<PiShootingStarBold />}
          onClick={canvasManager.filter.previewFilter}
          onClick={adapter.filterer.process}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.preview')}
          loadingText={t('controlLayers.filter.process')}
          isDisabled={!isValid}
        >
          {t('controlLayers.filter.preview')}
          {t('controlLayers.filter.process')}
        </Button>
        <Spacer />
        <Button
          leftIcon={<PiArrowsCounterClockwiseBold />}
          onClick={adapter.filterer.reset}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.reset')}
          variant="ghost"
        >
          {t('controlLayers.filter.reset')}
        </Button>
        <Button
          variant="ghost"
          leftIcon={<PiCheckBold />}
          onClick={canvasManager.filter.applyFilter}
          onClick={adapter.filterer.apply}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.apply')}
          isDisabled={!isValid || !hasProcessed}
        >
          {t('controlLayers.filter.apply')}
        </Button>
        <Button
          variant="ghost"
          leftIcon={<PiXBold />}
          onClick={canvasManager.filter.cancelFilter}
          onClick={adapter.filterer.cancel}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.cancel')}
          isDisabled={!isValid}
        >
          {t('controlLayers.filter.cancel')}
        </Button>
@@ -84,4 +116,16 @@
  );
});

FilterBox.displayName = 'FilterBox';

export const Filter = () => {
  const canvasManager = useCanvasManager();
  const adapter = useStore(canvasManager.stateApi.$filteringAdapter);
  if (!adapter) {
    return null;
  }

  return <FilterBox adapter={adapter} />;
};

Filter.displayName = 'Filter';
@@ -10,6 +10,7 @@ import { FilterMediapipeFace } from 'features/controlLayers/components/Filters/F
import { FilterMidasDepth } from 'features/controlLayers/components/Filters/FilterMidasDepth';
import { FilterMlsdImage } from 'features/controlLayers/components/Filters/FilterMlsdImage';
import { FilterPidi } from 'features/controlLayers/components/Filters/FilterPidi';
import { FilterSpandrel } from 'features/controlLayers/components/Filters/FilterSpandrel';
import type { FilterConfig } from 'features/controlLayers/store/types';
import { IMAGE_FILTERS } from 'features/controlLayers/store/types';
import { memo } from 'react';
@@ -64,6 +65,10 @@ export const FilterSettings = memo(({ filterConfig, onChange }: Props) => {
    return <FilterPidi config={filterConfig} onChange={onChange} />;
  }

  if (filterConfig.type === 'spandrel_filter') {
    return <FilterSpandrel config={filterConfig} onChange={onChange} />;
  }

  return (
    <IAINoContentFallback
      py={4}
@@ -0,0 +1,125 @@
import {
  Box,
  Combobox,
  CompositeNumberInput,
  CompositeSlider,
  Flex,
  FormControl,
  FormHelperText,
  FormLabel,
  Switch,
  Tooltip,
} from '@invoke-ai/ui-library';
import { useModelCombobox } from 'common/hooks/useModelCombobox';
import type { SpandrelFilterConfig } from 'features/controlLayers/store/types';
import { IMAGE_FILTERS } from 'features/controlLayers/store/types';
import type { ChangeEvent } from 'react';
import { useCallback, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useSpandrelImageToImageModels } from 'services/api/hooks/modelsByType';
import type { SpandrelImageToImageModelConfig } from 'services/api/types';

import type { FilterComponentProps } from './types';

type Props = FilterComponentProps<SpandrelFilterConfig>;
const DEFAULTS = IMAGE_FILTERS['spandrel_filter'].buildDefaults();

export const FilterSpandrel = ({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const [modelConfigs, { isLoading }] = useSpandrelImageToImageModels();

  const tooltipLabel = useMemo(() => {
    if (!modelConfigs.length || !config.model) {
      return;
    }
    return modelConfigs.find((m) => m.key === config.model?.key)?.description;
  }, [modelConfigs, config.model]);

  const _onChange = useCallback(
    (v: SpandrelImageToImageModelConfig | null) => {
      onChange({ ...config, model: v });
    },
    [config, onChange]
  );

  const {
    options,
    value,
    onChange: onChangeModel,
    placeholder,
    noOptionsMessage,
  } = useModelCombobox({
    modelConfigs,
    onChange: _onChange,
    selectedModel: config.model,
    isLoading,
  });

  const onScaleChanged = useCallback(
    (v: number) => {
      onChange({ ...config, scale: v });
    },
    [onChange, config]
  );
  const onAutoscaleChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, autoScale: e.target.checked });
    },
    [onChange, config]
  );

  useEffect(() => {
    if (!config.model) {
      onChangeModel(options[0] ?? null);
    }
  }, [config.model, onChangeModel, options]);

  return (
    <>
      <FormControl w="full" orientation="vertical">
        <Flex w="full" alignItems="center">
          <FormLabel m={0} flexGrow={1}>
            {t('controlLayers.filter.spandrel.paramAutoScale')}
          </FormLabel>
          <Switch size="sm" isChecked={config.autoScale} onChange={onAutoscaleChanged} />
        </Flex>
        <FormHelperText>{t('controlLayers.filter.spandrel.paramAutoScaleDesc')}</FormHelperText>
      </FormControl>
      <FormControl isDisabled={!config.autoScale}>
        <FormLabel m={0}>{t('controlLayers.filter.spandrel.paramScale')}</FormLabel>
        <CompositeSlider
          value={config.scale}
          onChange={onScaleChanged}
          defaultValue={DEFAULTS.scale}
          min={1}
          max={16}
        />
        <CompositeNumberInput
          value={config.scale}
          onChange={onScaleChanged}
          defaultValue={DEFAULTS.scale}
          min={1}
          max={16}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlLayers.filter.spandrel.paramModel')}</FormLabel>
        <Tooltip label={tooltipLabel}>
          <Box w="full">
            <Combobox
              value={value}
              placeholder={placeholder}
              options={options}
              onChange={onChangeModel}
              noOptionsMessage={noOptionsMessage}
              isDisabled={options.length === 0}
            />
          </Box>
        </Tooltip>
      </FormControl>
    </>
  );
};

FilterSpandrel.displayName = 'FilterSpandrel';
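`FilterSpandrel` reads `config.model`, `config.scale`, and `config.autoScale`, and seeds its inputs from `IMAGE_FILTERS['spandrel_filter'].buildDefaults()`. The store types are not in this diff; a plausible sketch inferred from that usage (field names come from the component, the default values and the simplified model type are hypothetical):

    // Inferred shape; the real definitions live in features/controlLayers/store/types.
    type ModelIdentifierSketch = { key: string; name: string };

    type SpandrelFilterConfigSketch = {
      type: 'spandrel_filter';
      model: ModelIdentifierSketch | null; // chosen spandrel image-to-image model
      scale: number;                       // target upscale factor, 1-16 in the UI
      autoScale: boolean;                  // scale automatically toward `scale`
    };

    const buildDefaults = (): SpandrelFilterConfigSketch => ({
      type: 'spandrel_filter',
      model: null,     // FilterSpandrel's useEffect then selects the first option
      scale: 4,        // hypothetical default
      autoScale: true, // hypothetical default
    });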
@@ -0,0 +1,24 @@
import { Grid } from '@invoke-ai/ui-library';
import { CanvasHUDItemBbox } from 'features/controlLayers/components/HUD/CanvasHUDItemBbox';
import { CanvasHUDItemScaledBbox } from 'features/controlLayers/components/HUD/CanvasHUDItemScaledBbox';
import { memo } from 'react';

export const CanvasHUD = memo(() => {
  return (
    <Grid
      bg="base.900"
      borderBottomEndRadius="base"
      p={2}
      gap={1}
      borderRadius="base"
      templateColumns="1fr 1fr"
      opacity={0.6}
      minW={64}
    >
      <CanvasHUDItemBbox />
      <CanvasHUDItemScaledBbox />
    </Grid>
  );
});

CanvasHUD.displayName = 'CanvasHUD';
@@ -0,0 +1,26 @@
import { GridItem, Text } from '@invoke-ai/ui-library';
import type { Property } from 'csstype';
import { memo } from 'react';

type Props = {
  label: string;
  value: string | number;
  color?: Property.Color;
};

export const CanvasHUDItem = memo(({ label, value, color }: Props) => {
  return (
    <>
      <GridItem>
        <Text textAlign="end">{label}: </Text>
      </GridItem>
      <GridItem fontWeight="semibold">
        <Text textAlign="end" color={color}>
          {value}
        </Text>
      </GridItem>
    </>
  );
});

CanvasHUDItem.displayName = 'CanvasHUDItem';
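`CanvasHUDItem` emits a label/value pair as two `GridItem`s, so it only lines up inside a two-column grid such as the one `CanvasHUD` renders above. A hedged sketch of how an item like `CanvasHUDItemBbox` might use it (the hard-coded dimensions stand in for values the real component reads from canvas state):

    import { memo } from 'react';

    import { CanvasHUDItem } from 'features/controlLayers/components/HUD/CanvasHUDItem';

    // Illustrative only: the real CanvasHUDItemBbox subscribes to the bbox state.
    const CanvasHUDItemBboxSketch = memo(() => {
      const width = 512; // hypothetical; read from the canvas store in practice
      const height = 768;
      return <CanvasHUDItem label="Bbox" value={`${width}×${height}`} />;
    });

    CanvasHUDItemBboxSketch.displayName = 'CanvasHUDItemBboxSketch';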