Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 12:47:57 -05:00)

Compare commits: v4.2.9.dev...ryan/flux- (783 commits)
783 commits, e0c2b13558 through d35f515413 (author, date, and message metadata not captured).
@@ -11,7 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     Batch,
     BatchStatus,
     CancelByBatchIDsResult,
-    CancelByOriginResult,
+    CancelByDestinationResult,
     ClearResult,
     EnqueueBatchResult,
     PruneResult,
@@ -107,16 +107,18 @@ async def cancel_by_batch_ids(


 @session_queue_router.put(
-    "/{queue_id}/cancel_by_origin",
-    operation_id="cancel_by_origin",
+    "/{queue_id}/cancel_by_destination",
+    operation_id="cancel_by_destination",
     responses={200: {"model": CancelByBatchIDsResult}},
 )
-async def cancel_by_origin(
+async def cancel_by_destination(
     queue_id: str = Path(description="The queue id to perform this operation on"),
-    origin: str = Query(description="The origin to cancel all queue items for"),
-) -> CancelByOriginResult:
+    destination: str = Query(description="The destination to cancel all queue items for"),
+) -> CancelByDestinationResult:
     """Immediately cancels all queue items with the given origin"""
-    return ApiDependencies.invoker.services.session_queue.cancel_by_origin(queue_id=queue_id, origin=origin)
+    return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
+        queue_id=queue_id, destination=destination
+    )


 @session_queue_router.put(
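For reference, a minimal client-side sketch of the renamed endpoint. The base URL and the /api/v1/queue path prefix are assumptions, not shown in this diff:

    import requests

    # Hypothetical call to the renamed endpoint; host, port, and path prefix are assumed.
    queue_id = "default"
    resp = requests.put(
        f"http://localhost:9090/api/v1/queue/{queue_id}/cancel_by_destination",
        params={"destination": "my-app"},  # cancels all pending items tagged with this destination
    )
    print(resp.json())  # e.g. {"canceled": 3}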
@@ -19,7 +19,8 @@ from invokeai.app.invocations.model import CLIPField
 from invokeai.app.invocations.primitives import ConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.ti_utils import generate_ti_list
-from invokeai.backend.lora import LoRAModelRaw
+from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
+from invokeai.backend.lora.lora_patcher import LoraPatcher
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
     BasicConditioningInfo,
@@ -82,9 +83,10 @@ class CompelInvocation(BaseInvocation):
             # apply all patches while the model is on the target device
             text_encoder_info.model_on_device() as (cached_weights, text_encoder),
             tokenizer_info as tokenizer,
-            ModelPatcher.apply_lora_text_encoder(
-                text_encoder,
-                loras=_lora_loader(),
+            LoraPatcher.apply_lora_patches(
+                model=text_encoder,
+                patches=_lora_loader(),
+                prefix="lora_te_",
                 cached_weights=cached_weights,
             ),
             # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -177,9 +179,9 @@ class SDXLPromptInvocationBase:
             # apply all patches while the model is on the target device
             text_encoder_info.model_on_device() as (cached_weights, text_encoder),
             tokenizer_info as tokenizer,
-            ModelPatcher.apply_lora(
+            LoraPatcher.apply_lora_patches(
                 text_encoder,
-                loras=_lora_loader(),
+                patches=_lora_loader(),
                 prefix=lora_prefix,
                 cached_weights=cached_weights,
             ),
@@ -36,7 +36,8 @@ from invokeai.app.invocations.t2i_adapter import T2IAdapterField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.lora import LoRAModelRaw
+from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
+from invokeai.backend.lora.lora_patcher import LoraPatcher
 from invokeai.backend.model_manager import BaseModelType, ModelVariantType
 from invokeai.backend.model_patcher import ModelPatcher
 from invokeai.backend.stable_diffusion import PipelineIntermediateState
@@ -979,9 +980,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
                 ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
                 SeamlessExt.static_patch_model(unet, self.unet.seamless_axes),  # FIXME
                 # Apply the LoRA after unet has been moved to its target device for faster patching.
-                ModelPatcher.apply_lora_unet(
-                    unet,
-                    loras=_lora_loader(),
+                LoraPatcher.apply_lora_patches(
+                    model=unet,
+                    patches=_lora_loader(),
+                    prefix="lora_unet_",
                     cached_weights=cached_weights,
                 ),
             ):
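The call sites above all converge on one pattern: LoraPatcher.apply_lora_patches is used as a context manager that patches a model in place and restores the original weights on exit. A condensed sketch of that pattern, with the signature inferred from these call sites rather than taken from the patcher's definition (which is not part of this diff); load_text_encoder, iter_loras, and run_inference are hypothetical stand-ins:

    # Sketch of the unified patching pattern, assuming the signature seen at the call sites.
    with (
        load_text_encoder() as (cached_weights, text_encoder),  # hypothetical model-cache entry
        LoraPatcher.apply_lora_patches(
            model=text_encoder,
            patches=iter_loras(),           # iterator of (LoRAModelRaw, weight) pairs
            prefix="lora_te_",              # key-prefix filter: "lora_te_", "lora_unet_", or "" for FLUX
            cached_weights=cached_weights,  # unpatched weights, used to restore state on exit
        ),
    ):
        run_inference(text_encoder)  # hypothetical

The prefix argument selects which LoRA state-dict keys target this model, which is why the text encoder, UNet, and FLUX transformer call sites each pass a different value.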
@@ -1,4 +1,4 @@
-from typing import Callable, Optional
+from typing import Callable, Iterator, Optional, Tuple

 import torch
 import torchvision.transforms as tv_transforms
@@ -29,6 +29,8 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
+from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
+from invokeai.backend.lora.lora_patcher import LoraPatcher
 from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
 from invokeai.backend.util.devices import TorchDevice
@@ -187,7 +189,16 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             noise=noise,
         )

-        with transformer_info as transformer:
+        with (
+            transformer_info.model_on_device() as (cached_weights, transformer),
+            # Apply the LoRA after transformer has been moved to its target device for faster patching.
+            LoraPatcher.apply_lora_patches(
+                model=transformer,
+                patches=self._lora_iterator(context),
+                prefix="",
+                cached_weights=cached_weights,
+            ),
+        ):
             assert isinstance(transformer, Flux)

             x = denoise(
@@ -241,6 +252,13 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         # `latents`.
         return mask.expand_as(latents)

+    def _lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[LoRAModelRaw, float]]:
+        for lora in self.transformer.loras:
+            lora_info = context.models.load(lora.lora)
+            assert isinstance(lora_info.model, LoRAModelRaw)
+            yield (lora_info.model, lora.weight)
+            del lora_info
+
     def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
         def step_callback(state: PipelineIntermediateState) -> None:
             state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
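Note that _lora_iterator is a generator, so each LoRA is loaded from the model manager only at the moment the patcher consumes it, rather than materializing every LoRA up front. A hedged sketch of the consumption side (patch_all and apply_patch are hypothetical stand-ins for the patcher's internals):

    # Hypothetical consumer loop, roughly how apply_lora_patches might drive the generator:
    # each iteration triggers exactly one context.models.load(...) inside _lora_iterator,
    # so only the LoRA currently being applied is held alive.
    def patch_all(model, patches):
        for lora_model, weight in patches:  # lazily pulls (LoRAModelRaw, float) pairs
            apply_patch(model, lora_model, weight)  # hypothetical per-model patching step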
invokeai/app/invocations/flux_lora_loader.py (new file, +53 lines)
@@ -0,0 +1,53 @@
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import LoRAField, ModelIdentifierField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation_output("flux_lora_loader_output")
class FluxLoRALoaderOutput(BaseInvocationOutput):
    """FLUX LoRA Loader Output"""

    transformer: TransformerField = OutputField(
        default=None, description=FieldDescriptions.transformer, title="FLUX Transformer"
    )


@invocation(
    "flux_lora_loader",
    title="FLUX LoRA",
    tags=["lora", "model", "flux"],
    category="model",
    version="1.0.0",
)
class FluxLoRALoaderInvocation(BaseInvocation):
    """Apply a LoRA model to a FLUX transformer."""

    lora: ModelIdentifierField = InputField(
        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
    )
    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
    transformer: TransformerField = InputField(
        description=FieldDescriptions.transformer,
        input=Input.Connection,
        title="FLUX Transformer",
    )

    def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
        lora_key = self.lora.key

        if not context.models.exists(lora_key):
            raise ValueError(f"Unknown lora: {lora_key}!")

        if any(lora.lora.key == lora_key for lora in self.transformer.loras):
            raise Exception(f'LoRA "{lora_key}" already applied to transformer.')

        transformer = self.transformer.model_copy(deep=True)
        transformer.loras.append(
            LoRAField(
                lora=self.lora,
                weight=self.weight,
            )
        )

        return FluxLoRALoaderOutput(transformer=transformer)
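Because the loader deep-copies the incoming TransformerField and appends a LoRAField, chaining several FLUX LoRA nodes simply accumulates entries in loras, which FluxDenoiseInvocation._lora_iterator later walks. A sketch of that chaining outside a real graph; flux_transformer_id, lora_a, lora_b, and ctx are hypothetical identifiers:

    # Hypothetical chaining of two FLUX LoRA loader nodes.
    tf = TransformerField(transformer=flux_transformer_id, loras=[])  # as produced by the model loader
    out1 = FluxLoRALoaderInvocation(lora=lora_a, weight=0.75, transformer=tf).invoke(ctx)
    out2 = FluxLoRALoaderInvocation(lora=lora_b, weight=0.5, transformer=out1.transformer).invoke(ctx)
    assert [f.lora.key for f in out2.transformer.loras] == [lora_a.key, lora_b.key]
    assert tf.loras == []  # the deep copy leaves the upstream field untouched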
@@ -69,6 +69,7 @@ class CLIPField(BaseModel):

 class TransformerField(BaseModel):
     transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
+    loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")


 class T5EncoderField(BaseModel):
@@ -202,7 +203,7 @@ class FluxModelLoaderInvocation(BaseInvocation):
         assert isinstance(transformer_config, CheckpointConfigBase)

         return FluxModelLoaderOutput(
-            transformer=TransformerField(transformer=transformer),
+            transformer=TransformerField(transformer=transformer, loras=[]),
             clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
             t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
             vae=VAEField(vae=vae),
@@ -22,8 +22,8 @@ from invokeai.app.invocations.fields import (
 from invokeai.app.invocations.model import UNetField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
-from invokeai.backend.lora import LoRAModelRaw
-from invokeai.backend.model_patcher import ModelPatcher
+from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
+from invokeai.backend.lora.lora_patcher import LoraPatcher
 from invokeai.backend.stable_diffusion.diffusers_pipeline import ControlNetData, PipelineIntermediateState
 from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import (
     MultiDiffusionPipeline,
@@ -204,7 +204,11 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
         # Load the UNet model.
         unet_info = context.models.load(self.unet.unet)

-        with ExitStack() as exit_stack, unet_info as unet, ModelPatcher.apply_lora_unet(unet, _lora_loader()):
+        with (
+            ExitStack() as exit_stack,
+            unet_info as unet,
+            LoraPatcher.apply_lora_patches(model=unet, patches=_lora_loader(), prefix="lora_unet_"),
+        ):
             assert isinstance(unet, UNet2DConditionModel)
             latents = latents.to(device=unet.device, dtype=unet.dtype)
             if noise is not None:
@@ -6,7 +6,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     Batch,
     BatchStatus,
     CancelByBatchIDsResult,
-    CancelByOriginResult,
+    CancelByDestinationResult,
     CancelByQueueIDResult,
     ClearResult,
     EnqueueBatchResult,
@@ -97,8 +97,8 @@ class SessionQueueBase(ABC):
         pass

     @abstractmethod
-    def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
-        """Cancels all queue items with the given batch origin"""
+    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
+        """Cancels all queue items with the given batch destination"""
         pass

     @abstractmethod
@@ -346,10 +346,10 @@ class CancelByBatchIDsResult(BaseModel):
     canceled: int = Field(..., description="Number of queue items canceled")


-class CancelByOriginResult(BaseModel):
-    """Result of canceling by list of batch ids"""
+class CancelByDestinationResult(CancelByBatchIDsResult):
+    """Result of canceling by a destination"""

-    canceled: int = Field(..., description="Number of queue items canceled")
+    pass


 class CancelByQueueIDResult(CancelByBatchIDsResult):
@@ -10,7 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
     Batch,
     BatchStatus,
     CancelByBatchIDsResult,
-    CancelByOriginResult,
+    CancelByDestinationResult,
     CancelByQueueIDResult,
     ClearResult,
     EnqueueBatchResult,
@@ -426,19 +426,19 @@ class SqliteSessionQueue(SessionQueueBase):
             self.__lock.release()
         return CancelByBatchIDsResult(canceled=count)

-    def cancel_by_origin(self, queue_id: str, origin: str) -> CancelByOriginResult:
+    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
         try:
             current_queue_item = self.get_current(queue_id)
             self.__lock.acquire()
             where = """--sql
                 WHERE
                     queue_id == ?
-                    AND origin == ?
+                    AND destination == ?
                     AND status != 'canceled'
                     AND status != 'completed'
                     AND status != 'failed'
                 """
-            params = (queue_id, origin)
+            params = (queue_id, destination)
             self.__cursor.execute(
                 f"""--sql
                 SELECT COUNT(*)
@@ -457,14 +457,14 @@ class SqliteSessionQueue(SessionQueueBase):
                 params,
             )
             self.__conn.commit()
-            if current_queue_item is not None and current_queue_item.origin == origin:
+            if current_queue_item is not None and current_queue_item.destination == destination:
                 self._set_queue_item_status(current_queue_item.item_id, "canceled")
         except Exception:
             self.__conn.rollback()
             raise
         finally:
             self.__lock.release()
-        return CancelByOriginResult(canceled=count)
+        return CancelByDestinationResult(canceled=count)

     def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
         try:
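The hunk shows only the shared WHERE fragment and the COUNT query; presumably the same fragment is interpolated into the UPDATE that performs the cancellation, and the currently executing item is canceled separately because a row update alone would not interrupt it. A sketch of that composition; the session_queue table name and the UPDATE statement are assumptions, not shown in this diff:

    # Hypothetical composition of the shared WHERE fragment (table name assumed).
    where = """--sql
        WHERE
            queue_id == ?
            AND destination == ?
            AND status != 'canceled'
            AND status != 'completed'
            AND status != 'failed'
    """
    params = ("default", "my-app")
    count_sql = f"--sql\n    SELECT COUNT(*)\n    FROM session_queue\n    {where}"
    cancel_sql = f"--sql\n    UPDATE session_queue\n    SET status = 'canceled'\n    {where}"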
@@ -1,672 +0,0 @@
# Copyright (c) 2024 The InvokeAI Development team
"""LoRA model support."""

import bisect
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union

import torch
from safetensors.torch import load_file
from typing_extensions import Self

import invokeai.backend.util.logging as logger
from invokeai.backend.model_manager import BaseModelType
from invokeai.backend.raw_model import RawModel


class LoRALayerBase:
    # rank: Optional[int]
    # alpha: Optional[float]
    # bias: Optional[torch.Tensor]
    # layer_key: str

    # @property
    # def scale(self):
    #     return self.alpha / self.rank if (self.alpha and self.rank) else 1.0

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        if "alpha" in values:
            self.alpha = values["alpha"].item()
        else:
            self.alpha = None

        if "bias_indices" in values and "bias_values" in values and "bias_size" in values:
            self.bias: Optional[torch.Tensor] = torch.sparse_coo_tensor(
                values["bias_indices"],
                values["bias_values"],
                tuple(values["bias_size"]),
            )

        else:
            self.bias = None

        self.rank = None  # set in layer implementation
        self.layer_key = layer_key

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError()

    def get_bias(self, orig_bias: torch.Tensor) -> Optional[torch.Tensor]:
        return self.bias

    def get_parameters(self, orig_module: torch.nn.Module) -> Dict[str, torch.Tensor]:
        params = {"weight": self.get_weight(orig_module.weight)}
        bias = self.get_bias(orig_module.bias)
        if bias is not None:
            params["bias"] = bias
        return params

    def calc_size(self) -> int:
        model_size = 0
        for val in [self.bias]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        if self.bias is not None:
            self.bias = self.bias.to(device=device, dtype=dtype)

    def check_keys(self, values: Dict[str, torch.Tensor], known_keys: Set[str]):
        """Log a warning if values contains unhandled keys."""
        # {"alpha", "bias_indices", "bias_values", "bias_size"} are hard-coded, because they are handled by
        # `LoRALayerBase`. Sub-classes should provide the known_keys that they handled.
        all_known_keys = known_keys | {"alpha", "bias_indices", "bias_values", "bias_size"}
        unknown_keys = set(values.keys()) - all_known_keys
        if unknown_keys:
            logger.warning(
                f"Unexpected keys found in LoRA/LyCORIS layer, model might work incorrectly! Keys: {unknown_keys}"
            )


# TODO: find and debug lora/locon with bias
class LoRALayer(LoRALayerBase):
    # up: torch.Tensor
    # mid: Optional[torch.Tensor]
    # down: torch.Tensor

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.up = values["lora_up.weight"]
        self.down = values["lora_down.weight"]
        self.mid = values.get("lora_mid.weight", None)

        self.rank = self.down.shape[0]
        self.check_keys(
            values,
            {
                "lora_up.weight",
                "lora_down.weight",
                "lora_mid.weight",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        if self.mid is not None:
            up = self.up.reshape(self.up.shape[0], self.up.shape[1])
            down = self.down.reshape(self.down.shape[0], self.down.shape[1])
            weight = torch.einsum("m n w h, i m, n j -> i j w h", self.mid, up, down)
        else:
            weight = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.up, self.mid, self.down]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.up = self.up.to(device=device, dtype=dtype)
        self.down = self.down.to(device=device, dtype=dtype)

        if self.mid is not None:
            self.mid = self.mid.to(device=device, dtype=dtype)


class LoHALayer(LoRALayerBase):
    # w1_a: torch.Tensor
    # w1_b: torch.Tensor
    # w2_a: torch.Tensor
    # w2_b: torch.Tensor
    # t1: Optional[torch.Tensor] = None
    # t2: Optional[torch.Tensor] = None

    def __init__(self, layer_key: str, values: Dict[str, torch.Tensor]):
        super().__init__(layer_key, values)

        self.w1_a = values["hada_w1_a"]
        self.w1_b = values["hada_w1_b"]
        self.w2_a = values["hada_w2_a"]
        self.w2_b = values["hada_w2_b"]
        self.t1 = values.get("hada_t1", None)
        self.t2 = values.get("hada_t2", None)

        self.rank = self.w1_b.shape[0]
        self.check_keys(
            values,
            {
                "hada_w1_a",
                "hada_w1_b",
                "hada_w2_a",
                "hada_w2_b",
                "hada_t1",
                "hada_t2",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        if self.t1 is None:
            weight: torch.Tensor = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b)

        else:
            rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", self.t1, self.w1_b, self.w1_a)
            rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", self.t2, self.w2_b, self.w2_a)
            weight = rebuild1 * rebuild2

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.w1_a = self.w1_a.to(device=device, dtype=dtype)
        self.w1_b = self.w1_b.to(device=device, dtype=dtype)
        if self.t1 is not None:
            self.t1 = self.t1.to(device=device, dtype=dtype)

        self.w2_a = self.w2_a.to(device=device, dtype=dtype)
        self.w2_b = self.w2_b.to(device=device, dtype=dtype)
        if self.t2 is not None:
            self.t2 = self.t2.to(device=device, dtype=dtype)


class LoKRLayer(LoRALayerBase):
    # w1: Optional[torch.Tensor] = None
    # w1_a: Optional[torch.Tensor] = None
    # w1_b: Optional[torch.Tensor] = None
    # w2: Optional[torch.Tensor] = None
    # w2_a: Optional[torch.Tensor] = None
    # w2_b: Optional[torch.Tensor] = None
    # t2: Optional[torch.Tensor] = None

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.w1 = values.get("lokr_w1", None)
        if self.w1 is None:
            self.w1_a = values["lokr_w1_a"]
            self.w1_b = values["lokr_w1_b"]
        else:
            self.w1_b = None
            self.w1_a = None

        self.w2 = values.get("lokr_w2", None)
        if self.w2 is None:
            self.w2_a = values["lokr_w2_a"]
            self.w2_b = values["lokr_w2_b"]
        else:
            self.w2_a = None
            self.w2_b = None

        self.t2 = values.get("lokr_t2", None)

        if self.w1_b is not None:
            self.rank = self.w1_b.shape[0]
        elif self.w2_b is not None:
            self.rank = self.w2_b.shape[0]
        else:
            self.rank = None  # unscaled

        self.check_keys(
            values,
            {
                "lokr_w1",
                "lokr_w1_a",
                "lokr_w1_b",
                "lokr_w2",
                "lokr_w2_a",
                "lokr_w2_b",
                "lokr_t2",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        w1: Optional[torch.Tensor] = self.w1
        if w1 is None:
            assert self.w1_a is not None
            assert self.w1_b is not None
            w1 = self.w1_a @ self.w1_b

        w2 = self.w2
        if w2 is None:
            if self.t2 is None:
                assert self.w2_a is not None
                assert self.w2_b is not None
                w2 = self.w2_a @ self.w2_b
            else:
                w2 = torch.einsum("i j k l, i p, j r -> p r k l", self.t2, self.w2_a, self.w2_b)

        if len(w2.shape) == 4:
            w1 = w1.unsqueeze(2).unsqueeze(2)
        w2 = w2.contiguous()
        assert w1 is not None
        assert w2 is not None
        weight = torch.kron(w1, w2)

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        if self.w1 is not None:
            self.w1 = self.w1.to(device=device, dtype=dtype)
        else:
            assert self.w1_a is not None
            assert self.w1_b is not None
            self.w1_a = self.w1_a.to(device=device, dtype=dtype)
            self.w1_b = self.w1_b.to(device=device, dtype=dtype)

        if self.w2 is not None:
            self.w2 = self.w2.to(device=device, dtype=dtype)
        else:
            assert self.w2_a is not None
            assert self.w2_b is not None
            self.w2_a = self.w2_a.to(device=device, dtype=dtype)
            self.w2_b = self.w2_b.to(device=device, dtype=dtype)

        if self.t2 is not None:
            self.t2 = self.t2.to(device=device, dtype=dtype)


class FullLayer(LoRALayerBase):
    # bias handled in LoRALayerBase(calc_size, to)
    # weight: torch.Tensor
    # bias: Optional[torch.Tensor]

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.weight = values["diff"]
        self.bias = values.get("diff_b", None)

        self.rank = None  # unscaled
        self.check_keys(values, {"diff", "diff_b"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        return self.weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)


class IA3Layer(LoRALayerBase):
    # weight: torch.Tensor
    # on_input: torch.Tensor

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.weight = values["weight"]
        self.on_input = values["on_input"]

        self.rank = None  # unscaled
        self.check_keys(values, {"weight", "on_input"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        weight = self.weight
        if not self.on_input:
            weight = weight.reshape(-1, 1)
        assert orig_weight is not None
        return orig_weight * weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        model_size += self.on_input.nelement() * self.on_input.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)
        self.on_input = self.on_input.to(device=device, dtype=dtype)


class NormLayer(LoRALayerBase):
    # bias handled in LoRALayerBase(calc_size, to)
    # weight: torch.Tensor
    # bias: Optional[torch.Tensor]

    def __init__(
        self,
        layer_key: str,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(layer_key, values)

        self.weight = values["w_norm"]
        self.bias = values.get("b_norm", None)

        self.rank = None  # unscaled
        self.check_keys(values, {"w_norm", "b_norm"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        return self.weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)


AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer]


class LoRAModelRaw(RawModel):  # (torch.nn.Module):
    _name: str
    layers: Dict[str, AnyLoRALayer]

    def __init__(
        self,
        name: str,
        layers: Dict[str, AnyLoRALayer],
    ):
        self._name = name
        self.layers = layers

    @property
    def name(self) -> str:
        return self._name

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        # TODO: try revert if exception?
        for _key, layer in self.layers.items():
            layer.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        model_size = 0
        for _, layer in self.layers.items():
            model_size += layer.calc_size()
        return model_size

    @classmethod
    def _convert_sdxl_keys_to_diffusers_format(cls, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Convert the keys of an SDXL LoRA state_dict to diffusers format.

        The input state_dict can be in either Stability AI format or diffusers format. If the state_dict is already in
        diffusers format, then this function will have no effect.

        This function is adapted from:
        https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L385-L409

        Args:
            state_dict (Dict[str, Tensor]): The SDXL LoRA state_dict.

        Raises:
            ValueError: If state_dict contains an unrecognized key, or not all keys could be converted.

        Returns:
            Dict[str, Tensor]: The diffusers-format state_dict.
        """
        converted_count = 0  # The number of Stability AI keys converted to diffusers format.
        not_converted_count = 0  # The number of keys that were not converted.

        # Get a sorted list of Stability AI UNet keys so that we can efficiently search for keys with matching prefixes.
        # For example, we want to efficiently find `input_blocks_4_1` in the list when searching for
        # `input_blocks_4_1_proj_in`.
        stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
        stability_unet_keys.sort()

        new_state_dict = {}
        for full_key, value in state_dict.items():
            if full_key.startswith("lora_unet_"):
                search_key = full_key.replace("lora_unet_", "")
                # Use bisect to find the key in stability_unet_keys that *may* match the search_key's prefix.
                position = bisect.bisect_right(stability_unet_keys, search_key)
                map_key = stability_unet_keys[position - 1]
                # Now, check if the map_key *actually* matches the search_key.
                if search_key.startswith(map_key):
                    new_key = full_key.replace(map_key, SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP[map_key])
                    new_state_dict[new_key] = value
                    converted_count += 1
                else:
                    new_state_dict[full_key] = value
                    not_converted_count += 1
            elif full_key.startswith("lora_te1_") or full_key.startswith("lora_te2_"):
                # The CLIP text encoders have the same keys in both Stability AI and diffusers formats.
                new_state_dict[full_key] = value
                continue
            else:
                raise ValueError(f"Unrecognized SDXL LoRA key prefix: '{full_key}'.")

        if converted_count > 0 and not_converted_count > 0:
            raise ValueError(
                f"The SDXL LoRA could only be partially converted to diffusers format. converted={converted_count},"
                f" not_converted={not_converted_count}"
            )

        return new_state_dict

    @classmethod
    def from_checkpoint(
        cls,
        file_path: Union[str, Path],
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
        base_model: Optional[BaseModelType] = None,
    ) -> Self:
        device = device or torch.device("cpu")
        dtype = dtype or torch.float32

        if isinstance(file_path, str):
            file_path = Path(file_path)

        model = cls(
            name=file_path.stem,
            layers={},
        )

        if file_path.suffix == ".safetensors":
            sd = load_file(file_path.absolute().as_posix(), device="cpu")
        else:
            sd = torch.load(file_path, map_location="cpu")

        state_dict = cls._group_state(sd)

        if base_model == BaseModelType.StableDiffusionXL:
            state_dict = cls._convert_sdxl_keys_to_diffusers_format(state_dict)

        for layer_key, values in state_dict.items():
            # Detect layers according to LyCORIS detection logic(`weight_list_det`)
            # https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules

            # lora and locon
            if "lora_up.weight" in values:
                layer: AnyLoRALayer = LoRALayer(layer_key, values)

            # loha
            elif "hada_w1_a" in values:
                layer = LoHALayer(layer_key, values)

            # lokr
            elif "lokr_w1" in values or "lokr_w1_a" in values:
                layer = LoKRLayer(layer_key, values)

            # diff
            elif "diff" in values:
                layer = FullLayer(layer_key, values)

            # ia3
            elif "on_input" in values:
                layer = IA3Layer(layer_key, values)

            # norms
            elif "w_norm" in values:
                layer = NormLayer(layer_key, values)

            else:
                print(f">> Encountered unknown lora layer module in {model.name}: {layer_key} - {list(values.keys())}")
                raise Exception("Unknown lora format!")

            # lower memory consumption by removing already parsed layer values
            state_dict[layer_key].clear()

            layer.to(device=device, dtype=dtype)
            model.layers[layer_key] = layer

        return model

    @staticmethod
    def _group_state(state_dict: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, torch.Tensor]]:
        state_dict_groupped: Dict[str, Dict[str, torch.Tensor]] = {}

        for key, value in state_dict.items():
            stem, leaf = key.split(".", 1)
            if stem not in state_dict_groupped:
                state_dict_groupped[stem] = {}
            state_dict_groupped[stem][leaf] = value

        return state_dict_groupped


# code from
# https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L15C1-L97C32
def make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
    """Create a dict mapping state_dict keys from Stability AI SDXL format to diffusers SDXL format."""
    unet_conversion_map_layer = []

    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commentout for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # change for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [
        # (stable-diffusion, HF Diffusers)
        ("in_layers.0.", "norm1."),
        ("in_layers.2.", "conv1."),
        ("out_layers.0.", "norm2."),
        ("out_layers.3.", "conv2."),
        ("emb_layers.1.", "time_emb_proj."),
        ("skip_connection.", "conv_shortcut."),
    ]

    unet_conversion_map = []
    for sd, hf in unet_conversion_map_layer:
        if "resnets" in hf:
            for sd_res, hf_res in unet_conversion_map_resnet:
                unet_conversion_map.append((sd + sd_res, hf + hf_res))
        else:
            unet_conversion_map.append((sd, hf))

    for j in range(2):
        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
        sd_time_embed_prefix = f"time_embed.{j*2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
        sd_label_embed_prefix = f"label_emb.0.{j*2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
    unet_conversion_map.append(("out.0.", "conv_norm_out."))
    unet_conversion_map.append(("out.2.", "conv_out."))

    return unet_conversion_map


SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP = {
    sd.rstrip(".").replace(".", "_"): hf.rstrip(".").replace(".", "_") for sd, hf in make_sdxl_unet_conversion_map()
}
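Before it was split into the new invokeai/backend/lora package, this module carried the core low-rank math. A minimal numeric sketch of the reconstruction in LoRALayer.get_weight for a plain LoRA (no mid tensor), with illustrative shapes; the alpha/rank scaling mirrors the commented-out scale property above:

    import torch

    # For a plain LoRA, the weight delta is up @ down; alpha / rank scales it
    # when both are set (else the scale is 1.0).
    rank, n_in, n_out = 4, 16, 32
    down = torch.randn(rank, n_in)   # values["lora_down.weight"]
    up = torch.randn(n_out, rank)    # values["lora_up.weight"]
    alpha = 4.0

    delta_w = up.reshape(n_out, -1) @ down.reshape(rank, -1)  # as in LoRALayer.get_weight
    assert delta_w.shape == (n_out, n_in)

    orig = torch.randn(n_out, n_in)
    patched = orig + (alpha / rank) * delta_w  # rank-4 update applied to a full weight matrix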
invokeai/backend/lora/__init__.py (new file, 0 lines)
invokeai/backend/lora/conversions/__init__.py (new file, 0 lines)
invokeai/backend/lora/conversions/flux_diffusers_lora_conversion_utils.py (new file)
@@ -0,0 +1,210 @@
from typing import Dict

import torch

from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw


def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Tensor]) -> bool:
    """Checks if the provided state dict is likely in the Diffusers FLUX LoRA format.

    This is intended to be a reasonably high-precision detector, but it is not guaranteed to have perfect precision.
    (A perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
    """
    # First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
    all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())

    # Next, check that this is likely a FLUX model by spot-checking a few keys.
    expected_keys = [
        "transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
        "transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
        "transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
        "transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
    ]
    all_expected_keys_present = all(k in state_dict for k in expected_keys)

    return all_keys_in_peft_format and all_expected_keys_present


# TODO(ryand): What alpha should we use? 1.0? Rank of the LoRA?
def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor], alpha: float = 1.0) -> LoRAModelRaw:
    """Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.

    This function is based on:
    https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
    """
    # Group keys by layer.
    grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)

    # Remove the "transformer." prefix from all keys.
    grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}

    # Constants for FLUX.1
    num_double_layers = 19
    num_single_layers = 38
    # inner_dim = 3072
    # mlp_ratio = 4.0

    layers: dict[str, AnyLoRALayer] = {}

    def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
        if src_key in grouped_state_dict:
            src_layer_dict = grouped_state_dict.pop(src_key)
            layers[dst_key] = LoRALayer(
                values={
                    "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                    "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
                    "alpha": torch.tensor(alpha),
                },
            )
            assert len(src_layer_dict) == 0

    def add_qkv_lora_layer_if_present(src_keys: list[str], dst_qkv_key: str) -> None:
        """Handle the Q, K, V matrices for a transformer block. We need special handling because the diffusers format
        stores them in separate matrices, whereas the BFL format used internally by InvokeAI concatenates them.
        """
        # We expect that either all src keys are present or none of them are. Verify this.
        keys_present = [key in grouped_state_dict for key in src_keys]
        assert all(keys_present) or not any(keys_present)

        # If none of the keys are present, return early.
        if not any(keys_present):
            return

        src_layer_dicts = [grouped_state_dict.pop(key) for key in src_keys]
        sub_layers: list[LoRALayerBase] = []
        for src_layer_dict in src_layer_dicts:
            sub_layers.append(
                LoRALayer(
                    values={
                        "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                        "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
                        "alpha": torch.tensor(alpha),
                    },
                )
            )
            assert len(src_layer_dict) == 0
        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers, concat_axis=0)

    # time_text_embed.timestep_embedder -> time_in.
    add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
    add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_2", "time_in.out_layer")

    # time_text_embed.text_embedder -> vector_in.
    add_lora_layer_if_present("time_text_embed.text_embedder.linear_1", "vector_in.in_layer")
    add_lora_layer_if_present("time_text_embed.text_embedder.linear_2", "vector_in.out_layer")

    # time_text_embed.guidance_embedder -> guidance_in.
    add_lora_layer_if_present("time_text_embed.guidance_embedder.linear_1", "guidance_in")
    add_lora_layer_if_present("time_text_embed.guidance_embedder.linear_2", "guidance_in")

    # context_embedder -> txt_in.
    add_lora_layer_if_present("context_embedder", "txt_in")

    # x_embedder -> img_in.
    add_lora_layer_if_present("x_embedder", "img_in")

    # Double transformer blocks.
    for i in range(num_double_layers):
        # norms.
        add_lora_layer_if_present(f"transformer_blocks.{i}.norm1.linear", f"double_blocks.{i}.img_mod.lin")
        add_lora_layer_if_present(f"transformer_blocks.{i}.norm1_context.linear", f"double_blocks.{i}.txt_mod.lin")

        # Q, K, V
        add_qkv_lora_layer_if_present(
            [
                f"transformer_blocks.{i}.attn.to_q",
                f"transformer_blocks.{i}.attn.to_k",
                f"transformer_blocks.{i}.attn.to_v",
            ],
            f"double_blocks.{i}.img_attn.qkv",
        )
        add_qkv_lora_layer_if_present(
            [
                f"transformer_blocks.{i}.attn.add_q_proj",
                f"transformer_blocks.{i}.attn.add_k_proj",
                f"transformer_blocks.{i}.attn.add_v_proj",
            ],
            f"double_blocks.{i}.txt_attn.qkv",
        )

        # ff img_mlp
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.ff.net.0.proj",
            f"double_blocks.{i}.img_mlp.0",
        )
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.ff.net.2",
            f"double_blocks.{i}.img_mlp.2",
        )

        # ff txt_mlp
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.ff_context.net.0.proj",
            f"double_blocks.{i}.txt_mlp.0",
        )
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.ff_context.net.2",
            f"double_blocks.{i}.txt_mlp.2",
        )

        # output projections.
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.attn.to_out.0",
            f"double_blocks.{i}.img_attn.proj",
        )
        add_lora_layer_if_present(
            f"transformer_blocks.{i}.attn.to_add_out",
            f"double_blocks.{i}.txt_attn.proj",
        )

    # Single transformer blocks.
    for i in range(num_single_layers):
        # norms
        add_lora_layer_if_present(
            f"single_transformer_blocks.{i}.norm.linear",
            f"single_blocks.{i}.modulation.lin",
        )

        # Q, K, V, mlp
        add_qkv_lora_layer_if_present(
            [
                f"single_transformer_blocks.{i}.attn.to_q",
                f"single_transformer_blocks.{i}.attn.to_k",
                f"single_transformer_blocks.{i}.attn.to_v",
                f"single_transformer_blocks.{i}.proj_mlp",
            ],
            f"single_blocks.{i}.linear1",
        )

        # Output projections.
        add_lora_layer_if_present(
            f"single_transformer_blocks.{i}.proj_out",
            f"single_blocks.{i}.linear2",
        )

    # Final layer.
    add_lora_layer_if_present("proj_out", "final_layer.linear")

    # Assert that all keys were processed.
    assert len(grouped_state_dict) == 0

    return LoRAModelRaw(layers=layers)


def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
    """Groups the keys in the state dict by layer."""
    layer_dict: dict[str, dict[str, torch.Tensor]] = {}
    for key in state_dict:
        # Split the 'lora_A.weight' or 'lora_B.weight' suffix from the layer name.
        parts = key.rsplit(".", maxsplit=2)
        layer_name = parts[0]
        key_name = ".".join(parts[1:])
        if layer_name not in layer_dict:
            layer_dict[layer_name] = {}
        layer_dict[layer_name][key_name] = state_dict[key]
    return layer_dict
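A minimal usage sketch for the helpers above (illustrative only, not part of the diff; the tensor shapes are placeholders rather than real FLUX dimensions):

import torch

peft_sd = {
    "transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight": torch.zeros(16, 64),
    "transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight": torch.zeros(64, 16),
}

# rsplit(".", maxsplit=2) peels off the trailing "lora_A.weight"/"lora_B.weight",
# leaving the layer name as the grouping key.
grouped = _group_by_layer(peft_sd)
assert list(grouped.keys()) == ["transformer.single_transformer_blocks.0.attn.to_q"]
assert set(grouped["transformer.single_transformer_blocks.0.attn.to_q"].keys()) == {
    "lora_A.weight",
    "lora_B.weight",
}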
invokeai/backend/lora/conversions/flux_kohya_lora_conversion_utils.py (new file)
@@ -0,0 +1,80 @@
import re
from typing import Any, Dict, TypeVar

import torch

from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw

# A regex pattern that matches all of the keys in the Kohya FLUX LoRA format.
# Example keys:
#   lora_unet_double_blocks_0_img_attn_proj.alpha
#   lora_unet_double_blocks_0_img_attn_proj.lora_down.weight
#   lora_unet_double_blocks_0_img_attn_proj.lora_up.weight
FLUX_KOHYA_KEY_REGEX = (
    r"lora_unet_(\w+_blocks)_(\d+)_(img_attn|img_mlp|img_mod|txt_attn|txt_mlp|txt_mod|linear1|linear2|modulation)_?(.*)"
)


def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> bool:
    """Checks if the provided state dict is likely in the Kohya FLUX LoRA format.

    This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A
    perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
    """
    return all(re.match(FLUX_KOHYA_KEY_REGEX, k) for k in state_dict.keys())


def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
    # Group keys by layer.
    grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
    for key, value in state_dict.items():
        layer_name, param_name = key.split(".", 1)
        if layer_name not in grouped_state_dict:
            grouped_state_dict[layer_name] = {}
        grouped_state_dict[layer_name][param_name] = value

    # Convert the state dict to the InvokeAI format.
    grouped_state_dict = convert_flux_kohya_state_dict_to_invoke_format(grouped_state_dict)

    # Create LoRA layers.
    layers: dict[str, AnyLoRALayer] = {}
    for layer_key, layer_state_dict in grouped_state_dict.items():
        layers[layer_key] = any_lora_layer_from_state_dict(layer_state_dict)

    # Create and return the LoRAModelRaw.
    return LoRAModelRaw(layers=layers)


T = TypeVar("T")


def convert_flux_kohya_state_dict_to_invoke_format(state_dict: Dict[str, T]) -> Dict[str, T]:
    """Converts a state dict from the Kohya FLUX LoRA format to the LoRA weight format used internally by InvokeAI.

    Example key conversions:

    "lora_unet_double_blocks_0_img_attn_proj" -> "double_blocks.0.img_attn.proj"
    "lora_unet_double_blocks_0_img_attn_qkv" -> "double_blocks.0.img_attn.qkv"
    """

    def replace_func(match: re.Match[str]) -> str:
        s = f"{match.group(1)}.{match.group(2)}.{match.group(3)}"
        if match.group(4):
            s += f".{match.group(4)}"
        return s

    converted_dict: dict[str, T] = {}
    for k, v in state_dict.items():
        match = re.match(FLUX_KOHYA_KEY_REGEX, k)
        if match:
            new_key = re.sub(FLUX_KOHYA_KEY_REGEX, replace_func, k)
            converted_dict[new_key] = v
        else:
            raise ValueError(f"Key '{k}' does not match the expected pattern for FLUX LoRA weights.")

    return converted_dict
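An illustrative trace of FLUX_KOHYA_KEY_REGEX against a flattened Kohya layer name (a sketch, not part of the diff):

import re

layer_name = "lora_unet_double_blocks_0_img_attn_proj"
m = re.match(FLUX_KOHYA_KEY_REGEX, layer_name)
assert m is not None
# Four captured groups; replace_func() rejoins them with '.' separators, skipping
# group(4) when it is empty (e.g. for "..._linear1" keys).
assert m.groups() == ("double_blocks", "0", "img_attn", "proj")
assert re.sub(FLUX_KOHYA_KEY_REGEX, lambda mt: ".".join(g for g in mt.groups() if g), layer_name) == (
    "double_blocks.0.img_attn.proj"
)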
invokeai/backend/lora/conversions/sd_lora_conversion_utils.py (new file)
@@ -0,0 +1,29 @@
from typing import Dict

import torch

from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw


def lora_model_from_sd_state_dict(state_dict: Dict[str, torch.Tensor]) -> LoRAModelRaw:
    grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_state(state_dict)

    layers: dict[str, AnyLoRALayer] = {}
    for layer_key, values in grouped_state_dict.items():
        layers[layer_key] = any_lora_layer_from_state_dict(values)

    return LoRAModelRaw(layers=layers)


def _group_state(state_dict: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, torch.Tensor]]:
    state_dict_grouped: Dict[str, Dict[str, torch.Tensor]] = {}

    for key, value in state_dict.items():
        stem, leaf = key.split(".", 1)
        if stem not in state_dict_grouped:
            state_dict_grouped[stem] = {}
        state_dict_grouped[stem][leaf] = value

    return state_dict_grouped
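For contrast with _group_by_layer() in the FLUX diffusers converter above: _group_state() splits at the first '.', since SD-style keys put the whole flattened layer name before the first dot. A small sketch (not part of the diff; the key and shapes are made up):

import torch

sd = {
    "lora_unet_down_blocks_0_attentions_0_proj_in.lora_up.weight": torch.zeros(4, 4),
    "lora_unet_down_blocks_0_attentions_0_proj_in.alpha": torch.tensor(4.0),
}
grouped = _group_state(sd)
# The stem before the first '.' becomes the layer key; everything after it is the param name.
assert set(grouped["lora_unet_down_blocks_0_attentions_0_proj_in"].keys()) == {"lora_up.weight", "alpha"}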
invokeai/backend/lora/conversions/sdxl_lora_conversion_utils.py (new file, 154 lines)
@@ -0,0 +1,154 @@
import bisect
from typing import Dict, List, Tuple, TypeVar

T = TypeVar("T")


def convert_sdxl_keys_to_diffusers_format(state_dict: Dict[str, T]) -> dict[str, T]:
    """Convert the keys of an SDXL LoRA state_dict to diffusers format.

    The input state_dict can be in either Stability AI format or diffusers format. If the state_dict is already in
    diffusers format, then this function will have no effect.

    This function is adapted from:
    https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L385-L409

    Args:
        state_dict (Dict[str, Tensor]): The SDXL LoRA state_dict.

    Raises:
        ValueError: If state_dict contains an unrecognized key, or not all keys could be converted.

    Returns:
        Dict[str, Tensor]: The diffusers-format state_dict.
    """
    converted_count = 0  # The number of Stability AI keys converted to diffusers format.
    not_converted_count = 0  # The number of keys that were not converted.

    # Get a sorted list of Stability AI UNet keys so that we can efficiently search for keys with matching prefixes.
    # For example, we want to efficiently find `input_blocks_4_1` in the list when searching for
    # `input_blocks_4_1_proj_in`.
    stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
    stability_unet_keys.sort()

    new_state_dict: dict[str, T] = {}
    for full_key, value in state_dict.items():
        if full_key.startswith("lora_unet_"):
            search_key = full_key.replace("lora_unet_", "")
            # Use bisect to find the key in stability_unet_keys that *may* match the search_key's prefix.
            position = bisect.bisect_right(stability_unet_keys, search_key)
            map_key = stability_unet_keys[position - 1]
            # Now, check if the map_key *actually* matches the search_key.
            if search_key.startswith(map_key):
                new_key = full_key.replace(map_key, SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP[map_key])
                new_state_dict[new_key] = value
                converted_count += 1
            else:
                new_state_dict[full_key] = value
                not_converted_count += 1
        elif full_key.startswith("lora_te1_") or full_key.startswith("lora_te2_"):
            # The CLIP text encoders have the same keys in both Stability AI and diffusers formats.
            new_state_dict[full_key] = value
            continue
        else:
            raise ValueError(f"Unrecognized SDXL LoRA key prefix: '{full_key}'.")

    if converted_count > 0 and not_converted_count > 0:
        raise ValueError(
            f"The SDXL LoRA could only be partially converted to diffusers format. converted={converted_count},"
            f" not_converted={not_converted_count}"
        )

    return new_state_dict


# code from
# https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L15C1-L97C32
def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
    """Create a dict mapping state_dict keys from Stability AI SDXL format to diffusers SDXL format."""
    unet_conversion_map_layer: list[tuple[str, str]] = []

    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commentout for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # change for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [
        # (stable-diffusion, HF Diffusers)
        ("in_layers.0.", "norm1."),
        ("in_layers.2.", "conv1."),
        ("out_layers.0.", "norm2."),
        ("out_layers.3.", "conv2."),
        ("emb_layers.1.", "time_emb_proj."),
        ("skip_connection.", "conv_shortcut."),
    ]

    unet_conversion_map: list[tuple[str, str]] = []
    for sd, hf in unet_conversion_map_layer:
        if "resnets" in hf:
            for sd_res, hf_res in unet_conversion_map_resnet:
                unet_conversion_map.append((sd + sd_res, hf + hf_res))
        else:
            unet_conversion_map.append((sd, hf))

    for j in range(2):
        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
        sd_time_embed_prefix = f"time_embed.{j*2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
        sd_label_embed_prefix = f"label_emb.0.{j*2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
    unet_conversion_map.append(("out.0.", "conv_norm_out."))
    unet_conversion_map.append(("out.2.", "conv_out."))

    return unet_conversion_map


SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP = {
    sd.rstrip(".").replace(".", "_"): hf.rstrip(".").replace(".", "_") for sd, hf in _make_sdxl_unet_conversion_map()
}
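A minimal sketch of the bisect-based prefix lookup used in convert_sdxl_keys_to_diffusers_format() (illustrative, not part of the diff): with a sorted key list, bisect_right lands just past every key that compares <= the search string, so the entry one position to the left is the only candidate that could be a prefix of it.

import bisect

sorted_keys = ["input_blocks_4_1", "input_blocks_5_1", "middle_block_1"]
search_key = "input_blocks_4_1_proj_in"

position = bisect.bisect_right(sorted_keys, search_key)
candidate = sorted_keys[position - 1]  # "input_blocks_4_1"
# A startswith() check is still required: bisect only narrows to one candidate.
assert search_key.startswith(candidate)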
invokeai/backend/lora/layers/__init__.py (new file, 0 lines)

invokeai/backend/lora/layers/any_lora_layer.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from typing import Union

from invokeai.backend.lora.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer

AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer, NormLayer, ConcatenatedLoRALayer]
invokeai/backend/lora/layers/concatenated_lora_layer.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from typing import List, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class ConcatenatedLoRALayer(LoRALayerBase):
    """A LoRA layer that is composed of multiple LoRA layers concatenated along a specified axis.

    This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
    Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
    stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
    """

    def __init__(self, lora_layers: List[LoRALayerBase], concat_axis: int = 0):
        # Note: We pass values={} to the base class, because the values are handled by the individual LoRA layers.
        super().__init__(values={})

        self._lora_layers = lora_layers
        self._concat_axis = concat_axis

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        # TODO(ryand): Currently, we pass orig_weight=None to the sub-layers. If we want to support sub-layers that
        # require this value, we will need to implement chunking of the original weight tensor here.
        layer_weights = [lora_layer.get_weight(None) for lora_layer in self._lora_layers]  # pyright: ignore[reportArgumentType]
        return torch.cat(layer_weights, dim=self._concat_axis)

    def get_bias(self, orig_bias: torch.Tensor) -> Optional[torch.Tensor]:
        # TODO(ryand): Currently, we pass orig_bias=None to the sub-layers. If we want to support sub-layers that
        # require this value, we will need to implement chunking of the original bias tensor here.
        layer_biases = [lora_layer.get_bias(None) for lora_layer in self._lora_layers]  # pyright: ignore[reportArgumentType]
        layer_bias_is_none = [layer_bias is None for layer_bias in layer_biases]
        if any(layer_bias_is_none):
            assert all(layer_bias_is_none)
            return None

        # Ignore the type error, because we have just verified that all layer biases are non-None.
        return torch.cat(layer_biases, dim=self._concat_axis)

    def calc_size(self) -> int:
        return sum(lora_layer.calc_size() for lora_layer in self._lora_layers)

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        for lora_layer in self._lora_layers:
            lora_layer.to(device=device, dtype=dtype)
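A toy sketch of why ConcatenatedLoRALayer exists (not part of the diff; the shapes are made up): three diffusers-style Q/K/V deltas, each out_features x in_features, are stacked along dim 0 so they line up with a BFL-style fused qkv weight.

import torch

q_delta = torch.randn(8, 16)
k_delta = torch.randn(8, 16)
v_delta = torch.randn(8, 16)

# Equivalent to ConcatenatedLoRALayer.get_weight() with concat_axis=0, where each
# sub-layer's get_weight() produced one of the three deltas.
fused_qkv_delta = torch.cat([q_delta, k_delta, v_delta], dim=0)
assert fused_qkv_delta.shape == (24, 16)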
invokeai/backend/lora/layers/full_layer.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class FullLayer(LoRALayerBase):
    # bias handled in LoRALayerBase(calc_size, to)
    # weight: torch.Tensor
    # bias: Optional[torch.Tensor]

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(values)

        self.weight = values["diff"]
        self.bias = values.get("diff_b", None)

        self.rank = None  # unscaled
        self.check_keys(values, {"diff", "diff_b"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        return self.weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)
invokeai/backend/lora/layers/ia3_layer.py (new file, 41 lines)
@@ -0,0 +1,41 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class IA3Layer(LoRALayerBase):
    # weight: torch.Tensor
    # on_input: torch.Tensor

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(values)

        self.weight = values["weight"]
        self.on_input = values["on_input"]

        self.rank = None  # unscaled
        self.check_keys(values, {"weight", "on_input"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        weight = self.weight
        if not self.on_input:
            weight = weight.reshape(-1, 1)
        assert orig_weight is not None
        return orig_weight * weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        model_size += self.on_input.nelement() * self.on_input.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)
        self.on_input = self.on_input.to(device=device, dtype=dtype)
invokeai/backend/lora/layers/loha_layer.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class LoHALayer(LoRALayerBase):
    # w1_a: torch.Tensor
    # w1_b: torch.Tensor
    # w2_a: torch.Tensor
    # w2_b: torch.Tensor
    # t1: Optional[torch.Tensor] = None
    # t2: Optional[torch.Tensor] = None

    def __init__(self, values: Dict[str, torch.Tensor]):
        super().__init__(values)

        self.w1_a = values["hada_w1_a"]
        self.w1_b = values["hada_w1_b"]
        self.w2_a = values["hada_w2_a"]
        self.w2_b = values["hada_w2_b"]
        self.t1 = values.get("hada_t1", None)
        self.t2 = values.get("hada_t2", None)

        self.rank = self.w1_b.shape[0]
        self.check_keys(
            values,
            {
                "hada_w1_a",
                "hada_w1_b",
                "hada_w2_a",
                "hada_w2_b",
                "hada_t1",
                "hada_t2",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        if self.t1 is None:
            weight: torch.Tensor = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b)
        else:
            rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", self.t1, self.w1_b, self.w1_a)
            rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", self.t2, self.w2_b, self.w2_a)
            weight = rebuild1 * rebuild2

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.w1_a = self.w1_a.to(device=device, dtype=dtype)
        self.w1_b = self.w1_b.to(device=device, dtype=dtype)
        if self.t1 is not None:
            self.t1 = self.t1.to(device=device, dtype=dtype)

        self.w2_a = self.w2_a.to(device=device, dtype=dtype)
        self.w2_b = self.w2_b.to(device=device, dtype=dtype)
        if self.t2 is not None:
            self.t2 = self.t2.to(device=device, dtype=dtype)
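A sketch of the non-Tucker LoHA composition implemented by LoHALayer.get_weight() (illustrative shapes, not part of the diff): the effective delta is the elementwise (Hadamard) product of two low-rank reconstructions.

import torch

out_f, in_f, rank = 8, 6, 2
w1_a, w1_b = torch.randn(out_f, rank), torch.randn(rank, in_f)
w2_a, w2_b = torch.randn(out_f, rank), torch.randn(rank, in_f)

# Same math as LoHALayer.get_weight() when t1 is None; note rank == w1_b.shape[0].
delta = (w1_a @ w1_b) * (w2_a @ w2_b)
assert delta.shape == (out_f, in_f)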
invokeai/backend/lora/layers/lokr_layer.py (new file, 113 lines)
@@ -0,0 +1,113 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class LoKRLayer(LoRALayerBase):
    # w1: Optional[torch.Tensor] = None
    # w1_a: Optional[torch.Tensor] = None
    # w1_b: Optional[torch.Tensor] = None
    # w2: Optional[torch.Tensor] = None
    # w2_a: Optional[torch.Tensor] = None
    # w2_b: Optional[torch.Tensor] = None
    # t2: Optional[torch.Tensor] = None

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(values)

        self.w1 = values.get("lokr_w1", None)
        if self.w1 is None:
            self.w1_a = values["lokr_w1_a"]
            self.w1_b = values["lokr_w1_b"]
        else:
            self.w1_b = None
            self.w1_a = None

        self.w2 = values.get("lokr_w2", None)
        if self.w2 is None:
            self.w2_a = values["lokr_w2_a"]
            self.w2_b = values["lokr_w2_b"]
        else:
            self.w2_a = None
            self.w2_b = None

        self.t2 = values.get("lokr_t2", None)

        if self.w1_b is not None:
            self.rank = self.w1_b.shape[0]
        elif self.w2_b is not None:
            self.rank = self.w2_b.shape[0]
        else:
            self.rank = None  # unscaled

        self.check_keys(
            values,
            {
                "lokr_w1",
                "lokr_w1_a",
                "lokr_w1_b",
                "lokr_w2",
                "lokr_w2_a",
                "lokr_w2_b",
                "lokr_t2",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        w1: Optional[torch.Tensor] = self.w1
        if w1 is None:
            assert self.w1_a is not None
            assert self.w1_b is not None
            w1 = self.w1_a @ self.w1_b

        w2 = self.w2
        if w2 is None:
            if self.t2 is None:
                assert self.w2_a is not None
                assert self.w2_b is not None
                w2 = self.w2_a @ self.w2_b
            else:
                w2 = torch.einsum("i j k l, i p, j r -> p r k l", self.t2, self.w2_a, self.w2_b)

        if len(w2.shape) == 4:
            w1 = w1.unsqueeze(2).unsqueeze(2)
        w2 = w2.contiguous()
        assert w1 is not None
        assert w2 is not None
        weight = torch.kron(w1, w2)

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        if self.w1 is not None:
            self.w1 = self.w1.to(device=device, dtype=dtype)
        else:
            assert self.w1_a is not None
            assert self.w1_b is not None
            self.w1_a = self.w1_a.to(device=device, dtype=dtype)
            self.w1_b = self.w1_b.to(device=device, dtype=dtype)

        if self.w2 is not None:
            self.w2 = self.w2.to(device=device, dtype=dtype)
        else:
            assert self.w2_a is not None
            assert self.w2_b is not None
            self.w2_a = self.w2_a.to(device=device, dtype=dtype)
            self.w2_b = self.w2_b.to(device=device, dtype=dtype)

        if self.t2 is not None:
            self.t2 = self.t2.to(device=device, dtype=dtype)
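A sketch of the Kronecker composition at the heart of LoKRLayer.get_weight() for the 2D (linear) case (toy shapes, not part of the diff): torch.kron blows an (a, b) x (c, d) factor pair up to (a*c, b*d), so two small matrices can parameterize a large delta.

import torch

w1 = torch.randn(4, 4)   # or w1_a @ w1_b when stored factored
w2 = torch.randn(16, 8)  # or w2_a @ w2_b (optionally via the t2 Tucker path)

delta = torch.kron(w1, w2)
assert delta.shape == (64, 32)  # (4 * 16, 4 * 8)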
invokeai/backend/lora/layers/lora_layer.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


# TODO: find and debug lora/locon with bias
class LoRALayer(LoRALayerBase):
    # up: torch.Tensor
    # mid: Optional[torch.Tensor]
    # down: torch.Tensor

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(values)

        self.up = values["lora_up.weight"]
        self.down = values["lora_down.weight"]
        self.mid = values.get("lora_mid.weight", None)

        self.rank = self.down.shape[0]
        self.check_keys(
            values,
            {
                "lora_up.weight",
                "lora_down.weight",
                "lora_mid.weight",
            },
        )

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        if self.mid is not None:
            up = self.up.reshape(self.up.shape[0], self.up.shape[1])
            down = self.down.reshape(self.down.shape[0], self.down.shape[1])
            weight = torch.einsum("m n w h, i m, n j -> i j w h", self.mid, up, down)
        else:
            weight = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)

        return weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        for val in [self.up, self.mid, self.down]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.up = self.up.to(device=device, dtype=dtype)
        self.down = self.down.to(device=device, dtype=dtype)

        if self.mid is not None:
            self.mid = self.mid.to(device=device, dtype=dtype)
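A sketch of the vanilla LoRA delta computed by LoRALayer.get_weight() when no mid tensor is present, together with the alpha/rank scale that LoraPatcher applies later (toy shapes, not part of the diff):

import torch

rank, in_f, out_f, alpha = 4, 32, 16, 8.0
down = torch.randn(rank, in_f)  # "lora_down.weight"; note rank == down.shape[0]
up = torch.randn(out_f, rank)   # "lora_up.weight"

delta = up @ down                 # LoRALayer.get_weight() with mid is None
scaled = delta * (alpha / rank)   # layer_scale in LoraPatcher.apply_lora_patch()
assert scaled.shape == (out_f, in_f)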
invokeai/backend/lora/layers/lora_layer_base.py (new file, 71 lines)
@@ -0,0 +1,71 @@
from typing import Dict, Optional, Set

import torch

import invokeai.backend.util.logging as logger


class LoRALayerBase:
    # rank: Optional[int]
    # alpha: Optional[float]
    # bias: Optional[torch.Tensor]

    # @property
    # def scale(self):
    #     return self.alpha / self.rank if (self.alpha and self.rank) else 1.0

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        if "alpha" in values:
            self.alpha = values["alpha"].item()
        else:
            self.alpha = None

        if "bias_indices" in values and "bias_values" in values and "bias_size" in values:
            self.bias: Optional[torch.Tensor] = torch.sparse_coo_tensor(
                values["bias_indices"],
                values["bias_values"],
                tuple(values["bias_size"]),
            )
        else:
            self.bias = None

        self.rank = None  # set in layer implementation

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError()

    def get_bias(self, orig_bias: torch.Tensor) -> Optional[torch.Tensor]:
        return self.bias

    def get_parameters(self, orig_module: torch.nn.Module) -> Dict[str, torch.Tensor]:
        params = {"weight": self.get_weight(orig_module.weight)}
        bias = self.get_bias(orig_module.bias)
        if bias is not None:
            params["bias"] = bias
        return params

    def calc_size(self) -> int:
        model_size = 0
        for val in [self.bias]:
            if val is not None:
                model_size += val.nelement() * val.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        if self.bias is not None:
            self.bias = self.bias.to(device=device, dtype=dtype)

    def check_keys(self, values: Dict[str, torch.Tensor], known_keys: Set[str]):
        """Log a warning if values contains unhandled keys."""
        # {"alpha", "bias_indices", "bias_values", "bias_size"} are hard-coded, because they are handled by
        # `LoRALayerBase`. Sub-classes should provide the known_keys that they handled.
        all_known_keys = known_keys | {"alpha", "bias_indices", "bias_values", "bias_size"}
        unknown_keys = set(values.keys()) - all_known_keys
        if unknown_keys:
            logger.warning(
                f"Unexpected keys found in LoRA/LyCORIS layer, model might work incorrectly! Keys: {unknown_keys}"
            )
invokeai/backend/lora/layers/norm_layer.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase


class NormLayer(LoRALayerBase):
    # bias handled in LoRALayerBase(calc_size, to)
    # weight: torch.Tensor
    # bias: Optional[torch.Tensor]

    def __init__(
        self,
        values: Dict[str, torch.Tensor],
    ):
        super().__init__(values)

        self.weight = values["w_norm"]
        self.bias = values.get("b_norm", None)

        self.rank = None  # unscaled
        self.check_keys(values, {"w_norm", "b_norm"})

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        return self.weight

    def calc_size(self) -> int:
        model_size = super().calc_size()
        model_size += self.weight.nelement() * self.weight.element_size()
        return model_size

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        super().to(device=device, dtype=dtype)

        self.weight = self.weight.to(device=device, dtype=dtype)
invokeai/backend/lora/layers/utils.py (new file, 33 lines)
@@ -0,0 +1,33 @@
from typing import Dict

import torch

from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.lora.layers.full_layer import FullLayer
from invokeai.backend.lora.layers.ia3_layer import IA3Layer
from invokeai.backend.lora.layers.loha_layer import LoHALayer
from invokeai.backend.lora.layers.lokr_layer import LoKRLayer
from invokeai.backend.lora.layers.lora_layer import LoRALayer
from invokeai.backend.lora.layers.norm_layer import NormLayer


def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> AnyLoRALayer:
    # Detect layers according to LyCORIS detection logic (`weight_list_det`)
    # https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules

    if "lora_up.weight" in state_dict:
        # LoRA a.k.a LoCon
        return LoRALayer(state_dict)
    elif "hada_w1_a" in state_dict:
        return LoHALayer(state_dict)
    elif "lokr_w1" in state_dict or "lokr_w1_a" in state_dict:
        return LoKRLayer(state_dict)
    elif "diff" in state_dict:
        # Full a.k.a Diff
        return FullLayer(state_dict)
    elif "on_input" in state_dict:
        return IA3Layer(state_dict)
    elif "w_norm" in state_dict:
        return NormLayer(state_dict)
    else:
        raise ValueError(f"Unsupported lora format: {state_dict.keys()}")
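A minimal sketch of the key-based dispatch above (not part of the diff; the shapes are made up):

import torch

layer_sd = {
    "lora_up.weight": torch.zeros(16, 4),
    "lora_down.weight": torch.zeros(4, 32),
    "alpha": torch.tensor(4.0),
}
# "lora_up.weight" is present, so this dispatches to LoRALayer.
layer = any_lora_layer_from_state_dict(layer_sd)
assert isinstance(layer, LoRALayer)
assert layer.rank == 4 and layer.alpha == 4.0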
invokeai/backend/lora/lora_model_raw.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright (c) 2024 The InvokeAI Development team
from typing import Dict, Optional

import torch

from invokeai.backend.lora.layers.any_lora_layer import AnyLoRALayer
from invokeai.backend.raw_model import RawModel


class LoRAModelRaw(RawModel):  # (torch.nn.Module):
    def __init__(self, layers: Dict[str, AnyLoRALayer]):
        self.layers = layers

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
        for _key, layer in self.layers.items():
            layer.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        model_size = 0
        for _, layer in self.layers.items():
            model_size += layer.calc_size()
        return model_size
invokeai/backend/lora/lora_patcher.py (new file, 148 lines)
@@ -0,0 +1,148 @@
from contextlib import contextmanager
from typing import Dict, Iterable, Optional, Tuple

import torch

from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage


class LoraPatcher:
    @staticmethod
    @torch.no_grad()
    @contextmanager
    def apply_lora_patches(
        model: torch.nn.Module,
        patches: Iterable[Tuple[LoRAModelRaw, float]],
        prefix: str,
        cached_weights: Optional[Dict[str, torch.Tensor]] = None,
    ):
        """Apply one or more LoRA patches to a model within a context manager.

        :param model: The model to patch.
        :param patches: An iterator that returns tuples of LoRA patches and associated weights. An iterator is used so
            that the LoRA patches do not need to be loaded into memory all at once.
        :param prefix: The keys in the patches will be filtered to only include weights with this prefix.
        :param cached_weights: Read-only copy of the model's state dict in CPU, for efficient unpatching purposes.
        """
        original_weights = OriginalWeightsStorage(cached_weights)
        try:
            for patch, patch_weight in patches:
                LoraPatcher.apply_lora_patch(
                    model=model,
                    prefix=prefix,
                    patch=patch,
                    patch_weight=patch_weight,
                    original_weights=original_weights,
                )
                del patch

            yield
        finally:
            for param_key, weight in original_weights.get_changed_weights():
                model.get_parameter(param_key).copy_(weight)

    @staticmethod
    @torch.no_grad()
    def apply_lora_patch(
        model: torch.nn.Module,
        prefix: str,
        patch: LoRAModelRaw,
        patch_weight: float,
        original_weights: OriginalWeightsStorage,
    ):
        """Apply a single LoRA patch to a model.

        :param model: The model to patch.
        :param prefix: A string prefix that precedes keys used in the LoRA's weight layers.
        :param patch: The LoRA model to patch in.
        :param patch_weight: The LoRA patch weight.
        :param original_weights: Storage for the original weights that the LoRA patches, used for unpatching.
        """
        if patch_weight == 0:
            return

        # If the layer keys contain a dot, then they are not flattened, and can be directly used to access model
        # submodules. If the layer keys do not contain a dot, then they are flattened, meaning that all '.' have been
        # replaced with '_'. Non-flattened keys are preferred, because they allow submodules to be accessed directly
        # without searching, but some legacy code still uses flattened keys.
        layer_keys_are_flattened = "." not in next(iter(patch.layers.keys()))

        prefix_len = len(prefix)

        for layer_key, layer in patch.layers.items():
            if not layer_key.startswith(prefix):
                continue

            module_key, module = LoraPatcher._get_submodule(
                model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
            )

            # All of the LoRA weight calculations will be done on the same device as the module weight.
            # (Performance will be best if this is a CUDA device.)
            device = module.weight.device
            dtype = module.weight.dtype

            layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0

            # We intentionally move to the target device first, then cast. Experimentally, this was found to
            # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
            # same thing in a single call to '.to(...)'.
            layer.to(device=device)
            layer.to(dtype=torch.float32)

            # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
            # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
            for param_name, lora_param_weight in layer.get_parameters(module).items():
                param_key = module_key + "." + param_name
                module_param = module.get_parameter(param_name)

                # Save original weight
                original_weights.save(param_key, module_param)

                if module_param.shape != lora_param_weight.shape:
                    lora_param_weight = lora_param_weight.reshape(module_param.shape)

                lora_param_weight *= patch_weight * layer_scale
                module_param += lora_param_weight.to(dtype=dtype)

            layer.to(device=TorchDevice.CPU_DEVICE)

    @staticmethod
    def _get_submodule(
        model: torch.nn.Module, layer_key: str, layer_key_is_flattened: bool
    ) -> tuple[str, torch.nn.Module]:
        """Get the submodule corresponding to the given layer key.

        :param model: The model to search.
        :param layer_key: The layer key to search for.
        :param layer_key_is_flattened: Whether the layer key is flattened. If flattened, then all '.' have been
            replaced with '_'. Non-flattened keys are preferred, because they allow submodules to be accessed
            directly without searching, but some legacy code still uses flattened keys.
        :return: A tuple containing the module key and the submodule.
        """
        if not layer_key_is_flattened:
            return layer_key, model.get_submodule(layer_key)

        # Handle flattened keys.
        assert "." not in layer_key

        module = model
        module_key = ""
        key_parts = layer_key.split("_")

        submodule_name = key_parts.pop(0)

        while len(key_parts) > 0:
            try:
                module = module.get_submodule(submodule_name)
                module_key += "." + submodule_name
                submodule_name = key_parts.pop(0)
            except Exception:
                submodule_name += "_" + key_parts.pop(0)

        module = module.get_submodule(submodule_name)
        module_key = (module_key + "." + submodule_name).lstrip(".")

        return module_key, module
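A sketch of how _get_submodule() resolves a flattened key in which '.' was replaced by '_' (not part of the diff; the tiny model below is a stand-in): on a failed lookup it greedily extends the candidate name with the next '_' segment until a submodule resolves.

import torch

model = torch.nn.Sequential()
model.add_module("down_blocks", torch.nn.Sequential(torch.nn.Linear(4, 4)))

# For "down_blocks_0": looking up "down" fails, so it grows to "down_blocks",
# which resolves; the remaining segment "0" selects the Linear child.
module_key, module = LoraPatcher._get_submodule(model, "down_blocks_0", layer_key_is_flattened=True)
assert module_key == "down_blocks.0"
assert isinstance(module, torch.nn.Linear)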
@@ -32,7 +32,9 @@ from invokeai.backend.model_manager.config import (
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
-from invokeai.backend.model_manager.util.model_util import convert_bundle_to_flux_transformer_checkpoint
+from invokeai.backend.model_manager.util.model_util import (
+    convert_bundle_to_flux_transformer_checkpoint,
+)
from invokeai.backend.util.silence_warnings import SilenceWarnings

try:
@@ -193,6 +195,11 @@ class FluxCheckpointModel(ModelLoader):
        sd = load_file(model_path)
        if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
            sd = convert_bundle_to_flux_transformer_checkpoint(sd)
+        new_sd_size = sum([ten.nelement() * torch.bfloat16.itemsize for ten in sd.values()])
+        self._ram_cache.make_room(new_sd_size)
+        for k in sd.keys():
+            # We need to cast to bfloat16 due to it being the only currently supported dtype for inference
+            sd[k] = sd[k].to(torch.bfloat16)
        model.load_state_dict(sd, assign=True)
        return model
@@ -5,8 +5,18 @@ from logging import Logger
from pathlib import Path
from typing import Optional

+import torch
+from safetensors.torch import load_file
+
from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.lora import LoRAModelRaw
+from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
+    lora_model_from_flux_diffusers_state_dict,
+)
+from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import (
+    lora_model_from_flux_kohya_state_dict,
+)
+from invokeai.backend.lora.conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
+from invokeai.backend.lora.conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format
from invokeai.backend.model_manager import (
    AnyModel,
    AnyModelConfig,
@@ -45,14 +55,33 @@ class LoRALoader(ModelLoader):
            raise ValueError("There are no submodels in a LoRA model.")
        model_path = Path(config.path)
        assert self._model_base is not None
-        model = LoRAModelRaw.from_checkpoint(
-            file_path=model_path,
-            dtype=self._torch_dtype,
-            base_model=self._model_base,
-        )
+
+        # Load the state dict from the model file.
+        if model_path.suffix == ".safetensors":
+            state_dict = load_file(model_path.absolute().as_posix(), device="cpu")
+        else:
+            state_dict = torch.load(model_path, map_location="cpu")
+
+        # Apply state_dict key conversions, if necessary.
+        if self._model_base == BaseModelType.StableDiffusionXL:
+            state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
+            model = lora_model_from_sd_state_dict(state_dict=state_dict)
+        elif self._model_base == BaseModelType.Flux:
+            if config.format == ModelFormat.Diffusers:
+                model = lora_model_from_flux_diffusers_state_dict(state_dict=state_dict)
+            elif config.format == ModelFormat.LyCORIS:
+                model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
+            else:
+                raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
+        elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:
+            # Currently, we don't apply any conversions for SD1 and SD2 LoRA models.
+            model = lora_model_from_sd_state_dict(state_dict=state_dict)
+        else:
+            raise ValueError(f"Unsupported LoRA base model: {self._model_base}")
+
+        model.to(dtype=self._torch_dtype)
        return model

    # override
    def _get_model_path(self, config: AnyModelConfig) -> Path:
        # cheating a little - we remember this variable for using in the subsequent call to _load_model()
        self._model_base = config.base
@@ -15,7 +15,7 @@ from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import D
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
from invokeai.backend.image_util.segment_anything.segment_anything_pipeline import SegmentAnythingPipeline
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.lora import LoRAModelRaw
+from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
@@ -10,6 +10,10 @@ from picklescan.scanner import scan_file_path

import invokeai.backend.util.logging as logger
from invokeai.app.util.misc import uuid_string
+from invokeai.backend.lora.conversions.flux_diffusers_lora_conversion_utils import (
+    is_state_dict_likely_in_flux_diffusers_format,
+)
+from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import is_state_dict_likely_in_flux_kohya_format
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
@@ -244,7 +248,9 @@ class ModelProbe(object):
            return ModelType.VAE
        elif key.startswith(("lora_te_", "lora_unet_")):
            return ModelType.LoRA
-        elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight")):
+        # "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT
+        # LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
+        elif key.endswith(("to_k_lora.up.weight", "to_q_lora.down.weight", "lora_A.weight", "lora_B.weight")):
            return ModelType.LoRA
        elif key.startswith(("controlnet", "control_model", "input_blocks")):
            return ModelType.ControlNet
@@ -554,12 +560,21 @@ class LoRACheckpointProbe(CheckpointProbeBase):
    """Class for LoRA checkpoints."""

    def get_format(self) -> ModelFormat:
-        return ModelFormat("lycoris")
+        if is_state_dict_likely_in_flux_diffusers_format(self.checkpoint):
+            # TODO(ryand): This is an unusual case. In other places throughout the codebase, we treat
+            # ModelFormat.Diffusers as meaning that the model is in a directory. In this case, the model is a single
+            # file, but the weight keys are in the diffusers format.
+            return ModelFormat.Diffusers
+        return ModelFormat.LyCORIS

    def get_base_type(self) -> BaseModelType:
-        checkpoint = self.checkpoint
-        token_vector_length = lora_token_vector_length(checkpoint)
+        if is_state_dict_likely_in_flux_kohya_format(self.checkpoint) or is_state_dict_likely_in_flux_diffusers_format(
+            self.checkpoint
+        ):
+            return BaseModelType.Flux
+
+        # If we've gotten here, we assume that the model is a Stable Diffusion model.
+        token_vector_length = lora_token_vector_length(self.checkpoint)
        if token_vector_length == 768:
            return BaseModelType.StableDiffusion1
        elif token_vector_length == 1024:
@@ -5,32 +5,18 @@ from __future__ import annotations
|
||||
|
||||
import pickle
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Dict, Generator, Iterator, List, Optional, Tuple, Type, Union
|
||||
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from diffusers import OnnxRuntimeModel, UNet2DConditionModel
|
||||
from diffusers import UNet2DConditionModel
|
||||
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
||||
|
||||
from invokeai.app.shared.models import FreeUConfig
|
||||
from invokeai.backend.lora import LoRAModelRaw
|
||||
from invokeai.backend.model_manager import AnyModel
|
||||
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
|
||||
from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
|
||||
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
|
||||
from invokeai.backend.stable_diffusion.extensions.lora import LoRAExt
|
||||
from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
|
||||
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
|
||||
|
||||
"""
|
||||
loras = [
|
||||
(lora_model1, 0.7),
|
||||
(lora_model2, 0.4),
|
||||
]
|
||||
with LoRAHelper.apply_lora_unet(unet, loras):
|
||||
# unet with applied loras
|
||||
# unmodified unet
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class ModelPatcher:
|
||||
@@ -54,95 +40,6 @@ class ModelPatcher:
|
||||
finally:
|
||||
unet.set_attn_processor(unet_orig_processors)
|
||||
|
||||
@staticmethod
|
||||
def _resolve_lora_key(model: torch.nn.Module, lora_key: str, prefix: str) -> Tuple[str, torch.nn.Module]:
|
||||
assert "." not in lora_key
|
||||
|
||||
if not lora_key.startswith(prefix):
|
||||
raise Exception(f"lora_key with invalid prefix: {lora_key}, {prefix}")
|
||||
|
||||
module = model
|
||||
module_key = ""
|
||||
key_parts = lora_key[len(prefix) :].split("_")
|
||||
|
||||
submodule_name = key_parts.pop(0)
|
||||
|
||||
while len(key_parts) > 0:
|
||||
try:
|
||||
module = module.get_submodule(submodule_name)
|
||||
module_key += "." + submodule_name
|
||||
submodule_name = key_parts.pop(0)
|
||||
except Exception:
|
||||
submodule_name += "_" + key_parts.pop(0)
|
||||
|
||||
module = module.get_submodule(submodule_name)
|
||||
module_key = (module_key + "." + submodule_name).lstrip(".")
|
||||
|
||||
return (module_key, module)
    @classmethod
    @contextmanager
    def apply_lora_unet(
        cls,
        unet: UNet2DConditionModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
        cached_weights: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Generator[None, None, None]:
        with cls.apply_lora(
            unet,
            loras=loras,
            prefix="lora_unet_",
            cached_weights=cached_weights,
        ):
            yield

    @classmethod
    @contextmanager
    def apply_lora_text_encoder(
        cls,
        text_encoder: CLIPTextModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
        cached_weights: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Generator[None, None, None]:
        with cls.apply_lora(text_encoder, loras=loras, prefix="lora_te_", cached_weights=cached_weights):
            yield

    @classmethod
    @contextmanager
    def apply_lora(
        cls,
        model: AnyModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
        prefix: str,
        cached_weights: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Generator[None, None, None]:
        """
        Apply one or more LoRAs to a model.

        :param model: The model to patch.
        :param loras: An iterator that returns the LoRA to patch in and its patch weight.
        :param prefix: A string prefix that precedes keys used in the LoRAs weight layers.
        :param cached_weights: Read-only copy of the model's state dict on the CPU, for unpatching purposes.
        """
        original_weights = OriginalWeightsStorage(cached_weights)
        try:
            for lora_model, lora_weight in loras:
                LoRAExt.patch_model(
                    model=model,
                    prefix=prefix,
                    lora=lora_model,
                    lora_weight=lora_weight,
                    original_weights=original_weights,
                )
                del lora_model

            yield

        finally:
            with torch.no_grad():
                for param_key, weight in original_weights.get_changed_weights():
                    model.get_parameter(param_key).copy_(weight)
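The try/finally above guarantees unpatching even if generation fails: each weight is saved once before it is first modified, and the finally block copies the saved tensors back. A minimal sketch of that save-once / restore-changed pattern (OriginalWeightsStorage's real API may differ; the WeightStore class below is illustrative only):

import torch

class WeightStore:
    def __init__(self):
        self._saved = {}

    def save(self, key, tensor):
        if key not in self._saved:  # only the first save per key wins
            self._saved[key] = tensor.detach().to("cpu", copy=True)

    def get_changed_weights(self):
        return self._saved.items()

linear = torch.nn.Linear(2, 2)
store = WeightStore()
with torch.no_grad():
    store.save("weight", linear.weight)
    linear.weight += 1.0  # "patch"
    for key, orig in store.get_changed_weights():
        linear.get_parameter(key).copy_(orig)  # "unpatch"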
    @classmethod
    @contextmanager
    def apply_ti(
@@ -282,26 +179,6 @@ class ModelPatcher:


class ONNXModelPatcher:
    @classmethod
    @contextmanager
    def apply_lora_unet(
        cls,
        unet: OnnxRuntimeModel,
        loras: Iterator[Tuple[LoRAModelRaw, float]],
    ) -> None:
        with cls.apply_lora(unet, loras, "lora_unet_"):
            yield

    @classmethod
    @contextmanager
    def apply_lora_text_encoder(
        cls,
        text_encoder: OnnxRuntimeModel,
        loras: List[Tuple[LoRAModelRaw, float]],
    ) -> None:
        with cls.apply_lora(text_encoder, loras, "lora_te_"):
            yield

    # based on
    # https://github.com/ssube/onnx-web/blob/ca2e436f0623e18b4cfe8a0363fcfcf10508acf7/api/onnx_web/convert/diffusion/lora.py#L323
    @classmethod
@@ -1,18 +1,17 @@
from __future__ import annotations

from contextlib import contextmanager
from typing import TYPE_CHECKING, Tuple
from typing import TYPE_CHECKING

import torch
from diffusers import UNet2DConditionModel

from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoraPatcher
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
from invokeai.backend.util.devices import TorchDevice

if TYPE_CHECKING:
    from invokeai.app.invocations.model import ModelIdentifierField
    from invokeai.app.services.shared.invocation_context import InvocationContext
    from invokeai.backend.lora import LoRAModelRaw
    from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage


@@ -31,107 +30,14 @@ class LoRAExt(ExtensionBase):
    @contextmanager
    def patch_unet(self, unet: UNet2DConditionModel, original_weights: OriginalWeightsStorage):
        lora_model = self._node_context.models.load(self._model_id).model
        self.patch_model(
        assert isinstance(lora_model, LoRAModelRaw)
        LoraPatcher.apply_lora_patch(
            model=unet,
            prefix="lora_unet_",
            lora=lora_model,
            lora_weight=self._weight,
            patch=lora_model,
            patch_weight=self._weight,
            original_weights=original_weights,
        )
        del lora_model

        yield
    @classmethod
    @torch.no_grad()
    def patch_model(
        cls,
        model: torch.nn.Module,
        prefix: str,
        lora: LoRAModelRaw,
        lora_weight: float,
        original_weights: OriginalWeightsStorage,
    ):
        """
        Apply a single LoRA to a model.

        :param model: The model to patch.
        :param lora: LoRA model to patch in.
        :param lora_weight: LoRA patch weight.
        :param prefix: A string prefix that precedes keys used in the LoRAs weight layers.
        :param original_weights: Storage for original weights; populated with each weight the LoRA patches, used for unpatching.
        """

        if lora_weight == 0:
            return

        # assert lora.device.type == "cpu"
        for layer_key, layer in lora.layers.items():
            if not layer_key.startswith(prefix):
                continue

            # TODO(ryand): A non-negligible amount of time is currently spent resolving LoRA keys. This
            # should be improved in the following ways:
            # 1. The key mapping could be more-efficiently pre-computed. This would save time every time a
            #    LoRA model is applied.
            # 2. From an API perspective, there's no reason that the `ModelPatcher` should be aware of the
            #    intricacies of Stable Diffusion key resolution. It should just expect the input LoRA
            #    weights to have valid keys.
            assert isinstance(model, torch.nn.Module)
            module_key, module = cls._resolve_lora_key(model, layer_key, prefix)

            # All of the LoRA weight calculations will be done on the same device as the module weight.
            # (Performance will be best if this is a CUDA device.)
            device = module.weight.device
            dtype = module.weight.dtype

            layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0

            # We intentionally move to the target device first, then cast. Experimentally, this was found to
            # be significantly faster for 16-bit CPU tensors being moved to a CUDA device than doing the
            # same thing in a single call to '.to(...)'.
            layer.to(device=device)
            layer.to(dtype=torch.float32)

            # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA
            # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed.
            for param_name, lora_param_weight in layer.get_parameters(module).items():
                param_key = module_key + "." + param_name
                module_param = module.get_parameter(param_name)

                # Save the original weight so it can be restored when unpatching.
                original_weights.save(param_key, module_param)

                if module_param.shape != lora_param_weight.shape:
                    # TODO: debug on lycoris
                    lora_param_weight = lora_param_weight.reshape(module_param.shape)

                lora_param_weight *= lora_weight * layer_scale
                module_param += lora_param_weight.to(dtype=dtype)

            layer.to(device=TorchDevice.CPU_DEVICE)
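The in-place update in the loop is the standard LoRA formula W' = W + lora_weight * (alpha / rank) * dW, so unpatching is an exact subtraction of the same delta. A hedged numeric sketch for a plain linear LoRA layer (tensor names are illustrative, not the project's classes):

import torch

torch.manual_seed(0)
rank, alpha, lora_weight = 4, 4.0, 0.7
W = torch.randn(8, 8)          # module weight
down = torch.randn(rank, 8)    # LoRA "A" / down-projection
up = torch.randn(8, rank)      # LoRA "B" / up-projection

layer_scale = alpha / rank     # 1.0 when alpha == rank, as in the code above
delta = (up @ down) * (lora_weight * layer_scale)
W_patched = W + delta

# Unpatching restores the original weight exactly (up to float precision).
assert torch.allclose(W_patched - delta, W)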
    @staticmethod
    def _resolve_lora_key(model: torch.nn.Module, lora_key: str, prefix: str) -> Tuple[str, torch.nn.Module]:
        assert "." not in lora_key

        if not lora_key.startswith(prefix):
            raise Exception(f"lora_key with invalid prefix: {lora_key}, {prefix}")

        module = model
        module_key = ""
        key_parts = lora_key[len(prefix) :].split("_")

        submodule_name = key_parts.pop(0)

        while len(key_parts) > 0:
            try:
                module = module.get_submodule(submodule_name)
                module_key += "." + submodule_name
                submodule_name = key_parts.pop(0)
            except Exception:
                submodule_name += "_" + key_parts.pop(0)

        module = module.get_submodule(submodule_name)
        module_key = (module_key + "." + submodule_name).lstrip(".")

        return (module_key, module)
@@ -16,6 +16,14 @@ module.exports = {
    'no-promise-executor-return': 'error',
    // https://eslint.org/docs/latest/rules/require-await
    'require-await': 'error',
    'no-restricted-properties': [
      'error',
      {
        object: 'crypto',
        property: 'randomUUID',
        message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
      },
    ],
  },
  overrides: [
    /**
@@ -11,6 +11,8 @@ const config: KnipConfig = {
    'src/features/nodes/types/v2/**',
    // TODO(psyche): maybe we can clean up these utils after canvas v2 release
    'src/features/controlLayers/konva/util.ts',
    // TODO(psyche): restore HRF functionality?
    'src/features/hrf/**',
  ],
  ignoreBinaries: ['only-allow'],
  paths: {
@@ -58,7 +58,7 @@
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.0.20",
    "@invoke-ai/ui-library": "^0.0.32",
    "@invoke-ai/ui-library": "^0.0.33",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",
@@ -92,7 +92,7 @@
    "react-i18next": "^14.1.3",
    "react-icons": "^5.2.1",
    "react-redux": "9.1.2",
    "react-resizable-panels": "^2.0.23",
    "react-resizable-panels": "^2.1.2",
    "react-use": "^17.5.1",
    "react-virtuoso": "^4.9.0",
    "reactflow": "^11.11.4",
invokeai/frontend/web/pnpm-lock.yaml (generated, 62 changed lines)
@@ -24,8 +24,8 @@ dependencies:
      specifier: ^5.0.20
      version: 5.0.20
    '@invoke-ai/ui-library':
      specifier: ^0.0.32
      version: 0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
      specifier: ^0.0.33
      version: 0.0.33(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
    '@nanostores/react':
      specifier: ^0.7.3
      version: 0.7.3(nanostores@0.11.2)(react@18.3.1)
@@ -126,8 +126,8 @@ dependencies:
      specifier: 9.1.2
      version: 9.1.2(@types/react@18.3.3)(react@18.3.1)(redux@5.0.1)
    react-resizable-panels:
      specifier: ^2.0.23
      version: 2.0.23(react-dom@18.3.1)(react@18.3.1)
      specifier: ^2.1.2
      version: 2.1.2(react-dom@18.3.1)(react@18.3.1)
    react-use:
      specifier: ^17.5.1
      version: 17.5.1(react-dom@18.3.1)(react@18.3.1)
@@ -2056,7 +2056,7 @@ packages:
    dependencies:
      '@chakra-ui/dom-utils': 2.1.0
      react: 18.3.1
      react-focus-lock: 2.13.0(@types/react@18.3.3)(react@18.3.1)
      react-focus-lock: 2.13.2(@types/react@18.3.3)(react@18.3.1)
    transitivePeerDependencies:
      - '@types/react'
    dev: false
@@ -2253,7 +2253,7 @@ packages:
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
      react-remove-scroll: 2.5.10(@types/react@18.3.3)(react@18.3.1)
      react-remove-scroll: 2.6.0(@types/react@18.3.3)(react@18.3.1)
    transitivePeerDependencies:
      - '@types/react'
    dev: false
@@ -3574,8 +3574,8 @@ packages:
      prettier: 3.3.3
    dev: true

  /@invoke-ai/ui-library@0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-JxAoblrDu/cZ4ha9KO4ry5OWvyLUE1Dj28i+ciMaDNUpC/cN+IyiTbUBoFoPaoN5JP8Zpd/MYCcmF2qsziHDzg==}
  /@invoke-ai/ui-library@0.0.33(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-YLydTCOTUEgju4Ex6yXt/bvNBcO97y6zc1cGYjt7vtJMS8e6deA89cC5JejjbmVgntdnn49cDyeUxB8Z24gZew==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
@@ -10011,8 +10011,8 @@ packages:
    resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==}
    dev: false

  /react-focus-lock@2.13.0(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-w7aIcTwZwNzUp2fYQDMICy+khFwVmKmOrLF8kNsPS+dz4Oi/oxoVJ2wCMVvX6rWGriM/+mYaTyp1MRmkcs2amw==}
  /react-focus-lock@2.13.2(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-T/7bsofxYqnod2xadvuwjGKHOoL5GH7/EIPI5UyEvaU/c2CcphvGI371opFtuY/SYdbMsNiuF4HsHQ50nA/TKQ==}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
@@ -10147,25 +10147,6 @@ packages:
      tslib: 2.7.0
    dev: false

  /react-remove-scroll@2.5.10(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-m3zvBRANPBw3qxVVjEIPEQinkcwlFZ4qyomuWVpNJdv4c6MvHfXV0C3L9Jx5rr3HeBHKNRX+1jreB5QloDIJjA==}
    engines: {node: '>=10'}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
    peerDependenciesMeta:
      '@types/react':
        optional: true
    dependencies:
      '@types/react': 18.3.3
      react: 18.3.1
      react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
      react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
      tslib: 2.7.0
      use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-remove-scroll@2.5.5(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==}
    engines: {node: '>=10'}
@@ -10185,8 +10166,27 @@ packages:
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-resizable-panels@2.0.23(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-8ZKTwTU11t/FYwiwhMdtZYYyFxic5U5ysRu2YwfkAgDbUJXFvnWSJqhnzkSlW+mnDoNAzDCrJhdOSXBPA76wug==}
  /react-remove-scroll@2.6.0(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==}
    engines: {node: '>=10'}
    peerDependencies:
      '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0
      react: ^16.8.0 || ^17.0.0 || ^18.0.0
    peerDependenciesMeta:
      '@types/react':
        optional: true
    dependencies:
      '@types/react': 18.3.3
      react: 18.3.1
      react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
      react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
      tslib: 2.7.0
      use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
      use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
    dev: false

  /react-resizable-panels@2.1.2(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-Ku2Bo7JvE8RpHhl4X1uhkdeT9auPBoxAOlGTqomDUUrBAX2mVGuHYZTcWvlnJSgx0QyHIxHECgGB5XVPUbUOkQ==}
    peerDependencies:
      react: ^16.14.0 || ^17.0.0 || ^18.0.0
      react-dom: ^16.14.0 || ^17.0.0 || ^18.0.0
@@ -127,7 +127,14 @@
  "bulkDownloadRequestedDesc": "Dein Download wird vorbereitet. Dies kann ein paar Momente dauern.",
  "bulkDownloadRequestFailed": "Problem beim Download vorbereiten",
  "bulkDownloadFailed": "Download fehlgeschlagen",
  "alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen"
  "alwaysShowImageSizeBadge": "Zeige immer Bilder Größe Abzeichen",
  "selectForCompare": "Zum Vergleichen auswählen",
  "compareImage": "Bilder vergleichen",
  "exitSearch": "Suche beenden",
  "newestFirst": "Neueste zuerst",
  "oldestFirst": "Älteste zuerst",
  "openInViewer": "Im Viewer öffnen",
  "swapImages": "Bilder tauschen"
},
"hotkeys": {
  "keyboardShortcuts": "Tastenkürzel",
@@ -631,7 +638,8 @@
  "archived": "Archiviert",
  "noBoards": "Kein {{boardType}} Ordner",
  "hideBoards": "Ordner verstecken",
  "viewBoards": "Ordner ansehen"
  "viewBoards": "Ordner ansehen",
  "deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt."
},
"controlnet": {
  "showAdvanced": "Zeige Erweitert",
@@ -781,7 +789,9 @@
  "batchFieldValues": "Stapelverarbeitungswerte",
  "batchQueued": "Stapelverarbeitung eingereiht",
  "graphQueued": "Graph eingereiht",
  "graphFailedToQueue": "Fehler beim Einreihen des Graphen"
  "graphFailedToQueue": "Fehler beim Einreihen des Graphen",
  "generations_one": "Generation",
  "generations_other": "Generationen"
},
"metadata": {
  "negativePrompt": "Negativ Beschreibung",
@@ -1146,5 +1156,10 @@
  "noMatchingTriggers": "Keine passenden Trigger",
  "addPromptTrigger": "Prompt-Trigger hinzufügen",
  "compatibleEmbeddings": "Kompatible Einbettungen"
},
"ui": {
  "tabs": {
    "queue": "Warteschlange"
  }
}
}
@@ -93,6 +93,7 @@
  "copy": "Copy",
  "copyError": "$t(gallery.copy) Error",
  "on": "On",
  "off": "Off",
  "or": "or",
  "checkpoint": "Checkpoint",
  "communityLabel": "Community",
@@ -134,6 +135,7 @@
  "nodes": "Workflows",
  "notInstalled": "Not $t(common.installed)",
  "openInNewTab": "Open in New Tab",
  "openInViewer": "Open in Viewer",
  "orderBy": "Order By",
  "outpaint": "outpaint",
  "outputs": "Outputs",
@@ -373,6 +375,7 @@
  "useCache": "Use Cache"
},
"gallery": {
  "gallery": "Gallery",
  "alwaysShowImageSizeBadge": "Always Show Image Size Badge",
  "assets": "Assets",
  "autoAssignBoardOnClick": "Auto-Assign Board on Click",
@@ -385,11 +388,11 @@
  "deleteImage_one": "Delete Image",
  "deleteImage_other": "Delete {{count}} Images",
  "deleteImagePermanent": "Deleted images cannot be restored.",
  "displayBoardSearch": "Display Board Search",
  "displaySearch": "Display Search",
  "displayBoardSearch": "Board Search",
  "displaySearch": "Image Search",
  "download": "Download",
  "exitBoardSearch": "Exit Board Search",
  "exitSearch": "Exit Search",
  "exitSearch": "Exit Image Search",
  "featuresWillReset": "If you delete this image, those features will immediately be reset.",
  "galleryImageSize": "Image Size",
  "gallerySettings": "Gallery Settings",
@@ -435,7 +438,8 @@
  "compareHelp1": "Hold <Kbd>Alt</Kbd> while clicking a gallery image or using the arrow keys to change the compare image.",
  "compareHelp2": "Press <Kbd>M</Kbd> to cycle through comparison modes.",
  "compareHelp3": "Press <Kbd>C</Kbd> to swap the compared images.",
  "compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit."
  "compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit.",
  "toggleMiniViewer": "Toggle Mini Viewer"
},
"hotkeys": {
  "searchHotkeys": "Search Hotkeys",
@@ -1014,6 +1018,8 @@
  "noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
  "incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
  "noModelSelected": "No model selected",
  "canvasManagerNotLoaded": "Canvas Manager not loaded",
  "canvasBusy": "Canvas is busy",
  "noPrompts": "No prompts generated",
  "noNodesInGraph": "No nodes in graph",
  "systemDisconnected": "System disconnected",
@@ -1045,12 +1051,11 @@
  "scaledHeight": "Scaled H",
  "scaledWidth": "Scaled W",
  "scheduler": "Scheduler",
  "seamlessXAxis": "Seamless Tiling X Axis",
  "seamlessYAxis": "Seamless Tiling Y Axis",
  "seamlessXAxis": "Seamless X Axis",
  "seamlessYAxis": "Seamless Y Axis",
  "seed": "Seed",
  "imageActions": "Image Actions",
  "sendToImg2Img": "Send to Image to Image",
  "sendToUnifiedCanvas": "Send To Unified Canvas",
  "sendToCanvas": "Send To Canvas",
  "sendToUpscale": "Send To Upscale",
  "showOptionsPanel": "Show Side Panel (O or T)",
  "shuffle": "Shuffle Seed",
@@ -1191,8 +1196,8 @@
  "problemSavingMaskDesc": "Unable to export mask",
  "prunedQueue": "Pruned Queue",
  "resetInitialImage": "Reset Initial Image",
  "sentToImageToImage": "Sent To Image To Image",
  "sentToUnifiedCanvas": "Sent to Unified Canvas",
  "sentToCanvas": "Sent to Canvas",
  "sentToUpscale": "Sent to Upscale",
  "serverError": "Server Error",
  "sessionRef": "Session: {{sessionId}}",
  "setAsCanvasInitialImage": "Set as canvas initial image",
@@ -1655,6 +1660,7 @@
},
"controlLayers": {
  "bookmark": "Bookmark for Quick Switch",
  "fitBboxToLayers": "Fit Bbox To Layers",
  "removeBookmark": "Remove Bookmark",
  "saveCanvasToGallery": "Save Canvas To Gallery",
  "saveBboxToGallery": "Save Bbox To Gallery",
@@ -1710,6 +1716,8 @@
  "inpaintMask": "Inpaint Mask",
  "regionalGuidance": "Regional Guidance",
  "ipAdapter": "IP Adapter",
  "sendingToCanvas": "Sending to Canvas",
  "sendingToGallery": "Sending to Gallery",
  "sendToGallery": "Send To Gallery",
  "sendToGalleryDesc": "Generations will be sent to the gallery.",
  "sendToCanvas": "Send To Canvas",
@@ -1789,9 +1797,19 @@
  "filter": "Filter",
  "filters": "Filters",
  "filterType": "Filter Type",
  "preview": "Preview",
  "autoProcess": "Auto Process",
  "reset": "Reset",
  "process": "Process",
  "apply": "Apply",
  "cancel": "Cancel"
  "cancel": "Cancel",
  "spandrel": {
    "label": "Image-to-Image Model",
    "description": "Run an image-to-image model on the selected layer.",
    "paramModel": "Model",
    "paramAutoScale": "Auto Scale",
    "paramAutoScaleDesc": "The selected model will be run until the target scale is reached.",
    "paramScale": "Target Scale"
  }
},
"transform": {
  "transform": "Transform",
@@ -1799,6 +1817,28 @@
  "reset": "Reset",
  "apply": "Apply",
  "cancel": "Cancel"
},
"settings": {
  "snapToGrid": {
    "label": "Snap to Grid",
    "on": "On",
    "off": "Off"
  }
},
"HUD": {
  "bbox": "Bbox",
  "scaledBbox": "Scaled Bbox",
  "autoSave": "Auto Save",
  "entityStatus": {
    "selectedEntity": "Selected Entity",
    "selectedEntityIs": "Selected Entity is",
    "isFiltering": "is filtering",
    "isTransforming": "is transforming",
    "isLocked": "is locked",
    "isHidden": "is hidden",
    "isDisabled": "is disabled",
    "enabled": "Enabled"
  }
}
},
"upscaling": {
@@ -86,15 +86,15 @@
  "loadMore": "Cargar más",
  "noImagesInGallery": "No hay imágenes para mostrar",
  "deleteImage_one": "Eliminar Imagen",
  "deleteImage_many": "",
  "deleteImage_other": "",
  "deleteImage_many": "Eliminar {{count}} Imágenes",
  "deleteImage_other": "Eliminar {{count}} Imágenes",
  "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
  "assets": "Activos",
  "autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
},
"hotkeys": {
  "keyboardShortcuts": "Atajos de teclado",
  "appHotkeys": "Atajos de applicación",
  "appHotkeys": "Atajos de aplicación",
  "generalHotkeys": "Atajos generales",
  "galleryHotkeys": "Atajos de galería",
  "unifiedCanvasHotkeys": "Atajos de lienzo unificado",
@@ -535,7 +535,7 @@
  "bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
  "deleteBoardAndImages": "Borrar el panel y las imágenes",
  "loading": "Cargando...",
  "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
  "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
  "move": "Mover",
  "menuItemAutoAdd": "Agregar automáticamente a este panel",
  "searchBoard": "Buscando paneles…",
@@ -549,7 +549,13 @@
  "imagesWithCount_other": "{{count}} imágenes",
  "assetsWithCount_one": "{{count}} activo",
  "assetsWithCount_many": "{{count}} activos",
  "assetsWithCount_other": "{{count}} activos"
  "assetsWithCount_other": "{{count}} activos",
  "hideBoards": "Ocultar Paneles",
  "addPrivateBoard": "Agregar un tablero privado",
  "addSharedBoard": "Agregar Panel Compartido",
  "boards": "Paneles",
  "archiveBoard": "Archivar Panel",
  "archived": "Archivado"
},
"accordions": {
  "compositing": {
@@ -496,7 +496,9 @@
  "main": "Principali",
  "noModelsInstalledDesc1": "Installa i modelli con",
  "ipAdapters": "Adattatori IP",
  "noMatchingModels": "Nessun modello corrispondente"
  "noMatchingModels": "Nessun modello corrispondente",
  "starterModelsInModelManager": "I modelli iniziali possono essere trovati in Gestione Modelli",
  "spandrelImageToImage": "Immagine a immagine (Spandrel)"
},
"parameters": {
  "images": "Immagini",
@@ -510,7 +512,7 @@
  "perlinNoise": "Rumore Perlin",
  "type": "Tipo",
  "strength": "Forza",
  "upscaling": "Ampliamento",
  "upscaling": "Amplia",
  "scale": "Scala",
  "imageFit": "Adatta l'immagine iniziale alle dimensioni di output",
  "scaleBeforeProcessing": "Scala prima dell'elaborazione",
@@ -593,7 +595,7 @@
  "globalPositivePromptPlaceholder": "Prompt positivo globale",
  "globalNegativePromptPlaceholder": "Prompt negativo globale",
  "processImage": "Elabora Immagine",
  "sendToUpscale": "Invia a Ampliare",
  "sendToUpscale": "Invia a Amplia",
  "postProcessing": "Post-elaborazione (Shift + U)"
},
"settings": {
@@ -1420,7 +1422,7 @@
  "paramUpscaleMethod": {
    "heading": "Metodo di ampliamento",
    "paragraphs": [
      "Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione."
      "Metodo utilizzato per ampliare l'immagine per la correzione ad alta risoluzione."
    ]
  },
  "patchmatchDownScaleSize": {
@@ -1528,7 +1530,7 @@
},
"upscaleModel": {
  "paragraphs": [
    "Il modello di ampliamento (Upscale), scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
    "Il modello di ampliamento, scala l'immagine alle dimensioni di uscita prima di aggiungere i dettagli. È possibile utilizzare qualsiasi modello di ampliamento supportato, ma alcuni sono specializzati per diversi tipi di immagini, come foto o disegni al tratto."
  ],
  "heading": "Modello di ampliamento"
},
@@ -1720,26 +1722,27 @@
  "modelsTab": "$t(ui.tabs.models) $t(common.tab)",
  "queue": "Coda",
  "queueTab": "$t(ui.tabs.queue) $t(common.tab)",
  "upscaling": "Ampliamento",
  "upscaling": "Amplia",
  "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
}
},
"upscaling": {
  "creativity": "Creatività",
  "structure": "Struttura",
  "upscaleModel": "Modello di Ampliamento",
  "upscaleModel": "Modello di ampliamento",
  "scale": "Scala",
  "missingModelsWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare i modelli richiesti:",
  "mainModelDesc": "Modello principale (architettura SD1.5 o SDXL)",
  "tileControlNetModelDesc": "Modello Tile ControlNet per l'architettura del modello principale scelto",
  "upscaleModelDesc": "Modello per l'ampliamento (da immagine a immagine)",
  "upscaleModelDesc": "Modello per l'ampliamento (immagine a immagine)",
  "missingUpscaleInitialImage": "Immagine iniziale mancante per l'ampliamento",
  "missingUpscaleModel": "Modello per l’ampliamento mancante",
  "missingTileControlNetModel": "Nessun modello ControlNet Tile valido installato",
  "postProcessingModel": "Modello di post-elaborazione",
  "postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
  "exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
  "exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata."
  "exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
  "upscale": "Amplia"
},
"upsell": {
  "inviteTeammates": "Invita collaboratori",
@@ -1789,6 +1792,7 @@
  "positivePromptColumn": "'prompt' o 'positive_prompt'",
  "noTemplates": "Nessun modello",
  "acceptedColumnsKeys": "Colonne/chiavi accettate:",
  "templateActions": "Azioni modello"
  "templateActions": "Azioni modello",
  "promptTemplateCleared": "Modello di prompt cancellato"
}
}
@@ -501,7 +501,8 @@
  "noModelsInstalled": "Нет установленных моделей",
  "noModelsInstalledDesc1": "Установите модели с помощью",
  "noMatchingModels": "Нет подходящих моделей",
  "ipAdapters": "IP адаптеры"
  "ipAdapters": "IP адаптеры",
  "starterModelsInModelManager": "Стартовые модели можно найти в Менеджере моделей"
},
"parameters": {
  "images": "Изображения",
@@ -1758,7 +1759,8 @@
  "postProcessingModel": "Модель постобработки",
  "tileControlNetModelDesc": "Модель ControlNet для выбранной архитектуры основной модели",
  "missingModelsWarning": "Зайдите в <LinkComponent>Менеджер моделей</LinkComponent> чтоб установить необходимые модели:",
  "postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img)."
  "postProcessingMissingModelWarning": "Посетите <LinkComponent>Менеджер моделей</LinkComponent>, чтобы установить модель постобработки (img2img).",
  "upscale": "Увеличить"
},
"stylePresets": {
  "noMatchingTemplates": "Нет подходящих шаблонов",
@@ -1804,7 +1806,8 @@
  "noTemplates": "Нет шаблонов",
  "promptTemplatesDesc2": "Используйте строку-заполнитель <Pre>{{placeholder}}</Pre>, чтобы указать место, куда должен быть включен ваш запрос в шаблоне.",
  "searchByName": "Поиск по имени",
  "shared": "Общий"
  "shared": "Общий",
  "promptTemplateCleared": "Шаблон запроса очищен"
},
"upsell": {
  "inviteTeammates": "Пригласите членов команды",
@@ -154,7 +154,8 @@
  "displaySearch": "显示搜索",
  "stretchToFit": "拉伸以适应",
  "exitCompare": "退出对比",
  "compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。"
  "compareHelp1": "在点击图库中的图片或使用箭头键切换比较图片时,请按住<Kbd>Alt</Kbd> 键。",
  "go": "运行"
},
"hotkeys": {
  "keyboardShortcuts": "快捷键",
@@ -494,7 +495,9 @@
  "huggingFacePlaceholder": "所有者或模型名称",
  "huggingFaceRepoID": "HuggingFace仓库ID",
  "loraTriggerPhrases": "LoRA 触发词",
  "ipAdapters": "IP适配器"
  "ipAdapters": "IP适配器",
  "spandrelImageToImage": "图生图(Spandrel)",
  "starterModelsInModelManager": "您可以在模型管理器中找到初始模型"
},
"parameters": {
  "images": "图像",
@@ -695,7 +698,9 @@
  "outOfMemoryErrorDesc": "您当前的生成设置已超出系统处理能力.请调整设置后再次尝试.",
  "parametersSet": "参数已恢复",
  "errorCopied": "错误信息已复制",
  "modelImportCanceled": "模型导入已取消"
  "modelImportCanceled": "模型导入已取消",
  "importFailed": "导入失败",
  "importSuccessful": "导入成功"
},
"unifiedCanvas": {
  "layer": "图层",
@@ -1705,12 +1710,55 @@
  "missingModelsWarning": "请访问<LinkComponent>模型管理器</LinkComponent> 安装所需的模型:",
  "mainModelDesc": "主模型(SD1.5或SDXL架构)",
  "exceedsMaxSize": "放大设置超出了最大尺寸限制",
  "exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择."
  "exceedsMaxSizeDetails": "最大放大限制是 {{maxUpscaleDimension}}x{{maxUpscaleDimension}} 像素.请尝试一个较小的图像或减少您的缩放选择.",
  "upscale": "放大"
},
"upsell": {
  "inviteTeammates": "邀请团队成员",
  "professional": "专业",
  "professionalUpsell": "可在 Invoke 的专业版中使用.点击此处或访问 invoke.com/pricing 了解更多详情.",
  "shareAccess": "共享访问权限"
},
"stylePresets": {
  "positivePrompt": "正向提示词",
  "preview": "预览",
  "deleteImage": "删除图像",
  "deleteTemplate": "删除模版",
  "deleteTemplate2": "您确定要删除这个模板吗?请注意,删除后无法恢复.",
  "importTemplates": "导入提示模板,支持CSV或JSON格式",
  "insertPlaceholder": "插入一个占位符",
  "myTemplates": "我的模版",
  "name": "名称",
  "type": "类型",
  "unableToDeleteTemplate": "无法删除提示模板",
  "updatePromptTemplate": "更新提示词模版",
  "exportPromptTemplates": "导出我的提示模板为CSV格式",
  "exportDownloaded": "导出已下载",
  "noMatchingTemplates": "无匹配的模版",
  "promptTemplatesDesc1": "提示模板可以帮助您在编写提示时添加预设的文本内容.",
  "promptTemplatesDesc3": "如果您没有使用占位符,那么模板的内容将会被添加到您提示的末尾.",
  "searchByName": "按名称搜索",
  "shared": "已分享",
  "sharedTemplates": "已分享的模版",
  "templateActions": "模版操作",
  "templateDeleted": "提示模版已删除",
  "toggleViewMode": "切换显示模式",
  "uploadImage": "上传图像",
  "active": "激活",
  "choosePromptTemplate": "选择提示词模板",
  "clearTemplateSelection": "清除模版选择",
  "copyTemplate": "拷贝模版",
  "createPromptTemplate": "创建提示词模版",
  "defaultTemplates": "默认模版",
  "editTemplate": "编辑模版",
  "exportFailed": "无法生成并下载CSV文件",
  "flatten": "将选定的模板内容合并到当前提示中",
  "negativePrompt": "反向提示词",
  "promptTemplateCleared": "提示模板已清除",
  "useForTemplate": "用于提示词模版",
  "viewList": "预览模版列表",
  "viewModeTooltip": "这是您的提示在当前选定的模板下的预览效果。如需编辑提示,请直接在文本框中点击进行修改.",
  "noTemplates": "无模版",
  "private": "私密"
}
}
@@ -21,10 +21,16 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
      direction,
      shadows: {
        ..._theme.shadows,
        selected:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
        hoverSelected:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
        hoverUnselected:
          'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
        selectedForCompare:
          '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-400)',
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
        hoverSelectedForCompare:
          '0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-300)',
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
      },
    });
  }, [direction]);
@@ -1,5 +1,5 @@
import type { TypedStartListening } from '@reduxjs/toolkit';
import { createListenerMiddleware } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import { addAdHocPostProcessingRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/addAdHocPostProcessingRequestedListener';
import { addStagingListeners } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener';
import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued';
@@ -9,6 +9,7 @@ import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddlewar
import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected';
import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload';
import { addCancellationsListeners } from 'app/store/middleware/listenerMiddleware/listeners/cancellationsListeners';
import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear';
import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
@@ -40,6 +41,8 @@ export type AppStartListening = TypedStartListening<RootState, AppDispatch>;

const startAppListening = listenerMiddleware.startListening as AppStartListening;

export const addAppListener = addListener.withTypes<RootState, AppDispatch>();

/**
 * The RTK listener middleware is a lightweight alternative to sagas/observables.
 *
@@ -119,3 +122,5 @@ addDynamicPromptsListener(startAppListening);

addSetDefaultSettingsListener(startAppListening);
// addControlAdapterPreprocessor(startAppListening);

addCancellationsListeners(startAppListening);
@@ -1,37 +1,34 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import {
  sessionStagingAreaImageAccepted,
  sessionStagingAreaReset,
} from 'features/controlLayers/store/canvasSessionSlice';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { canvasReset, rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { stagingAreaImageAccepted, stagingAreaReset } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import { $lastCanvasProgressEvent } from 'services/events/setEventListeners';
import { assert } from 'tsafe';

const log = logger('canvas');

const matchCanvasOrStagingAreaReset = isAnyOf(stagingAreaReset, canvasReset);

export const addStagingListeners = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: sessionStagingAreaReset,
    matcher: matchCanvasOrStagingAreaReset,
    effect: async (_, { dispatch }) => {
      try {
        const req = dispatch(
          queueApi.endpoints.cancelByBatchOrigin.initiate(
            { origin: 'canvas' },
          queueApi.endpoints.cancelByBatchDestination.initiate(
            { destination: 'canvas' },
            { fixedCacheKey: 'cancelByBatchOrigin' }
          )
        );
        const { canceled } = await req.unwrap();
        req.reset();

        $lastCanvasProgressEvent.set(null);

        if (canceled > 0) {
          log.debug(`Canceled ${canceled} canvas batches`);
          toast({
@@ -52,11 +49,11 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
  });

  startAppListening({
    actionCreator: sessionStagingAreaImageAccepted,
    actionCreator: stagingAreaImageAccepted,
    effect: (action, api) => {
      const { index } = action.payload;
      const state = api.getState();
      const stagingAreaImage = state.canvasSession.stagedImages[index];
      const stagingAreaImage = state.canvasStagingArea.stagedImages[index];

      assert(stagingAreaImage, 'No staged image found to accept');
      const { x, y } = selectCanvasSlice(state).bbox.rect;
@@ -69,7 +66,7 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
      };

      api.dispatch(rasterLayerAdded({ overrides, isSelected: false }));
      api.dispatch(sessionStagingAreaReset());
      api.dispatch(stagingAreaReset());
    },
  });
};
@@ -0,0 +1,137 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { $lastCanvasProgressEvent } from 'features/controlLayers/store/canvasSlice';
import { queueApi } from 'services/api/endpoints/queue';

/**
 * To prevent a race condition where a progress event arrives after a successful cancellation, we need to keep track of
 * cancellations:
 * - In the route handlers above, we track and update the cancellations object
 * - When the user queues a batch, we should reset the cancellations, also handled in the route handlers above
 * - When we get a progress event, we should check if the event is cancelled before setting the event
 *
 * We have a few ways that cancellations are effected, so we need to track them all:
 * - by queue item id (in this case, we will compare the session_id and not the item_id)
 * - by batch id
 * - by destination
 * - by clearing the queue
 */
type Cancellations = {
  sessionIds: Set<string>;
  batchIds: Set<string>;
  destinations: Set<string>;
  clearQueue: boolean;
};

const resetCancellations = (): void => {
  cancellations.clearQueue = false;
  cancellations.sessionIds.clear();
  cancellations.batchIds.clear();
  cancellations.destinations.clear();
};

const cancellations: Cancellations = {
  sessionIds: new Set(),
  batchIds: new Set(),
  destinations: new Set(),
  clearQueue: false,
} as Readonly<Cancellations>;

/**
 * Checks if an item is cancelled, used to prevent race conditions with event handling.
 *
 * To use this, provide the session_id, batch_id and destination from the event payload.
 */
export const getIsCancelled = (item: {
  session_id: string;
  batch_id: string;
  destination?: string | null;
}): boolean => {
  if (cancellations.clearQueue) {
    return true;
  }
  if (cancellations.sessionIds.has(item.session_id)) {
    return true;
  }
  if (cancellations.batchIds.has(item.batch_id)) {
    return true;
  }
  if (item.destination && cancellations.destinations.has(item.destination)) {
    return true;
  }
  return false;
};
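The bookkeeping above is language-agnostic: keep one set per cancellation mechanism, and treat an event as stale if it matches any of them. A minimal Python sketch of the same idea (the names mirror the TypeScript, but this is illustrative, not the app's code):

from dataclasses import dataclass, field

@dataclass
class Cancellations:
    session_ids: set = field(default_factory=set)
    batch_ids: set = field(default_factory=set)
    destinations: set = field(default_factory=set)
    clear_queue: bool = False

    def is_cancelled(self, session_id, batch_id, destination=None):
        # Mirrors getIsCancelled: any matching bucket means the event is stale.
        return (
            self.clear_queue
            or session_id in self.session_ids
            or batch_id in self.batch_ids
            or (destination is not None and destination in self.destinations)
        )

c = Cancellations()
c.destinations.add("canvas")
assert c.is_cancelled("s1", "b1", "canvas")       # cancelled via destination
assert not c.is_cancelled("s1", "b1", "gallery")  # unrelated event survives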

export const addCancellationsListeners = (startAppListening: AppStartListening) => {
  // When we get a cancellation, we may need to clear the last progress event - the next few listeners handle those cases.
  // Maybe we could use the `getIsCancelled` util here, but I think that could introduce _another_ race condition...
  startAppListening({
    matcher: queueApi.endpoints.enqueueBatch.matchFulfilled,
    effect: () => {
      resetCancellations();
    },
  });

  startAppListening({
    matcher: queueApi.endpoints.cancelByBatchDestination.matchFulfilled,
    effect: (action) => {
      cancellations.destinations.add(action.meta.arg.originalArgs.destination);

      const event = $lastCanvasProgressEvent.get();
      if (!event) {
        return;
      }
      const { session_id, batch_id, destination } = event;
      if (getIsCancelled({ session_id, batch_id, destination })) {
        $lastCanvasProgressEvent.set(null);
      }
    },
  });

  startAppListening({
    matcher: queueApi.endpoints.cancelQueueItem.matchFulfilled,
    effect: (action) => {
      cancellations.sessionIds.add(action.payload.session_id);

      const event = $lastCanvasProgressEvent.get();
      if (!event) {
        return;
      }
      const { session_id, batch_id, destination } = event;
      if (getIsCancelled({ session_id, batch_id, destination })) {
        $lastCanvasProgressEvent.set(null);
      }
    },
  });

  startAppListening({
    matcher: queueApi.endpoints.cancelByBatchIds.matchFulfilled,
    effect: (action) => {
      for (const batch_id of action.meta.arg.originalArgs.batch_ids) {
        cancellations.batchIds.add(batch_id);
      }
      const event = $lastCanvasProgressEvent.get();
      if (!event) {
        return;
      }
      const { session_id, batch_id, destination } = event;
      if (getIsCancelled({ session_id, batch_id, destination })) {
        $lastCanvasProgressEvent.set(null);
      }
    },
  });

  startAppListening({
    matcher: queueApi.endpoints.clearQueue.matchFulfilled,
    effect: () => {
      cancellations.clearQueue = true;
      const event = $lastCanvasProgressEvent.get();
      if (!event) {
        return;
      }
      const { session_id, batch_id, destination } = event;
      if (getIsCancelled({ session_id, batch_id, destination })) {
        $lastCanvasProgressEvent.set(null);
      }
    },
  });
};
@@ -4,8 +4,12 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import type { SerializableObject } from 'common/types';
import type { Result } from 'common/util/result';
import { isErr, withResult, withResultAsync } from 'common/util/result';
import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasSessionSlice';
import { $canvasManager } from 'features/controlLayers/store/canvasSlice';
import {
  selectIsStaging,
  stagingAreaReset,
  stagingAreaStartedStaging,
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -31,14 +35,14 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)

      let didStartStaging = false;

      if (!state.canvasSession.isStaging && state.canvasSettings.sendToCanvas) {
        dispatch(sessionStartedStaging());
      if (!selectIsStaging(state) && state.canvasSettings.sendToCanvas) {
        dispatch(stagingAreaStartedStaging());
        didStartStaging = true;
      }

      const abortStaging = () => {
        if (didStartStaging && getState().canvasSession.isStaging) {
          dispatch(sessionStagingAreaReset());
        if (didStartStaging && selectIsStaging(getState())) {
          dispatch(stagingAreaReset());
        }
      };
@@ -1,6 +1,5 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
import { queueApi } from 'services/api/endpoints/queue';
@@ -11,7 +10,6 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
      enqueueRequested.match(action) && action.payload.tabName === 'upscaling',
    effect: async (action, { getState, dispatch }) => {
      const state = getState();
      const { shouldShowProgressInViewer } = state.ui;
      const { prepend } = action.payload;

      const { g, noise, posCond } = await buildMultidiffusionUpscaleGraph(state);
@@ -25,9 +23,6 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
      );
      try {
        await req.unwrap();
        if (shouldShowProgressInViewer) {
          dispatch(isImageViewerOpenChanged(true));
        }
      } finally {
        req.reset();
      }
@@ -17,7 +17,7 @@ import type { ImageDTO } from 'services/api/types';

const log = logger('gallery');

//TODO(psyche): handle image deletion (canvas sessions?)
//TODO(psyche): handle image deletion (canvas staging area?)

// Some utils to delete images from different parts of the app
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
@@ -1,6 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectDefaultControlAdapter } from 'features/controlLayers/hooks/addLayerHooks';
import {
  controlLayerAdded,
  ipaImageChanged,
@@ -12,7 +13,7 @@ import type { CanvasControlLayerState, CanvasRasterLayerState } from 'features/c
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { isValidDrop } from 'features/dnd/util/isValidDrop';
import { imageToCompareChanged, isImageViewerOpenChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
import { imagesApi } from 'services/api/endpoints/images';
@@ -103,11 +104,14 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
      activeData.payloadType === 'IMAGE_DTO' &&
      activeData.payload.imageDTO
    ) {
      const state = getState();
      const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
      const { x, y } = selectCanvasSlice(getState()).bbox.rect;
      const { x, y } = selectCanvasSlice(state).bbox.rect;
      const defaultControlAdapter = selectDefaultControlAdapter(state);
      const overrides: Partial<CanvasControlLayerState> = {
        objects: [imageObject],
        position: { x, y },
        controlAdapter: defaultControlAdapter,
      };
      dispatch(controlLayerAdded({ overrides, isSelected: true }));
      return;
@@ -142,7 +146,6 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
    ) {
      const { imageDTO } = activeData.payload;
      dispatch(imageToCompareChanged(imageDTO));
      dispatch(isImageViewerOpenChanged(true));
      return;
    }
@@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasSlice';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
import { toast } from 'features/toast/toast';
@@ -44,6 +45,8 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
      if (!autoAddBoardId || autoAddBoardId === 'none') {
        const title = postUploadAction.title || DEFAULT_UPLOADED_TOAST.title;
        toast({ ...DEFAULT_UPLOADED_TOAST, title });
        dispatch(boardIdSelected({ boardId: 'none' }));
        dispatch(galleryViewChanged('assets'));
      } else {
        // Add this image to the board
        dispatch(
@@ -67,6 +70,8 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
          ...DEFAULT_UPLOADED_TOAST,
          description,
        });
        dispatch(boardIdSelected({ boardId: autoAddBoardId }));
        dispatch(galleryViewChanged('assets'));
      }
      return;
    }
@@ -13,7 +13,7 @@ import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, refinerModelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { calculateNewSize } from 'features/parameters/components/DocumentSize/calculateNewSize';
import { calculateNewSize } from 'features/parameters/components/Bbox/calculateNewSize';
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas';
import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
@@ -6,9 +6,12 @@ import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
import type { SerializableObject } from 'common/types';
import { deepClone } from 'common/util/deepClone';
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
import { canvasSessionPersistConfig, canvasSessionSlice } from 'features/controlLayers/store/canvasSessionSlice';
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
import {
  canvasStagingAreaPersistConfig,
  canvasStagingAreaSlice,
} from 'features/controlLayers/store/canvasStagingAreaSlice';
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
@@ -63,7 +66,7 @@ const allReducers = {
  [stylePresetSlice.name]: stylePresetSlice.reducer,
  [paramsSlice.name]: paramsSlice.reducer,
  [canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
  [canvasSessionSlice.name]: canvasSessionSlice.reducer,
  [canvasStagingAreaSlice.name]: canvasStagingAreaSlice.reducer,
  [lorasSlice.name]: lorasSlice.reducer,
};
@@ -108,7 +111,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
  [stylePresetPersistConfig.name]: stylePresetPersistConfig,
  [paramsPersistConfig.name]: paramsPersistConfig,
  [canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
  [canvasSessionPersistConfig.name]: canvasSessionPersistConfig,
  [canvasStagingAreaPersistConfig.name]: canvasStagingAreaPersistConfig,
  [lorasPersistConfig.name]: lorasPersistConfig,
};
@@ -6,18 +6,51 @@ import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import ImageContextMenu from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
import type { MouseEvent, ReactElement, ReactNode, SyntheticEvent } from 'react';
import { memo, useCallback, useMemo, useState } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { PiImageBold, PiUploadSimpleBold } from 'react-icons/pi';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

import IAIDraggable from './IAIDraggable';
import IAIDroppable from './IAIDroppable';
import SelectionOverlay from './SelectionOverlay';

const defaultUploadElement = <Icon as={PiUploadSimpleBold} boxSize={16} />;

const defaultNoContentFallback = <IAINoContentFallback icon={PiImageBold} />;

const sx: SystemStyleObject = {
  '.gallery-image-container::before': {
    content: '""',
    display: 'inline-block',
    position: 'absolute',
    top: 0,
    left: 0,
    right: 0,
    bottom: 0,
    pointerEvents: 'none',
    borderRadius: 'base',
  },
  '&[data-selected="selected"]>.gallery-image-container::before': {
    boxShadow:
      'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
  },
  '&[data-selected="selectedForCompare"]>.gallery-image-container::before': {
    boxShadow:
      'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
  },
  '&:hover>.gallery-image-container::before': {
    boxShadow:
      'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
  },
  '&:hover[data-selected="selected"]>.gallery-image-container::before': {
    boxShadow:
      'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
  },
  '&:hover[data-selected="selectedForCompare"]>.gallery-image-container::before': {
    boxShadow:
      'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
  },
};

type IAIDndImageProps = FlexProps & {
  imageDTO: ImageDTO | undefined;
  onError?: (event: SyntheticEvent<HTMLImageElement>) => void;
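Note: this rewrite replaces the JS-driven SelectionOverlay (hover tracked with useState, removed in the hunks below) with pure CSS — the wrapper exposes a data-selected attribute and the sx object above paints the ring on a ::before pseudo-element, so hovering no longer re-renders the component. A minimal sketch of the same technique with illustrative names:

// Sketch: selection/hover ring via data attribute + ::before, no React hover state.
import { Box } from '@invoke-ai/ui-library';
import { memo } from 'react';

const ringSx = {
  '&::before': { content: '""', position: 'absolute', inset: 0, pointerEvents: 'none' },
  '&[data-selected="true"]::before': { boxShadow: 'inset 0 0 0 3px var(--invoke-colors-invokeBlue-500)' },
  '&:hover::before': { boxShadow: 'inset 0 0 0 2px var(--invoke-colors-invokeBlue-300)' },
};

export const SelectableThumb = memo(({ isSelected }: { isSelected: boolean }) => (
  <Box position="relative" sx={ringSx} data-selected={isSelected || undefined} />
));
SelectableThumb.displayName = 'SelectableThumb';
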
@@ -75,13 +108,11 @@ const IAIDndImage = (props: IAIDndImageProps) => {
    ...rest
  } = props;

  const [isHovered, setIsHovered] = useState(false);
  const handleMouseOver = useCallback(
    (e: MouseEvent<HTMLDivElement>) => {
      if (onMouseOver) {
        onMouseOver(e);
      }
      setIsHovered(true);
    },
    [onMouseOver]
  );
@@ -90,7 +121,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
      if (onMouseOut) {
        onMouseOut(e);
      }
      setIsHovered(false);
    },
    [onMouseOut]
  );
@@ -141,10 +171,13 @@ const IAIDndImage = (props: IAIDndImageProps) => {
      minH={minSize ? minSize : undefined}
      userSelect="none"
      cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
      sx={withHoverOverlay ? sx : undefined}
      data-selected={isSelectedForCompare ? 'selectedForCompare' : isSelected ? 'selected' : undefined}
      {...rest}
    >
      {imageDTO && (
        <Flex
          className="gallery-image-container"
          w="full"
          h="full"
          position={fitContainer ? 'absolute' : 'relative'}
@@ -167,11 +200,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
            data-testid={dataTestId}
          />
          {withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
          <SelectionOverlay
            isSelected={isSelected}
            isSelectedForCompare={isSelectedForCompare}
            isHovered={withHoverOverlay ? isHovered : false}
          />
        </Flex>
      )}
      {!imageDTO && !isUploadDisabled && (

@@ -1,104 +0,0 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Box, Flex, IconButton, Tooltip, useToken } from '@invoke-ai/ui-library';
import type { ReactElement, ReactNode } from 'react';
import { memo, useCallback, useMemo } from 'react';

type IconSwitchProps = {
  isChecked: boolean;
  onChange: (checked: boolean) => void;
  iconChecked: ReactElement;
  tooltipChecked?: ReactNode;
  iconUnchecked: ReactElement;
  tooltipUnchecked?: ReactNode;
  ariaLabel: string;
};

const getSx = (padding: string | number): SystemStyleObject => ({
  transition: 'left 0.1s ease-in-out, transform 0.1s ease-in-out',
  '&[data-checked="true"]': {
    left: `calc(100% - ${padding})`,
    transform: 'translateX(-100%)',
  },
  '&[data-checked="false"]': {
    left: padding,
    transform: 'translateX(0)',
  },
});

export const IconSwitch = memo(
  ({
    isChecked,
    onChange,
    iconChecked,
    tooltipChecked,
    iconUnchecked,
    tooltipUnchecked,
    ariaLabel,
  }: IconSwitchProps) => {
    const onUncheck = useCallback(() => {
      onChange(false);
    }, [onChange]);
    const onCheck = useCallback(() => {
      onChange(true);
    }, [onChange]);

    const gap = useToken('space', 1.5);
    const sx = useMemo(() => getSx(gap), [gap]);

    return (
      <Flex
        position="relative"
        bg="base.800"
        borderRadius="base"
        alignItems="center"
        justifyContent="center"
        h="full"
        p={gap}
        gap={gap}
      >
        <Box
          position="absolute"
          borderRadius="base"
          bg="invokeBlue.400"
          w={12}
          top={gap}
          bottom={gap}
          data-checked={isChecked}
          sx={sx}
        />
        <Tooltip hasArrow label={tooltipUnchecked}>
          <IconButton
            size="sm"
            fontSize={16}
            icon={iconUnchecked}
            onClick={onUncheck}
            variant={!isChecked ? 'solid' : 'ghost'}
            colorScheme={!isChecked ? 'invokeBlue' : 'base'}
            aria-label={ariaLabel}
            data-checked={!isChecked}
            w={12}
            alignSelf="stretch"
            h="auto"
          />
        </Tooltip>
        <Tooltip hasArrow label={tooltipChecked}>
          <IconButton
            size="sm"
            fontSize={16}
            icon={iconChecked}
            onClick={onCheck}
            variant={isChecked ? 'solid' : 'ghost'}
            colorScheme={isChecked ? 'invokeBlue' : 'base'}
            aria-label={ariaLabel}
            data-checked={isChecked}
            w={12}
            alignSelf="stretch"
            h="auto"
          />
        </Tooltip>
      </Flex>
    );
  }
);

IconSwitch.displayName = 'IconSwitch';
@@ -1,46 +0,0 @@
import { Box } from '@invoke-ai/ui-library';
import { memo, useMemo } from 'react';

type Props = {
  isSelected: boolean;
  isSelectedForCompare: boolean;
  isHovered: boolean;
};
const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props) => {
  const shadow = useMemo(() => {
    if (isSelectedForCompare && isHovered) {
      return 'hoverSelectedForCompare';
    }
    if (isSelectedForCompare && !isHovered) {
      return 'selectedForCompare';
    }
    if (isSelected && isHovered) {
      return 'hoverSelected';
    }
    if (isSelected && !isHovered) {
      return 'selected';
    }
    if (!isSelected && isHovered) {
      return 'hoverUnselected';
    }
    return undefined;
  }, [isHovered, isSelected, isSelectedForCompare]);
  return (
    <Box
      className="selection-box"
      position="absolute"
      top={0}
      insetInlineEnd={0}
      bottom={0}
      insetInlineStart={0}
      borderRadius="base"
      opacity={isSelected || isSelectedForCompare ? 1 : 0.7}
      transitionProperty="common"
      transitionDuration="0.1s"
      pointerEvents="none"
      shadow={shadow}
    />
  );
};

export default memo(SelectionOverlay);
@@ -114,4 +114,13 @@ export const useGlobalHotkeys = () => {
    },
    [dispatch, isModelManagerEnabled]
  );

  useHotkeys(
    isModelManagerEnabled ? '6' : '5',
    () => {
      dispatch(setActiveTab('gallery'));
      setScopes([]);
    },
    [dispatch, isModelManagerEnabled]
  );
};

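Note: the appended hotkey binds the gallery tab to '6' when the model manager tab is enabled and '5' otherwise, keeping the number keys aligned with the visible tab order. A minimal sketch of the pattern, using the same react-hotkeys-hook style call as the hunk above:

// Sketch: a hotkey whose key depends on which tabs are enabled.
import { useHotkeys } from 'react-hotkeys-hook';

export const useGalleryTabHotkey = (isModelManagerEnabled: boolean, openGallery: () => void) => {
  // Gallery is the 6th tab when the model manager tab is present, else the 5th.
  useHotkeys(isModelManagerEnabled ? '6' : '5', openGallery, [isModelManagerEnabled, openGallery]);
};
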
@@ -2,6 +2,7 @@ import { useStore } from '@nanostores/react';
import { $isConnected } from 'app/hooks/useSocketIO';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
@@ -17,6 +18,7 @@ import { selectSystemSlice } from 'features/system/store/systemSlice';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import i18n from 'i18next';
import { forEach, upperFirst } from 'lodash-es';
import { atom } from 'nanostores';
import { useMemo } from 'react';
import { getConnectedEdges } from 'reactflow';

@@ -28,7 +30,7 @@ const LAYER_TYPE_TO_TKEY = {
  control_layer: 'controlLayers.globalControlAdapter',
} as const;

const createSelector = (templates: Templates, isConnected: boolean) =>
const createSelector = (templates: Templates, isConnected: boolean, canvasIsBusy: boolean) =>
  createMemoizedSelector(
    [
      selectSystemSlice,
@@ -117,6 +119,10 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
          reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
        }
      } else {
        if (canvasIsBusy) {
          reasons.push({ content: i18n.t('parameters.invoke.canvasBusy') });
        }

        if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
          reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
        }
@@ -240,10 +246,17 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
    }
  );

const dummyAtom = atom(true);

export const useIsReadyToEnqueue = () => {
  const templates = useStore($templates);
  const isConnected = useStore($isConnected);
  const selector = useMemo(() => createSelector(templates, isConnected), [templates, isConnected]);
  const canvasManager = useCanvasManagerSafe();
  const canvasIsBusy = useStore(canvasManager?.$isBusy ?? dummyAtom);
  const selector = useMemo(
    () => createSelector(templates, isConnected, canvasIsBusy),
    [templates, isConnected, canvasIsBusy]
  );
  const value = useAppSelector(selector);
  return value;
};

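Note: useStore must be called unconditionally, so the hook above swaps in a module-level dummyAtom whenever no canvas manager is mounted rather than skipping the hook. A minimal sketch of that fallback pattern (the default value is a choice; the hunk above uses atom(true)):

// Sketch: unconditional useStore with a fallback atom for an optional store.
import { useStore } from '@nanostores/react';
import { atom, type ReadableAtom } from 'nanostores';

const fallbackAtom = atom(false);

export const useOptionalBusyFlag = (store: ReadableAtom<boolean> | undefined): boolean => {
  // Hooks cannot be called conditionally, so we substitute the store, not the call.
  return useStore(store ?? fallbackAtom);
};
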
@@ -1,6 +1,9 @@
export const roundDownToMultiple = (num: number, multiple: number): number => {
  return Math.floor(num / multiple) * multiple;
};
export const roundUpToMultiple = (num: number, multiple: number): number => {
  return Math.ceil(num / multiple) * multiple;
};

export const roundToMultiple = (num: number, multiple: number): number => {
  return Math.round(num / multiple) * multiple;

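Note: these helpers snap sizes onto a grid, e.g. the 8- or 64-pixel multiples that generation models typically require. A quick usage sketch:

// Sketch: snapping a requested dimension to a required multiple of 64.
roundDownToMultiple(1023, 64); // 960
roundUpToMultiple(1023, 64); // 1024
roundToMultiple(1000, 64); // 1024 (nearest multiple)
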
@@ -1,34 +1,22 @@
import { Button, ButtonGroup, Flex } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
  useAddControlLayer,
  useAddInpaintMask,
  useAddIPAdapter,
  useAddRasterLayer,
  useAddRegionalGuidance,
} from 'features/controlLayers/hooks/addLayerHooks';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const CanvasAddEntityButtons = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded({ isSelected: true }));
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addIPAdapter = useCallback(() => {
    dispatch(ipaAdded({ isSelected: true }));
  }, [dispatch]);
  const addInpaintMask = useAddInpaintMask();
  const addRegionalGuidance = useAddRegionalGuidance();
  const addRasterLayer = useAddRasterLayer();
  const addControlLayer = useAddControlLayer();
  const addIPAdapter = useAddIPAdapter();

  return (
    <Flex flexDir="column" w="full" h="full" alignItems="center">

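Note: the per-component useCallback + dispatch boilerplate moves into shared hooks in features/controlLayers/hooks/addLayerHooks, which this diff consumes but does not show. A minimal sketch of what one such hook presumably looks like:

// Sketch (assumed implementation; addLayerHooks itself is not part of this diff).
import { useCallback } from 'react';
import { useAppDispatch } from 'app/store/storeHooks';
import { inpaintMaskAdded } from 'features/controlLayers/store/canvasSlice';

export const useAddInpaintMask = () => {
  const dispatch = useAppDispatch();
  return useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
};
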
@@ -1,7 +1,6 @@
import { Flex } from '@invoke-ai/ui-library';
import IAIDroppable from 'common/components/IAIDroppable';
import type { AddControlLayerFromImageDropData, AddRasterLayerFromImageDropData } from 'features/dnd/types';
import { useIsImageViewerOpen } from 'features/gallery/components/ImageViewer/useImageViewer';
import { memo } from 'react';

const addRasterLayerFromImageDropData: AddRasterLayerFromImageDropData = {
@@ -15,12 +14,6 @@ const addControlLayerFromImageDropData: AddControlLayerFromImageDropData = {
};

export const CanvasDropArea = memo(() => {
  const isImageViewerOpen = useIsImageViewerOpen();

  if (isImageViewerOpen) {
    return null;
  }

  return (
    <>
      <Flex position="absolute" top={0} right={0} bottom="50%" left={0} gap={2} pointerEvents="none">

@@ -1,24 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import type { Meta, StoryObj } from '@storybook/react';
import { CanvasEditor } from 'features/controlLayers/components/CanvasEditor';

const meta: Meta<typeof CanvasEditor> = {
  title: 'Feature/ControlLayers',
  tags: ['autodocs'],
  component: CanvasEditor,
};

export default meta;
type Story = StoryObj<typeof CanvasEditor>;

const Component = () => {
  return (
    <Flex w={1500} h={1500}>
      <CanvasEditor />
    </Flex>
  );
};

export const Default: Story = {
  render: Component,
};
@@ -1,20 +0,0 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListActionBarAddLayerButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuButton';
import { EntityListActionBarDeleteButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarDeleteButton';
import { EntityListActionBarSelectedEntityFill } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityFill';
import { SelectedEntityOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityOpacity';
import { memo } from 'react';

export const EntityListActionBar = memo(() => {
  return (
    <Flex w="full" py={1} px={1} gap={2} alignItems="center">
      <SelectedEntityOpacity />
      <Spacer />
      <EntityListActionBarSelectedEntityFill />
      <EntityListActionBarAddLayerButton />
      <EntityListActionBarDeleteButton />
    </Flex>
  );
});

EntityListActionBar.displayName = 'EntityListActionBar';
@@ -1,28 +0,0 @@
import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { CanvasEntityListMenuItems } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuItems';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const EntityListActionBarAddLayerButton = memo(() => {
  const { t } = useTranslation();

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        size="sm"
        tooltip={t('controlLayers.addLayer')}
        aria-label={t('controlLayers.addLayer')}
        icon={<PiPlusBold />}
        variant="ghost"
        data-testid="control-layers-add-layer-menu-button"
      />
      <MenuList>
        <CanvasEntityListMenuItems />
      </MenuList>
    </Menu>
  );
});

EntityListActionBarAddLayerButton.displayName = 'EntityListActionBarAddLayerButton';
@@ -1,57 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useDefaultIPAdapter } from 'features/controlLayers/hooks/useLayerControlAdapter';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const CanvasEntityListMenuItems = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const defaultIPAdapter = useDefaultIPAdapter();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded({ isSelected: true }));
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addIPAdapter = useCallback(() => {
    const overrides = { ipAdapter: defaultIPAdapter };
    dispatch(ipaAdded({ isSelected: true, overrides }));
  }, [defaultIPAdapter, dispatch]);

  return (
    <>
      <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
        {t('controlLayers.inpaintMask')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
        {t('controlLayers.regionalGuidance')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
        {t('controlLayers.rasterLayer')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
        {t('controlLayers.controlLayer')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
        {t('controlLayers.globalIPAdapter')}
      </MenuItem>
    </>
  );
});

CanvasEntityListMenuItems.displayName = 'CanvasEntityListMenu';
@@ -1,39 +0,0 @@
import { IconButton, useShiftModifier } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allEntitiesDeleted, entityDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectEntityCount, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';

export const EntityListActionBarDeleteButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const entityCount = useAppSelector(selectEntityCount);
  const shift = useShiftModifier();
  const onClick = useCallback(() => {
    if (shift) {
      dispatch(allEntitiesDeleted());
      return;
    }
    if (!selectedEntityIdentifier) {
      return;
    }
    dispatch(entityDeleted({ entityIdentifier: selectedEntityIdentifier }));
  }, [dispatch, selectedEntityIdentifier, shift]);

  return (
    <IconButton
      onClick={onClick}
      isDisabled={shift ? entityCount === 0 : !selectedEntityIdentifier}
      size="sm"
      variant="ghost"
      aria-label={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      tooltip={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      icon={<PiTrashSimpleFill />}
    />
  );
});

EntityListActionBarDeleteButton.displayName = 'EntityListActionBarDeleteButton';
@@ -0,0 +1,54 @@
import { IconButton, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import {
  useAddControlLayer,
  useAddInpaintMask,
  useAddIPAdapter,
  useAddRasterLayer,
  useAddRegionalGuidance,
} from 'features/controlLayers/hooks/addLayerHooks';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
  const { t } = useTranslation();
  const addInpaintMask = useAddInpaintMask();
  const addRegionalGuidance = useAddRegionalGuidance();
  const addRasterLayer = useAddRasterLayer();
  const addControlLayer = useAddControlLayer();
  const addIPAdapter = useAddIPAdapter();

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        size="sm"
        variant="link"
        alignSelf="stretch"
        tooltip={t('controlLayers.addLayer')}
        aria-label={t('controlLayers.addLayer')}
        icon={<PiPlusBold />}
        data-testid="control-layers-add-layer-menu-button"
      />
      <MenuList>
        <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
          {t('controlLayers.inpaintMask')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
          {t('controlLayers.regionalGuidance')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
          {t('controlLayers.rasterLayer')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
          {t('controlLayers.controlLayer')}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
          {t('controlLayers.globalIPAdapter')}
        </MenuItem>
      </MenuList>
    </Menu>
  );
});

EntityListGlobalActionBarAddLayerMenu.displayName = 'EntityListGlobalActionBarAddLayerMenu';
@@ -0,0 +1,26 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListGlobalActionBarAddLayerMenu } from 'features/controlLayers/components/CanvasEntityList/EntityListGlobalActionBarAddLayerMenu';
import { EntityListSelectedEntityActionBarDuplicateButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarDuplicateButton';
import { EntityListSelectedEntityActionBarFill } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFill';
import { EntityListSelectedEntityActionBarFilterButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarFilterButton';
import { EntityListSelectedEntityActionBarOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarOpacity';
import { EntityListSelectedEntityActionBarTransformButton } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBarTransformButton';
import { memo } from 'react';

export const EntityListSelectedEntityActionBar = memo(() => {
  return (
    <Flex w="full" gap={2} alignItems="center" ps={1}>
      <EntityListSelectedEntityActionBarOpacity />
      <Spacer />
      <EntityListSelectedEntityActionBarFill />
      <Flex h="full">
        <EntityListSelectedEntityActionBarFilterButton />
        <EntityListSelectedEntityActionBarTransformButton />
        <EntityListSelectedEntityActionBarDuplicateButton />
        <EntityListGlobalActionBarAddLayerMenu />
      </Flex>
    </Flex>
  );
});

EntityListSelectedEntityActionBar.displayName = 'EntityListSelectedEntityActionBar';
@@ -0,0 +1,34 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { entityDuplicated } from 'features/controlLayers/store/canvasSlice';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCopyFill } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarDuplicateButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const onClick = useCallback(() => {
    if (!selectedEntityIdentifier) {
      return;
    }
    dispatch(entityDuplicated({ entityIdentifier: selectedEntityIdentifier }));
  }, [dispatch, selectedEntityIdentifier]);

  return (
    <IconButton
      onClick={onClick}
      isDisabled={!selectedEntityIdentifier}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.duplicate')}
      tooltip={t('controlLayers.duplicate')}
      icon={<PiCopyFill />}
    />
  );
});

EntityListSelectedEntityActionBarDuplicateButton.displayName = 'EntityListSelectedEntityActionBarDuplicateButton';
@@ -9,7 +9,7 @@ import { type FillStyle, isMaskEntityIdentifier, type RgbColor } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

export const EntityListActionBarSelectedEntityFill = memo(() => {
export const EntityListSelectedEntityActionBarFill = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
@@ -67,4 +67,4 @@ export const EntityListActionBarSelectedEntityFill = memo(() => {
  );
});

EntityListActionBarSelectedEntityFill.displayName = 'EntityListActionBarSelectedEntityFill';
EntityListSelectedEntityActionBarFill.displayName = 'EntityListSelectedEntityActionBarFill';
@@ -0,0 +1,37 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityFilter } from 'features/controlLayers/hooks/useEntityFilter';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { isFilterableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiShootingStarBold } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarFilterButton = memo(() => {
  const { t } = useTranslation();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const filter = useEntityFilter(selectedEntityIdentifier);

  if (!selectedEntityIdentifier) {
    return null;
  }

  if (!isFilterableEntityIdentifier(selectedEntityIdentifier)) {
    return null;
  }

  return (
    <IconButton
      onClick={filter.start}
      isDisabled={filter.isDisabled}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.filter.filter')}
      tooltip={t('controlLayers.filter.filter')}
      icon={<PiShootingStarBold />}
    />
  );
});

EntityListSelectedEntityActionBarFilterButton.displayName = 'EntityListSelectedEntityActionBarFilterButton';
@@ -77,7 +77,7 @@ const selectOpacity = createSelector(selectCanvasSlice, (canvas) => {
  return selectedEntity.opacity;
});

export const SelectedEntityOpacity = memo(() => {
export const EntityListSelectedEntityActionBarOpacity = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
@@ -193,4 +193,4 @@ export const SelectedEntityOpacity = memo(() => {
  );
});

SelectedEntityOpacity.displayName = 'SelectedEntityOpacity';
EntityListSelectedEntityActionBarOpacity.displayName = 'EntityListSelectedEntityActionBarOpacity';
@@ -0,0 +1,37 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityTransform } from 'features/controlLayers/hooks/useEntityTransform';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { isTransformableEntityIdentifier } from 'features/controlLayers/store/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFrameCornersBold } from 'react-icons/pi';

export const EntityListSelectedEntityActionBarTransformButton = memo(() => {
  const { t } = useTranslation();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const transform = useEntityTransform(selectedEntityIdentifier);

  if (!selectedEntityIdentifier) {
    return null;
  }

  if (!isTransformableEntityIdentifier(selectedEntityIdentifier)) {
    return null;
  }

  return (
    <IconButton
      onClick={transform.start}
      isDisabled={transform.isDisabled}
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label={t('controlLayers.transform.transform')}
      tooltip={t('controlLayers.transform.transform')}
      icon={<PiFrameCornersBold />}
    />
  );
});

EntityListSelectedEntityActionBarTransformButton.displayName = 'EntityListSelectedEntityActionBarTransformButton';
@@ -2,7 +2,7 @@ import { Divider, Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasAddEntityButtons } from 'features/controlLayers/components/CanvasAddEntityButtons';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList/CanvasEntityList';
import { EntityListActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBar';
import { EntityListSelectedEntityActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListSelectedEntityActionBar';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectHasEntities } from 'features/controlLayers/store/selectors';
import { memo } from 'react';
@@ -13,7 +13,7 @@ export const CanvasPanelContent = memo(() => {
  return (
    <CanvasManagerProviderGate>
      <Flex flexDir="column" gap={2} w="full" h="full">
        <EntityListActionBar />
        <EntityListSelectedEntityActionBar />
        <Divider py={0} />
        {!hasEntities && <CanvasAddEntityButtons />}
        {hasEntities && <CanvasEntityList />}

@@ -1,145 +0,0 @@
import { Flex, Grid, GridItem, IconButton } from '@invoke-ai/ui-library';
import { memo, useCallback, useState } from 'react';
import {
  PiArrowDownBold,
  PiArrowDownLeftBold,
  PiArrowDownRightBold,
  PiArrowLeftBold,
  PiArrowRightBold,
  PiArrowUpBold,
  PiArrowUpLeftBold,
  PiArrowUpRightBold,
  PiSquareBold,
} from 'react-icons/pi';

type ResizeDirection =
  | 'up-left'
  | 'up'
  | 'up-right'
  | 'left'
  | 'center-out'
  | 'right'
  | 'down-left'
  | 'down'
  | 'down-right';

export const CanvasResizer = memo(() => {
  const [resizeDirection, setResizeDirection] = useState<ResizeDirection>('center-out');

  const setDirUpLeft = useCallback(() => {
    setResizeDirection('up-left');
  }, []);

  const setDirUp = useCallback(() => {
    setResizeDirection('up');
  }, []);

  const setDirUpRight = useCallback(() => {
    setResizeDirection('up-right');
  }, []);

  const setDirLeft = useCallback(() => {
    setResizeDirection('left');
  }, []);

  const setDirCenterOut = useCallback(() => {
    setResizeDirection('center-out');
  }, []);

  const setDirRight = useCallback(() => {
    setResizeDirection('right');
  }, []);

  const setDirDownLeft = useCallback(() => {
    setResizeDirection('down-left');
  }, []);

  const setDirDown = useCallback(() => {
    setResizeDirection('down');
  }, []);

  const setDirDownRight = useCallback(() => {
    setResizeDirection('down-right');
  }, []);

  return (
    <Flex p={2}>
      <Grid gridTemplateRows="1fr 1fr 1fr" gridTemplateColumns="1fr 1fr 1fr" gap={2}>
        <GridItem>
          <IconButton
            onClick={setDirUpLeft}
            aria-label="up-left"
            icon={<PiArrowUpLeftBold />}
            variant={resizeDirection === 'up-left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirUp}
            aria-label="up"
            icon={<PiArrowUpBold />}
            variant={resizeDirection === 'up' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirUpRight}
            aria-label="up-right"
            icon={<PiArrowUpRightBold />}
            variant={resizeDirection === 'up-right' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirLeft}
            aria-label="left"
            icon={<PiArrowLeftBold />}
            variant={resizeDirection === 'left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirCenterOut}
            aria-label="center-out"
            icon={<PiSquareBold />}
            variant={resizeDirection === 'center-out' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirRight}
            aria-label="right"
            icon={<PiArrowRightBold />}
            variant={resizeDirection === 'right' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDownLeft}
            aria-label="down-left"
            icon={<PiArrowDownLeftBold />}
            variant={resizeDirection === 'down-left' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDown}
            aria-label="down"
            icon={<PiArrowDownBold />}
            variant={resizeDirection === 'down' ? 'solid' : 'ghost'}
          />
        </GridItem>
        <GridItem>
          <IconButton
            onClick={setDirDownRight}
            aria-label="down-right"
            icon={<PiArrowDownRightBold />}
            variant={resizeDirection === 'down-right' ? 'solid' : 'ghost'}
          />
        </GridItem>
      </Grid>
    </Flex>
  );
});

CanvasResizer.displayName = 'CanvasResizer';
@@ -0,0 +1,83 @@
import { useDndContext } from '@dnd-kit/core';
import { Box, Spacer, Tab, TabList, TabPanel, TabPanels, Tabs } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useScopeOnFocus } from 'common/hooks/interactionScopes';
import { CanvasPanelContent } from 'features/controlLayers/components/CanvasPanelContent';
import { CanvasSendToToggle } from 'features/controlLayers/components/CanvasSendToToggle';
import { selectSendToCanvas } from 'features/controlLayers/store/canvasSettingsSlice';
import GalleryPanelContent from 'features/gallery/components/GalleryPanelContent';
import { memo, useCallback, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';

export const CanvasRightPanelContent = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
  const [tab, setTab] = useState(0);
  useScopeOnFocus('gallery', ref);

  return (
    <Tabs index={tab} onChange={setTab} w="full" h="full" display="flex" flexDir="column">
      <TabList alignItems="center">
        <PanelTabs setTab={setTab} />
        <Spacer />
        <CanvasSendToToggle />
      </TabList>
      <TabPanels w="full" h="full">
        <TabPanel w="full" h="full" p={0} pt={3}>
          <CanvasPanelContent />
        </TabPanel>
        <TabPanel w="full" h="full" p={0} pt={3}>
          <GalleryPanelContent />
        </TabPanel>
      </TabPanels>
    </Tabs>
  );
});

CanvasRightPanelContent.displayName = 'CanvasRightPanelContent';

const PanelTabs = memo(({ setTab }: { setTab: (val: number) => void }) => {
  const { t } = useTranslation();
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const tabTimeout = useRef<number | null>(null);
  const dndCtx = useDndContext();

  const onOnMouseOverLayersTab = useCallback(() => {
    tabTimeout.current = window.setTimeout(() => {
      if (dndCtx.active) {
        setTab(1);
      }
    }, 300);
  }, [dndCtx.active, setTab]);

  const onOnMouseOverGalleryTab = useCallback(() => {
    tabTimeout.current = window.setTimeout(() => {
      if (dndCtx.active) {
        setTab(0);
      }
    }, 300);
  }, [dndCtx.active, setTab]);

  const onMouseOut = useCallback(() => {
    if (tabTimeout.current) {
      clearTimeout(tabTimeout.current);
    }
  }, []);
  return (
    <>
      <Tab position="relative" onMouseOver={onOnMouseOverLayersTab} onMouseOut={onMouseOut}>
        {t('controlLayers.layer_other')}
        {sendToCanvas && (
          <Box position="absolute" top={2} right={2} h={2} w={2} bg="invokeYellow.300" borderRadius="full" />
        )}
      </Tab>
      <Tab position="relative" onMouseOver={onOnMouseOverGalleryTab} onMouseOut={onMouseOut}>
        {t('gallery.gallery')}
        {!sendToCanvas && (
          <Box position="absolute" top={2} right={2} h={2} w={2} bg="invokeYellow.300" borderRadius="full" />
        )}
      </Tab>
    </>
  );
});

PanelTabs.displayName = 'PanelTabs';
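Note: PanelTabs auto-switches panels during a drag — hovering a tab for 300 ms while a dnd-kit drag is active flips the tab, and onMouseOut cancels the pending timer so a quick pass-over does nothing. A minimal sketch of the hover-to-switch technique, simplified from the component above:

// Sketch: activate a drop target's tab after hovering it mid-drag.
import { useDndContext } from '@dnd-kit/core';
import { useCallback, useRef } from 'react';

export const useHoverToSwitch = (activate: () => void, delayMs = 300) => {
  const timeoutRef = useRef<number | null>(null);
  const dndCtx = useDndContext();

  const onMouseOver = useCallback(() => {
    timeoutRef.current = window.setTimeout(() => {
      if (dndCtx.active) {
        activate(); // only switch while something is actually being dragged
      }
    }, delayMs);
  }, [activate, delayMs, dndCtx.active]);

  const onMouseOut = useCallback(() => {
    if (timeoutRef.current) {
      clearTimeout(timeoutRef.current);
    }
  }, []);

  return { onMouseOver, onMouseOut };
};
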
@@ -1,64 +1,76 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { IconSwitch } from 'common/components/IconSwitch';
import {
  selectCanvasSettingsSlice,
  settingsSendToCanvasChanged,
} from 'features/controlLayers/store/canvasSettingsSlice';
  Button,
  Flex,
  Icon,
  Popover,
  PopoverArrow,
  PopoverBody,
  PopoverContent,
  PopoverTrigger,
  Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectSendToCanvas, settingsSendToCanvasChanged } from 'features/controlLayers/store/canvasSettingsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiImageBold, PiPaintBrushBold } from 'react-icons/pi';

const TooltipSendToGallery = memo(() => {
  const { t } = useTranslation();

  return (
    <Flex flexDir="column">
      <Text fontWeight="semibold">{t('controlLayers.sendToGallery')}</Text>
      <Text fontWeight="normal">{t('controlLayers.sendToGalleryDesc')}</Text>
    </Flex>
  );
});

TooltipSendToGallery.displayName = 'TooltipSendToGallery';

const TooltipSendToCanvas = memo(() => {
  const { t } = useTranslation();

  return (
    <Flex flexDir="column">
      <Text fontWeight="semibold">{t('controlLayers.sendToCanvas')}</Text>
      <Text fontWeight="normal">{t('controlLayers.sendToCanvasDesc')}</Text>
    </Flex>
  );
});

TooltipSendToCanvas.displayName = 'TooltipSendToCanvas';

const selectSendToCanvas = createSelector(selectCanvasSettingsSlice, (canvasSettings) => canvasSettings.sendToCanvas);
import { PiCaretDownBold, PiCheckBold } from 'react-icons/pi';

export const CanvasSendToToggle = memo(() => {
  const dispatch = useAppDispatch();
  const { t } = useTranslation();
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const dispatch = useAppDispatch();

  const onChange = useCallback(
    (isChecked: boolean) => {
      dispatch(settingsSendToCanvasChanged(isChecked));
    },
    [dispatch]
  );
  const enableSendToCanvas = useCallback(() => {
    dispatch(settingsSendToCanvasChanged(true));
  }, [dispatch]);

  const disableSendToCanvas = useCallback(() => {
    dispatch(settingsSendToCanvasChanged(false));
  }, [dispatch]);

  return (
    <IconSwitch
      isChecked={sendToCanvas}
      onChange={onChange}
      iconUnchecked={<PiImageBold />}
      tooltipUnchecked={<TooltipSendToGallery />}
      iconChecked={<PiPaintBrushBold />}
      tooltipChecked={<TooltipSendToCanvas />}
      ariaLabel="Toggle canvas mode"
    />
    <Popover isLazy>
      <PopoverTrigger>
        <Button
          size="sm"
          variant="link"
          data-testid="toggle-viewer-menu-button"
          pointerEvents="auto"
          rightIcon={<PiCaretDownBold />}
        >
          {sendToCanvas ? t('controlLayers.sendingToCanvas') : t('controlLayers.sendingToGallery')}
        </Button>
      </PopoverTrigger>
      <PopoverContent p={2} pointerEvents="auto">
        <PopoverArrow />
        <PopoverBody>
          <Flex flexDir="column">
            <Button onClick={disableSendToCanvas} variant="ghost" h="auto" w="auto" p={2}>
              <Flex gap={2} w="full">
                <Icon as={PiCheckBold} visibility={!sendToCanvas ? 'visible' : 'hidden'} />
                <Flex flexDir="column" gap={2} alignItems="flex-start">
                  <Text fontWeight="semibold">{t('controlLayers.sendToGallery')}</Text>
                  <Text fontWeight="normal" variant="subtext">
                    {t('controlLayers.sendToGalleryDesc')}
                  </Text>
                </Flex>
              </Flex>
            </Button>
            <Button onClick={enableSendToCanvas} variant="ghost" h="auto" w="auto" p={2}>
              <Flex gap={2} w="full">
                <Icon as={PiCheckBold} visibility={sendToCanvas ? 'visible' : 'hidden'} />
                <Flex flexDir="column" gap={2} alignItems="flex-start">
                  <Text fontWeight="semibold">{t('controlLayers.sendToCanvas')}</Text>
                  <Text fontWeight="normal" variant="subtext">
                    {t('controlLayers.sendToCanvasDesc')}
                  </Text>
                </Flex>
              </Flex>
            </Button>
          </Flex>
        </PopoverBody>
      </PopoverContent>
    </Popover>
  );
});

@@ -11,7 +11,7 @@ import { Transform } from 'features/controlLayers/components/Transform/Transform';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { memo, useRef } from 'react';

export const CanvasEditor = memo(() => {
export const CanvasTabContent = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
  useScopeOnFocus('canvas', ref);

@@ -48,4 +48,4 @@ export const CanvasEditor = memo(() => {
  );
});

CanvasEditor.displayName = 'CanvasEditor';
CanvasTabContent.displayName = 'CanvasTabContent';
@@ -1,21 +1,35 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { createMemoizedAppSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { ControlLayerControlAdapterControlMode } from 'features/controlLayers/components/ControlLayer/ControlLayerControlAdapterControlMode';
import { ControlLayerControlAdapterModel } from 'features/controlLayers/components/ControlLayer/ControlLayerControlAdapterModel';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useControlLayerControlAdapter } from 'features/controlLayers/hooks/useLayerControlAdapter';
import {
  controlLayerBeginEndStepPctChanged,
  controlLayerControlModeChanged,
  controlLayerModelChanged,
  controlLayerWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import type { ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import type { ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';

const useControlLayerControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) => {
  const selectControlAdapter = useMemo(
    () =>
      createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
        const layer = selectEntityOrThrow(canvas, entityIdentifier);
        return layer.controlAdapter;
      }),
    [entityIdentifier]
  );
  const controlAdapter = useAppSelector(selectControlAdapter);
  return controlAdapter;
};

export const ControlLayerControlAdapter = memo(() => {
  const dispatch = useAppDispatch();
  const entityIdentifier = useEntityIdentifierContext('control_layer');

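Note: the inlined hook builds its selector inside useMemo because the selector closes over entityIdentifier — each control layer gets its own memoized selector instead of sharing one cache across layers. A short usage sketch (the model field on the returned adapter is an assumption):

// Sketch: consuming the per-entity hook from the hunk above.
const ControlAdapterSummary = () => {
  const entityIdentifier = useEntityIdentifierContext('control_layer');
  const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
  return <span>{controlAdapter.model?.name ?? 'no model'}</span>;
};
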
@@ -1,10 +1,7 @@
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useControlNetAndT2IAdapterModels } from 'services/api/hooks/modelsByType';
@@ -17,8 +14,6 @@ type Props = {

export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onChangeModel }: Props) => {
  const { t } = useTranslation();
  const entityIdentifier = useEntityIdentifierContext();
  const canvasManager = useCanvasManager();
  const currentBaseModel = useAppSelector(selectBase);
  const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
  const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);
@@ -29,29 +24,8 @@ export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onChangeModel }: Props) => {
        return;
      }
      onChangeModel(modelConfig);

      // When we set the model for the first time, we'll set the default filter settings and open the filter popup

      if (modelKey) {
        // If there is already a model key, this is not the first time we're setting the model
        return;
      }

      // Open the filter popup by setting this entity as the filtering entity
      if (!canvasManager.filter.$adapter.get()) {
        // Update the filter, preferring the model's default
        if (isFilterType(modelConfig.default_settings?.preprocessor)) {
          canvasManager.filter.$config.set(
            IMAGE_FILTERS[modelConfig.default_settings.preprocessor].buildDefaults(modelConfig.base)
          );
        } else {
          canvasManager.filter.$config.set(IMAGE_FILTERS.canny_image_processor.buildDefaults(modelConfig.base));
        }
        canvasManager.filter.startFilter(entityIdentifier);
        canvasManager.filter.previewFilter();
      }
    },
    [canvasManager.filter, entityIdentifier, modelKey, onChangeModel]
    [onChangeModel]
  );

  const getIsDisabled = useCallback(

@@ -1,37 +1,49 @@
import { Button, ButtonGroup, Flex, Heading } from '@invoke-ai/ui-library';
import { Button, ButtonGroup, Flex, FormControl, FormLabel, Heading, Spacer, Switch } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { FilterSettings } from 'features/controlLayers/components/Filters/FilterSettings';
import { FilterTypeSelect } from 'features/controlLayers/components/Filters/FilterTypeSelect';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
import {
  selectAutoProcessFilter,
  settingsAutoProcessFilterToggled,
} from 'features/controlLayers/store/canvasSettingsSlice';
import { type FilterConfig, IMAGE_FILTERS } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';
import { PiArrowsCounterClockwiseBold, PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';

export const Filter = memo(() => {
const FilterBox = memo(({ adapter }: { adapter: CanvasEntityAdapterRasterLayer | CanvasEntityAdapterControlLayer }) => {
  const { t } = useTranslation();
  const canvasManager = useCanvasManager();
  const config = useStore(canvasManager.filter.$config);
  const isFiltering = useStore(canvasManager.filter.$isFiltering);
  const isProcessing = useStore(canvasManager.filter.$isProcessing);
  const dispatch = useAppDispatch();
  const config = useStore(adapter.filterer.$filterConfig);
  const isProcessing = useStore(adapter.filterer.$isProcessing);
  const hasProcessed = useStore(adapter.filterer.$hasProcessed);
  const autoProcessFilter = useAppSelector(selectAutoProcessFilter);

  const onChangeFilterConfig = useCallback(
    (filterConfig: FilterConfig) => {
      canvasManager.filter.$config.set(filterConfig);
      adapter.filterer.$filterConfig.set(filterConfig);
    },
    [canvasManager.filter.$config]
    [adapter.filterer.$filterConfig]
  );

  const onChangeFilterType = useCallback(
    (filterType: FilterConfig['type']) => {
      canvasManager.filter.$config.set(IMAGE_FILTERS[filterType].buildDefaults());
      adapter.filterer.$filterConfig.set(IMAGE_FILTERS[filterType].buildDefaults());
    },
    [canvasManager.filter.$config]
    [adapter.filterer.$filterConfig]
  );

  if (!isFiltering) {
    return null;
  }
  const onChangeAutoProcessFilter = useCallback(() => {
    dispatch(settingsAutoProcessFilterToggled());
  }, [dispatch]);

  const isValid = useMemo(() => {
    return IMAGE_FILTERS[config.type].validateConfig?.(config as never) ?? true;
  }, [config]);

  return (
    <Flex
@@ -46,36 +58,56 @@ export const Filter = memo(() => {
      transitionProperty="height"
      transitionDuration="normal"
    >
      <Heading size="md" color="base.300" userSelect="none">
        {t('controlLayers.filter.filter')}
      </Heading>
      <Flex w="full">
        <Heading size="md" color="base.300" userSelect="none">
          {t('controlLayers.filter.filter')}
        </Heading>
        <Spacer />
        <FormControl w="min-content">
          <FormLabel m={0}>{t('controlLayers.filter.autoProcess')}</FormLabel>
          <Switch size="sm" isChecked={autoProcessFilter} onChange={onChangeAutoProcessFilter} />
        </FormControl>
      </Flex>
      <FilterTypeSelect filterType={config.type} onChange={onChangeFilterType} />
      <FilterSettings filterConfig={config} onChange={onChangeFilterConfig} />
      <ButtonGroup isAttached={false} size="sm" alignSelf="self-end">
      <ButtonGroup isAttached={false} size="sm" w="full">
        <Button
          variant="ghost"
          leftIcon={<PiShootingStarBold />}
          onClick={canvasManager.filter.previewFilter}
          onClick={adapter.filterer.process}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.preview')}
          loadingText={t('controlLayers.filter.process')}
          isDisabled={!isValid}
        >
          {t('controlLayers.filter.preview')}
          {t('controlLayers.filter.process')}
        </Button>
        <Spacer />
        <Button
          leftIcon={<PiArrowsCounterClockwiseBold />}
          onClick={adapter.filterer.reset}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.reset')}
          variant="ghost"
        >
          {t('controlLayers.filter.reset')}
        </Button>
        <Button
          variant="ghost"
          leftIcon={<PiCheckBold />}
          onClick={canvasManager.filter.applyFilter}
          onClick={adapter.filterer.apply}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.apply')}
          isDisabled={!isValid || !hasProcessed}
        >
          {t('controlLayers.filter.apply')}
        </Button>
        <Button
          variant="ghost"
          leftIcon={<PiXBold />}
          onClick={canvasManager.filter.cancelFilter}
          onClick={adapter.filterer.cancel}
          isLoading={isProcessing}
          loadingText={t('controlLayers.filter.cancel')}
          isDisabled={!isValid}
        >
          {t('controlLayers.filter.cancel')}
        </Button>
@@ -84,4 +116,16 @@
  );
});

FilterBox.displayName = 'FilterBox';

export const Filter = () => {
  const canvasManager = useCanvasManager();
  const adapter = useStore(canvasManager.stateApi.$filteringAdapter);
  if (!adapter) {
    return null;
  }

  return <FilterBox adapter={adapter} />;
};

Filter.displayName = 'Filter';

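Note: the Filter panel is now gated per entity — a $filteringAdapter store on the canvas state API holds whichever layer adapter is currently being filtered, and the panel renders nothing when it is empty. A minimal sketch of the gate; the adapter type is simplified and assumed:

// Sketch: render children only while some adapter is registered as filtering.
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';
import type { ReactNode } from 'react';

type AnyAdapter = { id: string }; // stand-in for the Konva entity adapter types

export const $filteringAdapter = atom<AnyAdapter | null>(null);

export const FilterGate = ({ children }: { children: ReactNode }) => {
  const adapter = useStore($filteringAdapter);
  if (!adapter) {
    return null; // nothing is being filtered; hide the panel entirely
  }
  return <>{children}</>;
};
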

@@ -10,6 +10,7 @@ import { FilterMediapipeFace } from 'features/controlLayers/components/Filters/F
 import { FilterMidasDepth } from 'features/controlLayers/components/Filters/FilterMidasDepth';
 import { FilterMlsdImage } from 'features/controlLayers/components/Filters/FilterMlsdImage';
 import { FilterPidi } from 'features/controlLayers/components/Filters/FilterPidi';
+import { FilterSpandrel } from 'features/controlLayers/components/Filters/FilterSpandrel';
 import type { FilterConfig } from 'features/controlLayers/store/types';
 import { IMAGE_FILTERS } from 'features/controlLayers/store/types';
 import { memo } from 'react';
@@ -64,6 +65,10 @@ export const FilterSettings = memo(({ filterConfig, onChange }: Props) => {
     return <FilterPidi config={filterConfig} onChange={onChange} />;
   }

+  if (filterConfig.type === 'spandrel_filter') {
+    return <FilterSpandrel config={filterConfig} onChange={onChange} />;
+  }
+
   return (
     <IAINoContentFallback
       py={4}
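
The new `spandrel_filter` branch follows the file's existing dispatch pattern: `FilterConfig` is a discriminated union keyed on `type`, so each `if` narrows the config before handing it to its settings component. A self-contained sketch of that narrowing (the member shapes are illustrative, not the store's real definitions):

```ts
// Illustrative only: the real FilterConfig union lives in
// features/controlLayers/store/types and has many more members.
type SpandrelFilterConfigSketch = { type: 'spandrel_filter'; scale: number };
type OtherFilterConfigSketch = { type: 'other_filter'; strength: number };
type FilterConfigSketch = SpandrelFilterConfigSketch | OtherFilterConfigSketch;

function describe(config: FilterConfigSketch): string {
  if (config.type === 'spandrel_filter') {
    // Narrowed: `config.scale` is available here, `config.strength` is not.
    return `spandrel ×${config.scale}`;
  }
  return `other @ ${config.strength}`;
}
```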

New file: the FilterSpandrel settings component (its path matches the FilterSpandrel import added above).

@@ -0,0 +1,125 @@
+import {
+  Box,
+  Combobox,
+  CompositeNumberInput,
+  CompositeSlider,
+  Flex,
+  FormControl,
+  FormHelperText,
+  FormLabel,
+  Switch,
+  Tooltip,
+} from '@invoke-ai/ui-library';
+import { useModelCombobox } from 'common/hooks/useModelCombobox';
+import type { SpandrelFilterConfig } from 'features/controlLayers/store/types';
+import { IMAGE_FILTERS } from 'features/controlLayers/store/types';
+import type { ChangeEvent } from 'react';
+import { useCallback, useEffect, useMemo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { useSpandrelImageToImageModels } from 'services/api/hooks/modelsByType';
+import type { SpandrelImageToImageModelConfig } from 'services/api/types';
+
+import type { FilterComponentProps } from './types';
+
+type Props = FilterComponentProps<SpandrelFilterConfig>;
+const DEFAULTS = IMAGE_FILTERS['spandrel_filter'].buildDefaults();
+
+export const FilterSpandrel = ({ onChange, config }: Props) => {
+  const { t } = useTranslation();
+
+  const [modelConfigs, { isLoading }] = useSpandrelImageToImageModels();
+
+  const tooltipLabel = useMemo(() => {
+    if (!modelConfigs.length || !config.model) {
+      return;
+    }
+    return modelConfigs.find((m) => m.key === config.model?.key)?.description;
+  }, [modelConfigs, config.model]);
+
+  const _onChange = useCallback(
+    (v: SpandrelImageToImageModelConfig | null) => {
+      onChange({ ...config, model: v });
+    },
+    [config, onChange]
+  );
+
+  const {
+    options,
+    value,
+    onChange: onChangeModel,
+    placeholder,
+    noOptionsMessage,
+  } = useModelCombobox({
+    modelConfigs,
+    onChange: _onChange,
+    selectedModel: config.model,
+    isLoading,
+  });
+
+  const onScaleChanged = useCallback(
+    (v: number) => {
+      onChange({ ...config, scale: v });
+    },
+    [onChange, config]
+  );
+  const onAutoscaleChanged = useCallback(
+    (e: ChangeEvent<HTMLInputElement>) => {
+      onChange({ ...config, autoScale: e.target.checked });
+    },
+    [onChange, config]
+  );
+
+  useEffect(() => {
+    if (!config.model) {
+      onChangeModel(options[0] ?? null);
+    }
+  }, [config.model, onChangeModel, options]);
+
+  return (
+    <>
+      <FormControl w="full" orientation="vertical">
+        <Flex w="full" alignItems="center">
+          <FormLabel m={0} flexGrow={1}>
+            {t('controlLayers.filter.spandrel.paramAutoScale')}
+          </FormLabel>
+          <Switch size="sm" isChecked={config.autoScale} onChange={onAutoscaleChanged} />
+        </Flex>
+        <FormHelperText>{t('controlLayers.filter.spandrel.paramAutoScaleDesc')}</FormHelperText>
+      </FormControl>
+      <FormControl isDisabled={!config.autoScale}>
+        <FormLabel m={0}>{t('controlLayers.filter.spandrel.paramScale')}</FormLabel>
+        <CompositeSlider
+          value={config.scale}
+          onChange={onScaleChanged}
+          defaultValue={DEFAULTS.scale}
+          min={1}
+          max={16}
+        />
+        <CompositeNumberInput
+          value={config.scale}
+          onChange={onScaleChanged}
+          defaultValue={DEFAULTS.scale}
+          min={1}
+          max={16}
+        />
+      </FormControl>
+      <FormControl>
+        <FormLabel m={0}>{t('controlLayers.filter.spandrel.paramModel')}</FormLabel>
+        <Tooltip label={tooltipLabel}>
+          <Box w="full">
+            <Combobox
+              value={value}
+              placeholder={placeholder}
+              options={options}
+              onChange={onChangeModel}
+              noOptionsMessage={noOptionsMessage}
+              isDisabled={options.length === 0}
+            />
+          </Box>
+        </Tooltip>
+      </FormControl>
+    </>
+  );
+};
+
+FilterSpandrel.displayName = 'FilterSpandrel';
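
`DEFAULTS` is produced by `IMAGE_FILTERS['spandrel_filter'].buildDefaults()` and feeds the slider's `defaultValue`. The sketch below shows the kind of registry entry that implies; the field values are guesses for illustration, and the real defaults live in features/controlLayers/store/types:

```ts
// Hypothetical registry entry; not the repository's actual definition.
type SpandrelFilterConfigSketch = {
  type: 'spandrel_filter';
  model: { key: string } | null;
  scale: number;
  autoScale: boolean;
};

const IMAGE_FILTERS_SKETCH = {
  spandrel_filter: {
    buildDefaults: (): SpandrelFilterConfigSketch => ({
      type: 'spandrel_filter',
      model: null,     // FilterSpandrel's useEffect auto-selects the first available model
      scale: 2,        // guess; the UI constrains scale to 1-16
      autoScale: true, // guess
    }),
  },
  // ...entries for the other filter types
};
```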

New file: the CanvasHUD grid shell.

@@ -0,0 +1,24 @@
+import { Grid } from '@invoke-ai/ui-library';
+import { CanvasHUDItemBbox } from 'features/controlLayers/components/HUD/CanvasHUDItemBbox';
+import { CanvasHUDItemScaledBbox } from 'features/controlLayers/components/HUD/CanvasHUDItemScaledBbox';
+import { memo } from 'react';
+
+export const CanvasHUD = memo(() => {
+  return (
+    <Grid
+      bg="base.900"
+      borderBottomEndRadius="base"
+      p={2}
+      gap={1}
+      borderRadius="base"
+      templateColumns="1fr 1fr"
+      opacity={0.6}
+      minW={64}
+    >
+      <CanvasHUDItemBbox />
+      <CanvasHUDItemScaledBbox />
+    </Grid>
+  );
+});
+
+CanvasHUD.displayName = 'CanvasHUD';

New file: the CanvasHUDItem label/value row.

@@ -0,0 +1,26 @@
+import { GridItem, Text } from '@invoke-ai/ui-library';
+import type { Property } from 'csstype';
+import { memo } from 'react';
+
+type Props = {
+  label: string;
+  value: string | number;
+  color?: Property.Color;
+};
+
+export const CanvasHUDItem = memo(({ label, value, color }: Props) => {
+  return (
+    <>
+      <GridItem>
+        <Text textAlign="end">{label}: </Text>
+      </GridItem>
+      <GridItem fontWeight="semibold">
+        <Text textAlign="end" color={color}>
+          {value}
+        </Text>
+      </GridItem>
+    </>
+  );
+});
+
+CanvasHUDItem.displayName = 'CanvasHUDItem';

New file: the bbox readout.

@@ -0,0 +1,17 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { useAppSelector } from 'app/store/storeHooks';
+import { CanvasHUDItem } from 'features/controlLayers/components/HUD/CanvasHUDItem';
+import { selectBbox } from 'features/controlLayers/store/selectors';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+
+const selectBboxRect = createSelector(selectBbox, (bbox) => bbox.rect);
+
+export const CanvasHUDItemBbox = memo(() => {
+  const { t } = useTranslation();
+  const rect = useAppSelector(selectBboxRect);
+
+  return <CanvasHUDItem label={t('controlLayers.HUD.bbox')} value={`${rect.width}×${rect.height} px`} />;
+});
+
+CanvasHUDItemBbox.displayName = 'CanvasHUDItemBbox';

New file: the scaled-bbox readout.

@@ -0,0 +1,19 @@
+import { createSelector } from '@reduxjs/toolkit';
+import { useAppSelector } from 'app/store/storeHooks';
+import { CanvasHUDItem } from 'features/controlLayers/components/HUD/CanvasHUDItem';
+import { selectBbox } from 'features/controlLayers/store/selectors';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+
+const selectScaledSize = createSelector(selectBbox, (bbox) => bbox.scaledSize);
+
+export const CanvasHUDItemScaledBbox = memo(() => {
+  const { t } = useTranslation();
+  const scaledSize = useAppSelector(selectScaledSize);
+
+  return (
+    <CanvasHUDItem label={t('controlLayers.HUD.scaledBbox')} value={`${scaledSize.width}×${scaledSize.height} px`} />
+  );
+});
+
+CanvasHUDItemScaledBbox.displayName = 'CanvasHUDItemScaledBbox';
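
The four HUD files above replace the monolithic HeadsUpDisplay deleted further down: a grid shell plus one component per readout, each subscribing through a narrow `createSelector` so it re-renders only when its own value changes. Adding another readout would follow the same shape; here is a sketch in which the zoom field and the label are invented for illustration:

```tsx
// Hypothetical extra HUD item following the CanvasHUDItemBbox pattern.
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasHUDItem } from 'features/controlLayers/components/HUD/CanvasHUDItem';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { memo } from 'react';

// Invented: assumes the canvas slice exposes a numeric zoom factor.
const selectZoom = createSelector(selectCanvasSlice, (canvas) => canvas.zoom);

export const CanvasHUDItemZoom = memo(() => {
  const zoom = useAppSelector(selectZoom);
  return <CanvasHUDItem label="Zoom" value={`${Math.round(zoom * 100)}%`} />;
});

CanvasHUDItemZoom.displayName = 'CanvasHUDItemZoom';
```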

New file: the CanvasSelectedEntityStatusAlert overlay.

@@ -0,0 +1,133 @@
+import { Box, Flex, Icon, Text } from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { createSelector } from '@reduxjs/toolkit';
+import { useAppSelector } from 'app/store/storeHooks';
+import type { Property } from 'csstype';
+import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdapterContext';
+import { useEntityTitle } from 'features/controlLayers/hooks/useEntityTitle';
+import { useEntityTypeIsHidden } from 'features/controlLayers/hooks/useEntityTypeIsHidden';
+import type { CanvasEntityAdapter } from 'features/controlLayers/konva/CanvasEntity/types';
+import {
+  selectCanvasSlice,
+  selectEntityOrThrow,
+  selectSelectedEntityIdentifier,
+} from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
+import { atom } from 'nanostores';
+import { memo, useMemo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiWarningCircleFill } from 'react-icons/pi';
+
+type ContentProps = {
+  entityIdentifier: CanvasEntityIdentifier;
+  adapter: CanvasEntityAdapter;
+};
+
+const $isFilteringFallback = atom(false);
+
+type EntityStatus = {
+  value: string;
+  color?: Property.Color;
+};
+
+const CanvasSelectedEntityStatusAlertContent = memo(({ entityIdentifier, adapter }: ContentProps) => {
+  const { t } = useTranslation();
+  const title = useEntityTitle(entityIdentifier);
+  const selectIsEnabled = useMemo(
+    () => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isEnabled),
+    [entityIdentifier]
+  );
+  const selectIsLocked = useMemo(
+    () => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isLocked),
+    [entityIdentifier]
+  );
+  const isEnabled = useAppSelector(selectIsEnabled);
+  const isLocked = useAppSelector(selectIsLocked);
+  const isHidden = useEntityTypeIsHidden(entityIdentifier.type);
+  const isFiltering = useStore(adapter.filterer?.$isFiltering ?? $isFilteringFallback);
+  const isTransforming = useStore(adapter.transformer.$isTransforming);
+
+  const status = useMemo<EntityStatus | null>(() => {
+    if (isFiltering) {
+      return {
+        value: t('controlLayers.HUD.entityStatus.isFiltering'),
+        color: 'invokeBlue.300',
+      };
+    }
+
+    if (isTransforming) {
+      return {
+        value: t('controlLayers.HUD.entityStatus.isTransforming'),
+        color: 'invokeBlue.300',
+      };
+    }
+
+    if (isHidden) {
+      return {
+        value: t('controlLayers.HUD.entityStatus.isHidden'),
+        color: 'invokePurple.300',
+      };
+    }
+
+    if (isLocked) {
+      return {
+        value: t('controlLayers.HUD.entityStatus.isLocked'),
+        color: 'invokeRed.300',
+      };
+    }
+
+    if (!isEnabled) {
+      return {
+        value: t('controlLayers.HUD.entityStatus.isDisabled'),
+        color: 'invokeRed.300',
+      };
+    }
+
+    return null;
+  }, [isFiltering, isTransforming, isHidden, isLocked, isEnabled, t]);
+
+  if (!status) {
+    return null;
+  }
+
+  return (
+    <Box position="relative" shadow="dark-lg">
+      <Flex
+        position="absolute"
+        top={0}
+        right={0}
+        left={0}
+        bottom={0}
+        bg={status.color}
+        opacity={0.3}
+        borderRadius="base"
+        borderColor="whiteAlpha.400"
+        borderWidth={1}
+      />
+      <Flex px={6} py={4} gap={6} alignItems="center" justifyContent="center">
+        <Icon as={PiWarningCircleFill} />
+        <Text as="span" h={8}>
+          <Text as="span" fontWeight="semibold">
+            {title}
+          </Text>{' '}
+          {status.value}
+        </Text>
+      </Flex>
+    </Box>
+  );
+});
+
+CanvasSelectedEntityStatusAlertContent.displayName = 'CanvasSelectedEntityStatusAlertContent';
+
+export const CanvasSelectedEntityStatusAlert = memo(() => {
+  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
+  const adapter = useEntityAdapterSafe(selectedEntityIdentifier);
+
+  if (!selectedEntityIdentifier || !adapter) {
+    return null;
+  }
+
+  return <CanvasSelectedEntityStatusAlertContent entityIdentifier={selectedEntityIdentifier} adapter={adapter} />;
+});
+
+CanvasSelectedEntityStatusAlert.displayName = 'CanvasSelectedEntityStatusAlert';
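
A detail worth noting in this file: not every entity adapter has a `filterer`, so the subscription reads `adapter.filterer?.$isFiltering ?? $isFilteringFallback`, where the fallback is a module-level `atom(false)`. React hooks must run unconditionally and `useStore` needs some store, so a constant fallback atom keeps the hook order stable while always yielding `false`. The trick in isolation, with generic names rather than the repository's:

```ts
import { useStore } from '@nanostores/react';
import type { WritableAtom } from 'nanostores';
import { atom } from 'nanostores';

// Module-level constant: always false, never written to.
const $fallbackFalse = atom(false);

// The store may be absent, but the hook call itself stays unconditional.
export function useFlagOrFalse(maybeStore?: WritableAtom<boolean>): boolean {
  return useStore(maybeStore ?? $fallbackFalse);
}
```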

Deleted file: the old HeadsUpDisplay component, superseded by the CanvasHUD components above.

@@ -1,43 +0,0 @@
-import { Grid, GridItem, Text } from '@invoke-ai/ui-library';
-import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store/storeHooks';
-import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
-import { memo } from 'react';
-
-const selectBbox = createSelector(selectCanvasSlice, (canvas) => canvas.bbox);
-
-export const HeadsUpDisplay = memo(() => {
-  const bbox = useAppSelector(selectBbox);
-
-  return (
-    <Grid
-      bg="base.900"
-      borderBottomEndRadius="base"
-      p={2}
-      gap={2}
-      borderRadius="base"
-      templateColumns="auto auto"
-      opacity={0.6}
-    >
-      <HUDItem label="BBox" value={`${bbox.rect.width}×${bbox.rect.height} px`} />
-      <HUDItem label="Scaled BBox" value={`${bbox.scaledSize.width}×${bbox.scaledSize.height} px`} />
-    </Grid>
-  );
-});
-
-HeadsUpDisplay.displayName = 'HeadsUpDisplay';
-
-const HUDItem = memo(({ label, value }: { label: string; value: string | number }) => {
-  return (
-    <>
-      <GridItem>
-        <Text textAlign="end">{label}: </Text>
-      </GridItem>
-      <GridItem fontWeight="semibold">
-        <Text>{value}</Text>
-      </GridItem>
-    </>
-  );
-});
-
-HUDItem.displayName = 'HUDItem';

@@ -9,7 +9,7 @@ import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/stor
 import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
 import type { ImageWithDims } from 'features/controlLayers/store/types';
 import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
-import { calculateNewSize } from 'features/parameters/components/DocumentSize/calculateNewSize';
+import { calculateNewSize } from 'features/parameters/components/Bbox/calculateNewSize';
 import { memo, useCallback, useEffect, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';

@@ -85,7 +85,7 @@ export const IPAdapterImagePreview = memo(
       />

       {controlImage && (
-        <Flex position="absolute" flexDir="column" top={1} insetInlineEnd={1} gap={1}>
+        <Flex position="absolute" flexDir="column" top={2} insetInlineEnd={2} gap={1}>
           <IAIDndImageIcon
             onClick={handleResetControlImage}
             icon={<PiArrowCounterClockwiseBold size={16} />}

@@ -1,42 +1,28 @@
 import { Button, Flex } from '@invoke-ai/ui-library';
-import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { useAppSelector } from 'app/store/storeHooks';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import {
-  rgIPAdapterAdded,
-  rgNegativePromptChanged,
-  rgPositivePromptChanged,
-} from 'features/controlLayers/store/canvasSlice';
-import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
-import { useCallback, useMemo } from 'react';
+  buildSelectValidRegionalGuidanceActions,
+  useAddRegionalGuidanceIPAdapter,
+  useAddRegionalGuidanceNegativePrompt,
+  useAddRegionalGuidancePositivePrompt,
+} from 'features/controlLayers/hooks/addLayerHooks';
+import { useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiPlusBold } from 'react-icons/pi';

 export const RegionalGuidanceAddPromptsIPAdapterButtons = () => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const { t } = useTranslation();
-  const dispatch = useAppDispatch();
+  const addRegionalGuidanceIPAdapter = useAddRegionalGuidanceIPAdapter(entityIdentifier);
+  const addRegionalGuidancePositivePrompt = useAddRegionalGuidancePositivePrompt(entityIdentifier);
+  const addRegionalGuidanceNegativePrompt = useAddRegionalGuidanceNegativePrompt(entityIdentifier);

   const selectValidActions = useMemo(
-    () =>
-      createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const entity = selectEntityOrThrow(canvas, entityIdentifier);
-        return {
-          canAddPositivePrompt: entity?.positivePrompt === null,
-          canAddNegativePrompt: entity?.negativePrompt === null,
-        };
-      }),
+    () => buildSelectValidRegionalGuidanceActions(entityIdentifier),
     [entityIdentifier]
   );
   const validActions = useAppSelector(selectValidActions);
-  const addPositivePrompt = useCallback(() => {
-    dispatch(rgPositivePromptChanged({ entityIdentifier, prompt: '' }));
-  }, [dispatch, entityIdentifier]);
-  const addNegativePrompt = useCallback(() => {
-    dispatch(rgNegativePromptChanged({ entityIdentifier, prompt: '' }));
-  }, [dispatch, entityIdentifier]);
-  const addIPAdapter = useCallback(() => {
-    dispatch(rgIPAdapterAdded({ entityIdentifier }));
-  }, [dispatch, entityIdentifier]);

   return (
     <Flex w="full" p={2} justifyContent="space-between">
@@ -44,7 +30,7 @@ export const RegionalGuidanceAddPromptsIPAdapterButtons = () => {
         size="sm"
         variant="ghost"
         leftIcon={<PiPlusBold />}
-        onClick={addPositivePrompt}
+        onClick={addRegionalGuidancePositivePrompt}
         isDisabled={!validActions.canAddPositivePrompt}
       >
         {t('common.positivePrompt')}
@@ -53,12 +39,12 @@ export const RegionalGuidanceAddPromptsIPAdapterButtons = () => {
         size="sm"
         variant="ghost"
         leftIcon={<PiPlusBold />}
-        onClick={addNegativePrompt}
+        onClick={addRegionalGuidanceNegativePrompt}
         isDisabled={!validActions.canAddNegativePrompt}
       >
         {t('common.negativePrompt')}
       </Button>
-      <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addIPAdapter}>
+      <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addRegionalGuidanceIPAdapter}>
         {t('common.ipAdapter')}
       </Button>
     </Flex>

@@ -1,53 +1,38 @@
 import { MenuItem } from '@invoke-ai/ui-library';
-import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { useAppSelector } from 'app/store/storeHooks';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
-import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
 import {
-  rgIPAdapterAdded,
-  rgNegativePromptChanged,
-  rgPositivePromptChanged,
-} from 'features/controlLayers/store/canvasSlice';
-import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
-import { memo, useCallback, useMemo } from 'react';
+  buildSelectValidRegionalGuidanceActions,
+  useAddRegionalGuidanceIPAdapter,
+  useAddRegionalGuidanceNegativePrompt,
+  useAddRegionalGuidancePositivePrompt,
+} from 'features/controlLayers/hooks/addLayerHooks';
+import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
+import { memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';

 export const RegionalGuidanceMenuItemsAddPromptsAndIPAdapter = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const { t } = useTranslation();
-  const dispatch = useAppDispatch();
   const isBusy = useCanvasIsBusy();
+  const addRegionalGuidanceIPAdapter = useAddRegionalGuidanceIPAdapter(entityIdentifier);
+  const addRegionalGuidancePositivePrompt = useAddRegionalGuidancePositivePrompt(entityIdentifier);
+  const addRegionalGuidanceNegativePrompt = useAddRegionalGuidanceNegativePrompt(entityIdentifier);
   const selectValidActions = useMemo(
-    () =>
-      createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const entity = selectEntity(canvas, entityIdentifier);
-        return {
-          canAddPositivePrompt: entity?.positivePrompt === null,
-          canAddNegativePrompt: entity?.negativePrompt === null,
-        };
-      }),
+    () => buildSelectValidRegionalGuidanceActions(entityIdentifier),
     [entityIdentifier]
   );
   const validActions = useAppSelector(selectValidActions);
-  const addPositivePrompt = useCallback(() => {
-    dispatch(rgPositivePromptChanged({ entityIdentifier, prompt: '' }));
-  }, [dispatch, entityIdentifier]);
-  const addNegativePrompt = useCallback(() => {
-    dispatch(rgNegativePromptChanged({ entityIdentifier, prompt: '' }));
-  }, [dispatch, entityIdentifier]);
-  const addIPAdapter = useCallback(() => {
-    dispatch(rgIPAdapterAdded({ entityIdentifier }));
-  }, [dispatch, entityIdentifier]);

   return (
     <>
-      <MenuItem onClick={addPositivePrompt} isDisabled={!validActions.canAddPositivePrompt || isBusy}>
+      <MenuItem onClick={addRegionalGuidancePositivePrompt} isDisabled={!validActions.canAddPositivePrompt || isBusy}>
         {t('controlLayers.addPositivePrompt')}
       </MenuItem>
-      <MenuItem onClick={addNegativePrompt} isDisabled={!validActions.canAddNegativePrompt || isBusy}>
+      <MenuItem onClick={addRegionalGuidanceNegativePrompt} isDisabled={!validActions.canAddNegativePrompt || isBusy}>
         {t('controlLayers.addNegativePrompt')}
       </MenuItem>
-      <MenuItem onClick={addIPAdapter} isDisabled={isBusy}>
+      <MenuItem onClick={addRegionalGuidanceIPAdapter} isDisabled={isBusy}>
         {t('controlLayers.addIPAdapter')}
       </MenuItem>
     </>
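
The two regional-guidance diffs above remove identical inline selector and callback boilerplate from both components and centralize it in `features/controlLayers/hooks/addLayerHooks`. The sketch below reconstructs what that module plausibly exports, based on the call sites and on the inline code the diffs delete; the exact signatures are assumptions:

```ts
// Hypothetical reconstruction of addLayerHooks; not the actual module.
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch } from 'app/store/storeHooks';
import { rgPositivePromptChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { useCallback } from 'react';

export const buildSelectValidRegionalGuidanceActions = (entityIdentifier: CanvasEntityIdentifier) =>
  createMemoizedSelector(selectCanvasSlice, (canvas) => {
    const entity = selectEntity(canvas, entityIdentifier);
    return {
      canAddPositivePrompt: entity?.positivePrompt === null,
      canAddNegativePrompt: entity?.negativePrompt === null,
    };
  });

export const useAddRegionalGuidancePositivePrompt = (entityIdentifier: CanvasEntityIdentifier) => {
  const dispatch = useAppDispatch();
  return useCallback(() => {
    dispatch(rgPositivePromptChanged({ entityIdentifier, prompt: '' }));
  }, [dispatch, entityIdentifier]);
};

// useAddRegionalGuidanceNegativePrompt and useAddRegionalGuidanceIPAdapter
// would follow the same shape with rgNegativePromptChanged / rgIPAdapterAdded.
```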

@@ -1,17 +1,16 @@
 import { Checkbox, FormControl, FormLabel } from '@invoke-ai/ui-library';
-import { createSelector } from '@reduxjs/toolkit';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { selectCanvasSettingsSlice, settingsAutoSaveToggled } from 'features/controlLayers/store/canvasSettingsSlice';
+import { selectAutoSave, settingsAutoSaveToggled } from 'features/controlLayers/store/canvasSettingsSlice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';

-const selectAutoSave = createSelector(selectCanvasSettingsSlice, (canvasSettings) => canvasSettings.autoSave);
-
 export const CanvasSettingsAutoSaveCheckbox = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
   const autoSave = useAppSelector(selectAutoSave);
-  const onChange = useCallback(() => dispatch(settingsAutoSaveToggled()), [dispatch]);
+  const onChange = useCallback(() => {
+    dispatch(settingsAutoSaveToggled());
+  }, [dispatch]);
   return (
     <FormControl w="full">
       <FormLabel flexGrow={1}>{t('controlLayers.autoSave')}</FormLabel>
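
This change is part of a pattern across the settings components: rather than each consumer building its own `createSelector(selectCanvasSettingsSlice, ...)`, the slice module exports ready-made selectors such as `selectAutoSave` (and `selectSnapToGrid` below). A sketch of the colocated export, with the state shape assumed:

```ts
// Hypothetical slice-side export; the real one lives in
// features/controlLayers/store/canvasSettingsSlice.
import { createSelector } from '@reduxjs/toolkit';

type RootStateSketch = { canvasSettings: { autoSave: boolean } }; // assumed shape

const selectCanvasSettingsSlice = (state: RootStateSketch) => state.canvasSettings;

export const selectAutoSave = createSelector(selectCanvasSettingsSlice, (settings) => settings.autoSave);
```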

New file: the snap-to-grid settings checkbox.

@@ -0,0 +1,24 @@
+import { Checkbox, FormControl, FormLabel } from '@invoke-ai/ui-library';
+import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { selectSnapToGrid, settingsSnapToGridToggled } from 'features/controlLayers/store/canvasSettingsSlice';
+import type { ChangeEventHandler } from 'react';
+import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
+
+export const CanvasSettingsSnapToGridCheckbox = memo(() => {
+  const { t } = useTranslation();
+  const dispatch = useAppDispatch();
+  const snapToGrid = useAppSelector(selectSnapToGrid);
+  const onChange = useCallback<ChangeEventHandler<HTMLInputElement>>(() => {
+    dispatch(settingsSnapToGridToggled());
+  }, [dispatch]);
+
+  return (
+    <FormControl w="full">
+      <FormLabel flexGrow={1}>{t('controlLayers.settings.snapToGrid.label')}</FormLabel>
+      <Checkbox isChecked={snapToGrid} onChange={onChange} />
+    </FormControl>
+  );
+});
+
+CanvasSettingsSnapToGridCheckbox.displayName = 'CanvasSettingsSnapToGrid';
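
The checkbox presumes two slice-side additions that this truncated diff does not show: a `selectSnapToGrid` selector and a `settingsSnapToGridToggled` action. Under Redux Toolkit conventions the reducer would be a one-line toggle; a sketch with an assumed state shape:

```ts
import { createSlice } from '@reduxjs/toolkit';

// Assumed minimal state shape, for illustration only.
const canvasSettingsSlice = createSlice({
  name: 'canvasSettings',
  initialState: { snapToGrid: true },
  reducers: {
    // Redux Toolkit uses Immer, so "mutating" the draft is safe here.
    settingsSnapToGridToggled: (state) => {
      state.snapToGrid = !state.snapToGrid;
    },
  },
});

export const { settingsSnapToGridToggled } = canvasSettingsSlice.actions;
export default canvasSettingsSlice.reducer;
```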

@@ -15,6 +15,7 @@ import { CanvasSettingsClearHistoryButton } from 'features/controlLayers/compone
 import { CanvasSettingsClipToBboxCheckbox } from 'features/controlLayers/components/Settings/CanvasSettingsClipToBboxCheckbox';
 import { CanvasSettingsCompositeMaskedRegionsCheckbox } from 'features/controlLayers/components/Settings/CanvasSettingsCompositeMaskedRegionsCheckbox';
 import { CanvasSettingsDynamicGridSwitch } from 'features/controlLayers/components/Settings/CanvasSettingsDynamicGridSwitch';
+import { CanvasSettingsSnapToGridCheckbox } from 'features/controlLayers/components/Settings/CanvasSettingsGridSize';
 import { CanvasSettingsInvertScrollCheckbox } from 'features/controlLayers/components/Settings/CanvasSettingsInvertScrollCheckbox';
 import { CanvasSettingsLogDebugInfoButton } from 'features/controlLayers/components/Settings/CanvasSettingsLogDebugInfo';
 import { CanvasSettingsRecalculateRectsButton } from 'features/controlLayers/components/Settings/CanvasSettingsRecalculateRectsButton';
@@ -39,6 +40,7 @@ export const CanvasSettingsPopover = memo(() => {
         <CanvasSettingsInvertScrollCheckbox />
         <CanvasSettingsClipToBboxCheckbox />
         <CanvasSettingsCompositeMaskedRegionsCheckbox />
+        <CanvasSettingsSnapToGridCheckbox />
         <CanvasSettingsDynamicGridSwitch />
         <CanvasSettingsShowHUDSwitch />
         <CanvasSettingsResetButton />

@@ -1,5 +1,6 @@
 import { Button } from '@invoke-ai/ui-library';
 import { useAppDispatch } from 'app/store/storeHooks';
+import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
 import { canvasReset } from 'features/controlLayers/store/canvasSlice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
@@ -7,9 +8,11 @@ import { useTranslation } from 'react-i18next';
 export const CanvasSettingsResetButton = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
+  const canvasManager = useCanvasManager();
   const onClick = useCallback(() => {
     dispatch(canvasReset());
-  }, [dispatch]);
+    canvasManager.stage.fitLayersToStage();
+  }, [canvasManager.stage, dispatch]);
   return (
     <Button onClick={onClick} colorScheme="error" size="sm">
       {t('controlLayers.resetCanvas')}

Some files were not shown because too many files have changed in this diff.