Mirror of https://github.com/invoke-ai/InvokeAI.git
Synced 2026-01-16 05:08:03 -05:00

Compare commits: saas-targe ... v6.2.0

789 Commits
SHA1:

```text
70ac58e64a e653837236 2bbfcc2f13 d6e0e439c5 26aab60f81 7bea2fa11f 169d58ea4c b53d2250f7
242eea8295 4dabe09e0d 07fa0d3b77 e97f82292f 005bab9035 409173919c 7915180047 4349b8387d
f95b686bdc 72afb9c3fd f004fc31f1 2aa163b3a2 f40900c173 2c1f2b2873 8418e34480 b548ac0ccf
2af2b8b6c4 058dc06748 8acb1c0088 683732a37c b990eacca0 5f7e920deb 55dfdc0a9c 10d6d19e17
15542b954d 6430d830c1 c3f6389291 070eef3eff b14d841d57 dd35ab026a 7fc06db8ad 9d1f09c0f3
cacfb183a6 564f4f7a60 113a118fcf 1f930cdaf2 c490e0ce08 7640ee307c 1f5f70f898 1430858112
48c27ec117 af7737e804 3eca0d2ba0 307259f096 bed01941a5 89fa43a3b6 d8fcb08abf c61bcd9f50
3fb0fcbbfb db9af5083f 720f1bb65c 7dfb318ba2 9b024da2b4 15ca3b727a 74ca604ae0 6934b05c85
1a47a5317c bc3ef21c64 e329f5ad43 e6ad91bf89 2f586416a5 33b56f421c e58ee4c492 49691aa07e
56570f235f a2d95cf5b6 704dbfd04a 5d9e078043 875cde13ae 77655aed86 0628b92d63 9e526d00c2
1a24396be8 d97e73a565 55b14c8aaf 79f65e57eb b4c8950278 400b2e9a55 3a687c583a 833950078d
e698dcb148 218386e077 4426be9e64 86f4cf7857 49ae66d94a c10865c7ef f3478a189a 43db29176a
f38922929c 7d02c58f86 6edce8be87 31f63e38bd 78a68ac3a7 8cd3bcd1c0 264cc5ef46 8bfbea5ed3
f06a66da07 337cae9b22 bf926bb7d5 18ad9a6af3 b6ed31c222 200beb5af5 f82a948bdd dd03e3ddcd
7561b73e8f caa97608c7 72a6d1edc1 b8bf89c2f1 a1ade2b8c0 4bdcae1f8f 4b22c84407 c9daf1db30
06d3cfbe97 71e4901313 82fb897b62 192b00d969 7bb25ef1b4 62f52c74a8 97439c1daa b23bff1b53
d9a1efbabf d4e903ee2d bb3e5d16d8 e62d3f01a8 757ecdbf82 694c85b041 988d7ba24c ac981879ef
fc71849c24 a19aa3b032 ef4d5d7377 6b0dfd8427 471c010217 b1193022f7 2152ca092c ccc62ba56d
9cf82de8c5 aced349152 0d67ee6548 03c21d1607 752e8db1f5 85fc861dd9 458cbfd874 04331c070a
632ddf0cb4 2b193ff416 96ee394f9e 0badc80c0c 78e6cbf96e 0b969a661b 6fe47ec9f8 3850dd61f8
75520eaf0f 10e88c58c1 30ed4dbd92 ed9c090f33 d29f65ed22 2062ec8ac0 49e818338a 1caab2b9c4
50079ea349 fffa1b24c4 a6d6170387 e5fceb0448 059baf5b29 1be8a9a310 7adc33e04d 7f2dd22d47
bb50f4b8a2 a48958e0d4 e3a1e9af53 c6fe11c42f 4eb1bd67df c376f914d2 b5d1c47ef7 004a52ca65
b1d5a51ddf 2b2498eaa1 10dda4440e 98f78abefa cc93fa270f 014b27680f c3d8f875de 79f9dc6e4a
6e1c0c1105 0362524040 dc6656459b 3ea1b97f6f a7c7405ccc c391f1117a b1e2cb8401 db6af134b7
7e6cffb00c 5b187bcb00 0843d609a3 95bd9cef18 931d6521f6 e37665ff59 56857fbbe6 43cfb8a574
05b1682d15 69a08ee7f2 18212c7d8a 7de26f8e69 0652b12a6f 43a361a00f cf68ad9cbc ec02a39325
e52d7a05c2 c9d4e2b761 ac26aa9508 9ff6ada15b e81a115169 52827807de b631de4cb5 099ebdbc37
4de6549be9 368be34949 5baa4bd916 4229377532 2610772ffd 193de6a8f2 7ea343c787 12179dabba
ef135f9923 e6c67cc00f 179b988148 d913a3c85b e79525c40c f409f913ac 7a79f61d4c ea182c234b
f2eee4a82d e129525306 ecedfce758 702cb2cb1e 2e8db3cce3 7845623fa5 e6a25ca7a2 71e12bcebe
863c7eb9e2 9945c20d02 e3c1334b1f c143f63ef0 067026a0d0 66991334fc b771c3b164 4925694dc1
0a737ced44 8d83caaae0 16c8017f1a 61a35f1396 6bd004d868 b6a6d406c7 8e287c32ee 2d8b5e26c2
50914b74ee 0fc1c33536 3b08c35f72 607b2561fd d68f922efb 2bbd74d418 3a5392a9ee 6f80efe71d
7fac833813 b67eb4134d 522eeda2e2 76233241f0 54be9989c5 0d3af08d27 767ac91f2c 68571ece8f
01100a2b9a ce2e6d8ab6 4887424ca3 28f6a20e71 c4142e75b2 fefe563127 1c72f1ff9f 605cc7369d
e7ce08cffa 983cb5ebd2 52dbdb7118 71e6f00e10 e73150c3e6 f2426c3ab2 9d9c4c0f1a acb930f6b9
585b54dc7d f65affc0ec 22d574c92a f23be119fc 2d06949e80 67804313e1 dc23be117a 350de058fc
fd5cd707a3 98ecefdce0 42688a0993 d94aa4abf7 69a56aafed 56873f6936 6bc6a680cf 9a49682f60
ff84b0a495 bcced8a5e8 4a18e9eaea dde5bf61be 987e401709 5c5ac570e3 309903fe0f f16ea43e9a
d794aedb43 9930440f33 f0a6c4aa1f f36d22f13c e0d7fab524 f20c230f4a 05c9bc730e f17ac06591
b35f93d919 289d8076d8 604763d20f 7b452f098d b41c18d35f 8328081333 07517cf2c2 6b98ad9095
0de3967e7e 1335377fb1 adbcc191d9 11fc7af1c8 6f12fd22b9 324b6e2af4 038010a1ca 2dd1bc54c9
8b69842678 9821f7c4fc 2290ff4ad6 8d82ad6d0b 8ed9f652e8 ee8ed344bd 6d16cfdbe2 3ef2872dda
b52ba149b4 c6126c6875 3f78ac9295 79fea1ac40 6eade5781d 3d8f865fb0 dc9cd22d9d fe115ff8f9
1d35aad213 195d6ce893 f13ced7ed4 735fc276e5 cd3caf8c30 e9012280ab fa72a97794 e817631ba3
d0619c033f 6f4850f34f 072cd9dee7 19b6dc1c1f 7566d0d6c6 f123888b46 aeab7d0cab 3f1b2c39ab
72e3a4b4be 58e0f80138 8b8e29d22d 90201be670 46a5619100 d608a7469e a7d413d372 f5c9e68dbf
1ded459f03 d9024dc230 40528692c3 f35b05be43 29e87fc615 ca26b2718e 5fa6c0b413 c37c8c50cd
f0a4de245d 5db62f8643 e1c478f94c 11fe3b6332 e4aae1a591 4d83d1c56d 34def323e8 854956316b
91afe7884a 8417ee8a7b a035645ed3 e00ccba7d3 fb883d63aa b113c57fc4 7636007349 fda86ae981
c02be4bdf4 ed7772d993 baae998b5b 4077ffe595 c1937b1379 5c66dfed8e 126dcc96c0 cb9c7b4a28
e8c4f49a14 30fffae637 4558a292b6 825d17441c 9b16504af9 46c92fadff c0467b82ac 6dafa67286
eb406aa07e d9422ffebd d5c033be4d 4662cd6f15 a740a22613 bf4016b4bc 6fa7c8c2ee ea40f582da
01caf56251 42d577e65a 38d80c9ce5 6acaa8abbf 4b84e34599 bbd21b1eb2 4fa83a6228 051876dcff
8dc6d0b5ae 40e9624954 ae27c83dc4 161059551b c196f8a5d5 2c6d22664e b9ce5389ef d1cbf56695
e379ac12c3 aa10373292 780f3692a0 3604dcfdd1 2b1cffde5e 83d642ed15 455c73235e 8efef8da41
060a9e57b9 099d75ca1e bbb5d68146 9066dc1839 075345bffd 74d1239c87 51e1c56636 ca1df60e54
7549c1250d df8751b5a1 651b80b997 5d236ae4e7 e5dc606f5e dc6b8e13bd c1b34e1f11 89f1684072
14fbee17a3 5dbc32e06e 23baf61e51 5e55f6074b f7c555e501 6aa605e811 f51014e108 9862ba9210
920aea08cc 39e584297e 62a14bb935 d7ae2cdf75 6172c859ac b26fb1f617 05167dfd7a c090ea7387
7ba6c67049 3de186061d a716381733 fb5df06835 33c597c224 19d882d038 ee4bc49bd4 188cf37f48
15a0a7134c 22cea0de8b cd21816d12 605b912ba4 52e31112f9 a4c9346cd7 a1647e4c6e 8c9ca088a7
7a7a2e147c adf4cc750a 9f1ea9d1c7 571d286506 1320a2c5f8 26a9b3131d d48140b35d 9757bb0325
38ccd8e09c 7759b166a9 9fc51c7a6e 62fa4f42f5 418ad0de38 f4a411326e 6358f39ebb ea8da0bfbf
5385282325 0bf84ab803 82f31f2258 966dd8857d 1c778bd719 394a14cf61 0e843823d1 29462e62d2
175c0147f8 df6e67c982 4612f0ac50 386a932f2a 32438532b0 ab5cb2c264 504daa0ae5 14f7c98e8a
ab39305223 7948bca864 1a39d22b6c 9424271d12 b5acc204a8 7aefa8f36b 242da9e888 1aedc26041
2c7fa90892 6c8cf99ad2 a92ba2542c 2367b9f945 a928ed0204 e164451dfe d74d079356 0eb4360c01
937c03f2ec f7b249252d b2b42be51c 98368b0665 b5eb3d9798 1218f49e20 89c609fd61 b204fb6a91
6e3e316416 bf5fc9512d 7080889ed4 adea983bfc f68d8ed36a d45197e0af 434d8a2b12 f55c593705
8327d86774 c8254710e6 0a8f647260 32a5e9652a 87909a06a8 2c8ce6f2f4 bee4cf41b4 049a8d8144
ac81ec41c3 a294e8e0fd 4665f0df40 70382294f5 4028cadfaf d23cdfd0ad f0ba693922 214005d795
34aa131115 5d8061bea9 36ec1015d6 7208373576 e10afe3026 399d6e7bce 8d0fe5522b 81341deb46
a30933b09c 3264188ffd 3984b341e1 041023df53 b06f76cdb6 852badc90b 01953cf057 241844bdef
33a28ad4f9 7c4550cbd5 553d1a6ac6 f4794e409b df87800d61 16993cd216 7f222ffb9d e0ed56ff8d
e7e1142c77 fcaeba290e 6eecdca56c 7f44da4902 abaa33e22c d5c238e7c2 18775e8b67 903776bfbc
a5baf0c102 a7e45731ec 32aa3e6d48 2f9ea91896 5ac5115269 161624c722 c31cb0b106 893f7a8744
2e0824a799 ed05bf2df3 0f1a69a0c3 450a0bf142 a28c15d545 1b1e1983d9 d08e2fbd82 45b1ef6231
3bb446c08f 8d1ab0a2e5 48e2e7e4a1 5a2f5c105d aa93e95a94 a5e5cbd7c3 baa9141be3 c7ed351bab
8c17bde4ea ba082ccc2f 01784fb3bf a71a0e143c 94afc13813 d640a9001b 711fe91b24 2f26657c17
6754fde935 ac206f4767 c316f07fb2 e81dde0933 9f392c8c3c 2531366386 9df69496e4 2ddcde13ff
cc5083599d 2431060a7e 592c842632 bc3550f238 23511d68db cd0668dd0b bf5ed61b84 3038a797a6
9bbc31b2d9 526e6335a1 1412c079ad 6570c0c3b9 3a08ea799a e3fc244126 56938ca0a1 5d80642ea4
da4b084a8b 86e1a37a00 ea34690709 c8df7cd2c0 628367b97b 002816653e b05de8634d 5088e700ad
d2155e98ef 7ec511da01 985cd8272b cd136194ad 2e2ac71278 db4220fb20 84f70942e7 0af20b03e5
e16414b452 5dbc2a74a2 ad736bc190 0e9b71801a e80f0b2b43 c9042e52d4 8a78e37634 5e93f58530
a3851e0b08 eb45a457e9 1446d3490b 579318af70 57bfae6774 2a92524546 7a5fa25b48 b3f3020793
650809e50d 7308428f32 4dc3f1bcee faeb5f0c3b d985dfe821 ce5ae83689 c0428ee7ef aa3b2106d4
cf2d67ef3d c4d1e78f59 02e4a3aa82 a0b0c30be9 5c4cbc7fa2 5f2f12f803 c9cd0a87be 668c475271
341910739e 53a3dc52bc 23b0a4a7f4 6afbf31750 3cd4306eec 827191d2fc aaa34f717d fe83c2f81f
17dead3309 979bd33dfb 5128f072a8 2ad5b5cc2e 24d8a96071 f1e4665aa2 1cbfea3a21 981e8e217d
e7ca30f406 2832ca300f de5f413440 fbc14c61ea 77e029a49f 61b049ad35 b88f4a24d0 8c632f0d32
150a876c73 62c3b01e4f e1157f343b 6a78739076 0794eb43e7 4ee54eac1d 5851c46c81 a296559e79
1fd83f5e68 637487c573 4e98e7d0a2 12f65d800d 45d09f8f51 2876c72fa9 9b4fdb493e 47e21d6e04
84ab4a1c30 85c4304efd 8f152f162b 63b49f045a 291e0736d6 4bfa6439d4 a8d7969a1d 46bfa24af3
a8cb8e128d 8cef0f5bf5 911baeb58b 312960645b 50cf285efb a214f4fff5 2981591c36 b08f90c99f
ab8c739cd8 5c5108c28a 3df7cfd605 1ff3d44dba c80ad90f72 3b4d1b8786 c66201c7e1 35c7c59455
85f98ab3eb dac75685be d7b5a8b298 d3ecaa740f b5a6765a3d
```
**.github/ISSUE_TEMPLATE/BUG_REPORT.yml** (vendored, 26 changed lines)

```diff
@@ -21,6 +21,20 @@ body:
         - label: I have searched the existing issues
           required: true

+  - type: dropdown
+    id: install_method
+    attributes:
+      label: Install method
+      description: How did you install Invoke?
+      multiple: false
+      options:
+        - "Invoke's Launcher"
+        - 'Stability Matrix'
+        - 'Pinokio'
+        - 'Manual'
+    validations:
+      required: true
+
   - type: markdown
     attributes:
       value: __Describe your environment__
@@ -76,8 +90,8 @@ body:
     attributes:
      label: Version number
      description: |
-        The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
-      placeholder: ex. 3.6.1
+        The version of Invoke you have installed. If it is not the [latest version](https://github.com/invoke-ai/InvokeAI/releases/latest), please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: ex. v6.0.2
     validations:
       required: true
@@ -85,17 +99,17 @@ body:
     id: browser-version
     attributes:
       label: Browser
-      description: Your web browser and version.
+      description: Your web browser and version, if you do not use the Launcher's provided GUI.
       placeholder: ex. Firefox 123.0b3
     validations:
-      required: true
+      required: false

   - type: textarea
     id: python-deps
     attributes:
-      label: Python dependencies
+      label: System Information
       description: |
-        If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
+        Click the gear icon at the bottom left corner, then click "About". Click the copy button and then paste here.
     validations:
       required: false
```
```diff
@@ -3,15 +3,15 @@ description: Installs frontend dependencies with pnpm, with caching
 runs:
   using: 'composite'
   steps:
-    - name: setup node 18
+    - name: setup node 20
       uses: actions/setup-node@v4
       with:
-        node-version: '18'
+        node-version: '20'

     - name: setup pnpm
       uses: pnpm/action-setup@v4
       with:
-        version: 8.15.6
+        version: 10
         run_install: false

     - name: get pnpm store directory
```
**.gitignore** (vendored, 3 changed lines)

```diff
@@ -180,6 +180,7 @@ cython_debug/
 # Scratch folder
 .scratch/
 .vscode/
+.zed/

 # source installer files
 installer/*zip
@@ -189,3 +190,5 @@ installer/update.bat
 installer/update.sh
 installer/InvokeAI-Installer/
 .aider*
+
+.claude/
```
```diff
@@ -5,8 +5,7 @@
 FROM docker.io/node:22-slim AS web-builder
 ENV PNPM_HOME="/pnpm"
 ENV PATH="$PNPM_HOME:$PATH"
-RUN corepack use pnpm@8.x
-RUN corepack enable
+RUN corepack use pnpm@10.x && corepack enable

 WORKDIR /build
 COPY invokeai/frontend/web/ ./
```
````diff
@@ -41,7 +41,7 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
    With the modifications made, the install command should look something like this:

    ```sh
-   uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu126 --reinstall
+   uv pip install -e ".[dev,test,docs,xformers]" --python 3.12 --python-preference only-managed --index=https://download.pytorch.org/whl/cu128 --reinstall
    ```

 6. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.
@@ -50,11 +50,11 @@ If you just want to use Invoke, you should use the [launcher][launcher link].

    If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.

-7. Install the frontend dev toolchain:
+7. Install the frontend dev toolchain, paying attention to versions:

-   - [`nodejs`](https://nodejs.org/) (v20+)
+   - [`nodejs`](https://nodejs.org/) (tested on LTS, v22)

-   - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)
+   - [`pnpm`](https://pnpm.io/installation) (tested on v10)

 8. Do a production build of the frontend:
````
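After reinstalling against the cu128 index, it is worth confirming which torch build actually landed in the venv. A quick check using plain PyTorch introspection (nothing InvokeAI-specific):

```python
import torch

# Prints the installed torch version and the CUDA toolkit the wheel was
# built against (e.g. "12.8"), or None for a CPU-only build.
print(torch.__version__, torch.version.cuda)
print("CUDA available:", torch.cuda.is_available())
```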
```diff
@@ -35,7 +35,7 @@ More detail on system requirements can be found [here](./requirements.md).

 ## Step 2: Download

-Download the most launcher for your operating system:
+Download the most recent launcher for your operating system:

 - [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
 - [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)
```
```diff
@@ -1,8 +1,7 @@
 import typing
 from enum import Enum
-from importlib.metadata import PackageNotFoundError, version
+from importlib.metadata import distributions
 from pathlib import Path
 from platform import python_version
 from typing import Optional

 import torch
@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
     highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")


-class AppDependencyVersions(BaseModel):
-    """App depencency Versions Response"""
-
-    accelerate: str = Field(description="accelerate version")
-    compel: str = Field(description="compel version")
-    cuda: Optional[str] = Field(description="CUDA version")
-    diffusers: str = Field(description="diffusers version")
-    numpy: str = Field(description="Numpy version")
-    opencv: str = Field(description="OpenCV version")
-    onnx: str = Field(description="ONNX version")
-    pillow: str = Field(description="Pillow (PIL) version")
-    python: str = Field(description="Python version")
-    torch: str = Field(description="PyTorch version")
-    torchvision: str = Field(description="PyTorch Vision version")
-    transformers: str = Field(description="transformers version")
-    xformers: Optional[str] = Field(description="xformers version")
-
-
 class AppConfig(BaseModel):
     """App Config Response"""

@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
     return AppVersion(version=__version__)


-@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
-async def get_app_deps() -> AppDependencyVersions:
+@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
+async def get_app_deps() -> dict[str, str]:
+    deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
     try:
-        xformers = version("xformers")
-    except PackageNotFoundError:
-        xformers = None
-    return AppDependencyVersions(
-        accelerate=version("accelerate"),
-        compel=version("compel"),
-        cuda=torch.version.cuda,
-        diffusers=version("diffusers"),
-        numpy=version("numpy"),
-        opencv=version("opencv-python"),
-        onnx=version("onnx"),
-        pillow=version("pillow"),
-        python=python_version(),
-        torch=torch.version.__version__,
-        torchvision=version("torchvision"),
-        transformers=version("transformers"),
-        xformers=xformers,
-    )
+        cuda = torch.version.cuda or "N/A"
+    except Exception:
+        cuda = "N/A"
+
+    deps["CUDA"] = cuda
+
+    sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
+
+    return sorted_deps


 @app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
```
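The rewritten endpoint drops the hand-maintained `AppDependencyVersions` model in favor of reporting whatever is actually installed. The core pattern is plain `importlib.metadata`; a minimal standalone sketch of the same logic, with no FastAPI required:

```python
from importlib.metadata import distributions


def collect_deps() -> dict[str, str]:
    # Map every installed distribution's name to its version string.
    deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}

    # CUDA is not a Python package; probe torch for it, tolerating any failure
    # (missing torch, CPU-only build, etc.), exactly as the diff above does.
    try:
        import torch

        cuda = torch.version.cuda or "N/A"
    except Exception:
        cuda = "N/A"
    deps["CUDA"] = cuda

    # Sort case-insensitively so e.g. "PyYAML" and "pydantic" interleave naturally.
    return dict(sorted(deps.items(), key=lambda item: item[0].lower()))


if __name__ == "__main__":
    for name, ver in collect_deps().items():
        print(f"{name}=={ver}")
```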
@@ -1,21 +1,12 @@
|
||||
from fastapi import Body, HTTPException
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.services.images.images_common import AddImagesToBoardResult, RemoveImagesFromBoardResult
|
||||
|
||||
board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])
|
||||
|
||||
|
||||
class AddImagesToBoardResult(BaseModel):
|
||||
board_id: str = Field(description="The id of the board the images were added to")
|
||||
added_image_names: list[str] = Field(description="The image names that were added to the board")
|
||||
|
||||
|
||||
class RemoveImagesFromBoardResult(BaseModel):
|
||||
removed_image_names: list[str] = Field(description="The image names that were removed from their board")
|
||||
|
||||
|
||||
@board_images_router.post(
|
||||
"/",
|
||||
operation_id="add_image_to_board",
|
||||
@@ -23,17 +14,26 @@ class RemoveImagesFromBoardResult(BaseModel):
|
||||
201: {"description": "The image was added to a board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=AddImagesToBoardResult,
|
||||
)
|
||||
async def add_image_to_board(
|
||||
board_id: str = Body(description="The id of the board to add to"),
|
||||
image_name: str = Body(description="The name of the image to add"),
|
||||
):
|
||||
) -> AddImagesToBoardResult:
|
||||
"""Creates a board_image"""
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.board_images.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
added_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.add_image_to_board(board_id=board_id, image_name=image_name)
|
||||
added_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
affected_boards.add(old_board_id)
|
||||
|
||||
return AddImagesToBoardResult(
|
||||
added_images=list(added_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
return result
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to add image to board")
|
||||
|
||||
@@ -45,14 +45,25 @@ async def add_image_to_board(
|
||||
201: {"description": "The image was removed from the board successfully"},
|
||||
},
|
||||
status_code=201,
|
||||
response_model=RemoveImagesFromBoardResult,
|
||||
)
|
||||
async def remove_image_from_board(
|
||||
image_name: str = Body(description="The name of the image to remove", embed=True),
|
||||
):
|
||||
) -> RemoveImagesFromBoardResult:
|
||||
"""Removes an image from its board, if it had one"""
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
return result
|
||||
removed_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
removed_images.add(image_name)
|
||||
affected_boards.add("none")
|
||||
affected_boards.add(old_board_id)
|
||||
return RemoveImagesFromBoardResult(
|
||||
removed_images=list(removed_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to remove image from board")
|
||||
|
||||
@@ -72,16 +83,25 @@ async def add_images_to_board(
|
||||
) -> AddImagesToBoardResult:
|
||||
"""Adds a list of images to a board"""
|
||||
try:
|
||||
added_image_names: list[str] = []
|
||||
added_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
for image_name in image_names:
|
||||
try:
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
board_id=board_id,
|
||||
image_name=image_name,
|
||||
)
|
||||
added_image_names.append(image_name)
|
||||
added_images.add(image_name)
|
||||
affected_boards.add(board_id)
|
||||
affected_boards.add(old_board_id)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
return AddImagesToBoardResult(board_id=board_id, added_image_names=added_image_names)
|
||||
return AddImagesToBoardResult(
|
||||
added_images=list(added_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to add images to board")
|
||||
|
||||
@@ -100,13 +120,20 @@ async def remove_images_from_board(
|
||||
) -> RemoveImagesFromBoardResult:
|
||||
"""Removes a list of images from their board, if they had one"""
|
||||
try:
|
||||
removed_image_names: list[str] = []
|
||||
removed_images: set[str] = set()
|
||||
affected_boards: set[str] = set()
|
||||
for image_name in image_names:
|
||||
try:
|
||||
old_board_id = ApiDependencies.invoker.services.images.get_dto(image_name).board_id or "none"
|
||||
ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
|
||||
removed_image_names.append(image_name)
|
||||
removed_images.add(image_name)
|
||||
affected_boards.add("none")
|
||||
affected_boards.add(old_board_id)
|
||||
except Exception:
|
||||
pass
|
||||
return RemoveImagesFromBoardResult(removed_image_names=removed_image_names)
|
||||
return RemoveImagesFromBoardResult(
|
||||
removed_images=list(removed_images),
|
||||
affected_boards=list(affected_boards),
|
||||
)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to remove images from board")
|
||||
|
||||
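The result models referenced above now come from `invokeai.app.services.images.images_common` rather than being defined in the router. Judging only by how the routes construct them, their shape is presumably along these lines (a sketch inferred from the diff, not the actual source of `images_common`):

```python
from pydantic import BaseModel, Field


class AddImagesToBoardResult(BaseModel):
    # Field names taken from the constructor calls in the diff above; the
    # descriptions here are illustrative guesses.
    added_images: list[str] = Field(description="The image names that were added to the board")
    affected_boards: list[str] = Field(description="Boards whose contents changed ('none' = uncategorized)")


class RemoveImagesFromBoardResult(BaseModel):
    removed_images: list[str] = Field(description="The image names that were removed from their board")
    affected_boards: list[str] = Field(description="Boards whose contents changed ('none' = uncategorized)")
```

Tracking both the target board and each image's previous board lets a client invalidate exactly the board views that changed, instead of refetching everything.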
```diff
@@ -14,10 +14,17 @@ from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_i
 from invokeai.app.invocations.fields import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
+    ImageNamesResult,
     ImageRecordChanges,
     ResourceOrigin,
 )
-from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
+from invokeai.app.services.images.images_common import (
+    DeleteImagesResult,
+    ImageDTO,
+    ImageUrlsDTO,
+    StarredImagesResult,
+    UnstarredImagesResult,
+)
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 from invokeai.app.util.controlnet_utils import heuristic_resize_fast
@@ -65,7 +72,7 @@ async def upload_image(
     resize_to: Optional[str] = Body(
         default=None,
         description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
-        example='"[1024,1024]"',
+        examples=['"[1024,1024]"'],
     ),
     metadata: Optional[str] = Body(
         default=None,
@@ -99,7 +106,9 @@ async def upload_image(
         raise HTTPException(status_code=400, detail="Invalid resize_to format or size")

     try:
-        np_image = pil_to_np(pil_image)
+        # heuristic_resize_fast expects an RGB or RGBA image
+        pil_rgba = pil_image.convert("RGBA")
+        np_image = pil_to_np(pil_rgba)
         np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
         pil_image = np_to_pil(np_image)
     except Exception:
@@ -151,18 +160,30 @@ async def create_image_upload_entry(
     raise HTTPException(status_code=501, detail="Not implemented")


-@images_router.delete("/i/{image_name}", operation_id="delete_image")
+@images_router.delete("/i/{image_name}", operation_id="delete_image", response_model=DeleteImagesResult)
 async def delete_image(
     image_name: str = Path(description="The name of the image to delete"),
-) -> None:
+) -> DeleteImagesResult:
     """Deletes an image"""

+    deleted_images: set[str] = set()
+    affected_boards: set[str] = set()
+
     try:
+        image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
+        board_id = image_dto.board_id or "none"
         ApiDependencies.invoker.services.images.delete(image_name)
+        deleted_images.add(image_name)
+        affected_boards.add(board_id)
     except Exception:
         # TODO: Does this need any exception handling at all?
         pass

+    return DeleteImagesResult(
+        deleted_images=list(deleted_images),
+        affected_boards=list(affected_boards),
+    )
+

 @images_router.delete("/intermediates", operation_id="clear_intermediates")
 async def clear_intermediates() -> int:
@@ -374,31 +395,32 @@ async def list_image_dtos(
     return image_dtos


-class DeleteImagesFromListResult(BaseModel):
-    deleted_images: list[str]
-
-
-@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesFromListResult)
+@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesResult)
 async def delete_images_from_list(
     image_names: list[str] = Body(description="The list of names of images to delete", embed=True),
-) -> DeleteImagesFromListResult:
+) -> DeleteImagesResult:
     try:
-        deleted_images: list[str] = []
+        deleted_images: set[str] = set()
+        affected_boards: set[str] = set()
         for image_name in image_names:
             try:
+                image_dto = ApiDependencies.invoker.services.images.get_dto(image_name)
+                board_id = image_dto.board_id or "none"
                 ApiDependencies.invoker.services.images.delete(image_name)
-                deleted_images.append(image_name)
+                deleted_images.add(image_name)
+                affected_boards.add(board_id)
             except Exception:
                 pass
-        return DeleteImagesFromListResult(deleted_images=deleted_images)
+        return DeleteImagesResult(
+            deleted_images=list(deleted_images),
+            affected_boards=list(affected_boards),
+        )
     except Exception:
         raise HTTPException(status_code=500, detail="Failed to delete images")


-@images_router.delete(
-    "/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
-)
-async def delete_uncategorized_images() -> DeleteImagesFromListResult:
+@images_router.delete("/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesResult)
+async def delete_uncategorized_images() -> DeleteImagesResult:
     """Deletes all images that are uncategorized"""

     image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
@@ -406,14 +428,19 @@ async def delete_uncategorized_images() -> DeleteImagesFromListResult:
     )

     try:
-        deleted_images: list[str] = []
+        deleted_images: set[str] = set()
+        affected_boards: set[str] = set()
         for image_name in image_names:
             try:
                 ApiDependencies.invoker.services.images.delete(image_name)
-                deleted_images.append(image_name)
+                deleted_images.add(image_name)
+                affected_boards.add("none")
             except Exception:
                 pass
-        return DeleteImagesFromListResult(deleted_images=deleted_images)
+        return DeleteImagesResult(
+            deleted_images=list(deleted_images),
+            affected_boards=list(affected_boards),
+        )
     except Exception:
         raise HTTPException(status_code=500, detail="Failed to delete images")
@@ -422,36 +449,50 @@ class ImagesUpdatedFromListResult(BaseModel):
     updated_image_names: list[str] = Field(description="The image names that were updated")


-@images_router.post("/star", operation_id="star_images_in_list", response_model=ImagesUpdatedFromListResult)
+@images_router.post("/star", operation_id="star_images_in_list", response_model=StarredImagesResult)
 async def star_images_in_list(
     image_names: list[str] = Body(description="The list of names of images to star", embed=True),
-) -> ImagesUpdatedFromListResult:
+) -> StarredImagesResult:
     try:
-        updated_image_names: list[str] = []
+        starred_images: set[str] = set()
+        affected_boards: set[str] = set()
         for image_name in image_names:
             try:
-                ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=True))
-                updated_image_names.append(image_name)
+                updated_image_dto = ApiDependencies.invoker.services.images.update(
+                    image_name, changes=ImageRecordChanges(starred=True)
+                )
+                starred_images.add(image_name)
+                affected_boards.add(updated_image_dto.board_id or "none")
             except Exception:
                 pass
-        return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
+        return StarredImagesResult(
+            starred_images=list(starred_images),
+            affected_boards=list(affected_boards),
+        )
     except Exception:
         raise HTTPException(status_code=500, detail="Failed to star images")


-@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=ImagesUpdatedFromListResult)
+@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=UnstarredImagesResult)
 async def unstar_images_in_list(
     image_names: list[str] = Body(description="The list of names of images to unstar", embed=True),
-) -> ImagesUpdatedFromListResult:
+) -> UnstarredImagesResult:
     try:
-        updated_image_names: list[str] = []
+        unstarred_images: set[str] = set()
+        affected_boards: set[str] = set()
         for image_name in image_names:
             try:
-                ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=False))
-                updated_image_names.append(image_name)
+                updated_image_dto = ApiDependencies.invoker.services.images.update(
+                    image_name, changes=ImageRecordChanges(starred=False)
+                )
+                unstarred_images.add(image_name)
+                affected_boards.add(updated_image_dto.board_id or "none")
             except Exception:
                 pass
-        return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
+        return UnstarredImagesResult(
+            unstarred_images=list(unstarred_images),
+            affected_boards=list(affected_boards),
+        )
     except Exception:
         raise HTTPException(status_code=500, detail="Failed to unstar images")
@@ -522,3 +563,61 @@ async def get_bulk_download_item(
         return response
     except Exception:
         raise HTTPException(status_code=404)
+
+
+@images_router.get("/names", operation_id="get_image_names")
+async def get_image_names(
+    image_origin: Optional[ResourceOrigin] = Query(default=None, description="The origin of images to list."),
+    categories: Optional[list[ImageCategory]] = Query(default=None, description="The categories of image to include."),
+    is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate images."),
+    board_id: Optional[str] = Query(
+        default=None,
+        description="The board id to filter by. Use 'none' to find images without a board.",
+    ),
+    order_dir: SQLiteDirection = Query(default=SQLiteDirection.Descending, description="The order of sort"),
+    starred_first: bool = Query(default=True, description="Whether to sort by starred images first"),
+    search_term: Optional[str] = Query(default=None, description="The term to search for"),
+) -> ImageNamesResult:
+    """Gets ordered list of image names with metadata for optimistic updates"""
+
+    try:
+        result = ApiDependencies.invoker.services.images.get_image_names(
+            starred_first=starred_first,
+            order_dir=order_dir,
+            image_origin=image_origin,
+            categories=categories,
+            is_intermediate=is_intermediate,
+            board_id=board_id,
+            search_term=search_term,
+        )
+        return result
+    except Exception:
+        raise HTTPException(status_code=500, detail="Failed to get image names")
+
+
+@images_router.post(
+    "/images_by_names",
+    operation_id="get_images_by_names",
+    responses={200: {"model": list[ImageDTO]}},
+)
+async def get_images_by_names(
+    image_names: list[str] = Body(embed=True, description="Object containing list of image names to fetch DTOs for"),
+) -> list[ImageDTO]:
+    """Gets image DTOs for the specified image names. Maintains order of input names."""
+
+    try:
+        image_service = ApiDependencies.invoker.services.images
+
+        # Fetch DTOs preserving the order of requested names
+        image_dtos: list[ImageDTO] = []
+        for name in image_names:
+            try:
+                dto = image_service.get_dto(name)
+                image_dtos.append(dto)
+            except Exception:
+                # Skip missing images - they may have been deleted between name fetch and DTO fetch
+                continue
+
+        return image_dtos
+    except Exception:
+        raise HTTPException(status_code=500, detail="Failed to get image DTOs")
```
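Together, the two new endpoints enable a fetch-names-first, hydrate-later pattern: the cheap `/names` call returns the full ordered list, and `/images_by_names` hydrates only the visible window into DTOs. A hypothetical client sketch; the base URL, the default port, and the `image_names` response field name are assumptions, not confirmed by the diff:

```python
import requests

# Assumed local InvokeAI server; adjust host/port to your install.
BASE = "http://localhost:9090/api/v1/images"

# 1) Fetch the full ordered list of names for uncategorized images (cheap).
names_result = requests.get(f"{BASE}/names", params={"board_id": "none"}).json()
image_names = names_result["image_names"]  # field name assumed from ImageNamesResult

# 2) Hydrate only the visible window into full DTOs; order is preserved.
visible = image_names[:50]
dtos = requests.post(f"{BASE}/images_by_names", json={"image_names": visible}).json()
for dto in dtos:
    print(dto["image_name"], dto.get("board_id"))
```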
```diff
@@ -41,6 +41,7 @@ from invokeai.backend.model_manager.starter_models import (
     STARTER_BUNDLES,
     STARTER_MODELS,
     StarterModel,
+    StarterModelBundle,
     StarterModelWithoutDependencies,
 )
@@ -291,7 +292,7 @@ async def get_hugging_face_models(
 )
 async def update_model_record(
     key: Annotated[str, Path(description="Unique key of model")],
-    changes: Annotated[ModelRecordChanges, Body(description="Model config", example=example_model_input)],
+    changes: Annotated[ModelRecordChanges, Body(description="Model config", examples=[example_model_input])],
 ) -> AnyModelConfig:
     """Update a model's config."""
     logger = ApiDependencies.invoker.services.logger
@@ -449,7 +450,7 @@ async def install_model(
     access_token: Optional[str] = Query(description="access token for the remote resource", default=None),
     config: ModelRecordChanges = Body(
         description="Object containing fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
-        example={"name": "string", "description": "string"},
+        examples=[{"name": "string", "description": "string"}],
     ),
 ) -> ModelInstallJob:
     """Install a model using a string identifier.
@@ -799,7 +800,7 @@ async def convert_model(

 class StarterModelResponse(BaseModel):
     starter_models: list[StarterModel]
-    starter_bundles: dict[str, list[StarterModel]]
+    starter_bundles: dict[str, StarterModelBundle]


 def get_is_installed(
@@ -833,7 +834,7 @@ async def get_starter_models() -> StarterModelResponse:
             model.dependencies = missing_deps

     for bundle in starter_bundles.values():
-        for model in bundle:
+        for model in bundle.models:
             model.is_installed = get_is_installed(model, installed_models)
             # Remove already-installed dependencies
             missing_deps: list[StarterModelWithoutDependencies] = []
```
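A change repeated across these routers is the move from the singular `example=` keyword to `examples=[...]` on `Body()`, matching the spelling recent FastAPI releases prefer (the singular form is deprecated there). A minimal illustration, independent of InvokeAI; the route and payload are invented for demonstration:

```python
from fastapi import Body, FastAPI

app = FastAPI()


@app.post("/echo")
async def echo(
    # Old, deprecated spelling was: example={"name": "string"}
    # Current spelling takes a list of example payloads for the OpenAPI schema.
    payload: dict = Body(examples=[{"name": "string", "description": "string"}]),
) -> dict:
    return payload
```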
@@ -1,6 +1,6 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import Body, Path, Query
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -14,13 +14,15 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
CancelByBatchIDsResult,
|
||||
CancelByDestinationResult,
|
||||
ClearResult,
|
||||
DeleteAllExceptCurrentResult,
|
||||
DeleteByDestinationResult,
|
||||
EnqueueBatchResult,
|
||||
FieldIdentifier,
|
||||
PruneResult,
|
||||
RetryItemsResult,
|
||||
SessionQueueCountsByDestination,
|
||||
SessionQueueItem,
|
||||
SessionQueueItemDTO,
|
||||
SessionQueueItemNotFoundError,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
@@ -58,17 +60,19 @@ async def enqueue_batch(
|
||||
),
|
||||
) -> EnqueueBatchResult:
|
||||
"""Processes a batch and enqueues the output graphs for execution."""
|
||||
|
||||
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
|
||||
queue_id=queue_id, batch=batch, prepend=prepend
|
||||
)
|
||||
try:
|
||||
return await ApiDependencies.invoker.services.session_queue.enqueue_batch(
|
||||
queue_id=queue_id, batch=batch, prepend=prepend
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while enqueuing batch: {e}")
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
"/{queue_id}/list",
|
||||
operation_id="list_queue_items",
|
||||
responses={
|
||||
200: {"model": CursorPaginatedResults[SessionQueueItemDTO]},
|
||||
200: {"model": CursorPaginatedResults[SessionQueueItem]},
|
||||
},
|
||||
)
|
||||
async def list_queue_items(
|
||||
@@ -77,12 +81,42 @@ async def list_queue_items(
|
||||
status: Optional[QUEUE_ITEM_STATUS] = Query(default=None, description="The status of items to fetch"),
|
||||
cursor: Optional[int] = Query(default=None, description="The pagination cursor"),
|
||||
priority: int = Query(default=0, description="The pagination cursor priority"),
|
||||
) -> CursorPaginatedResults[SessionQueueItemDTO]:
|
||||
"""Gets all queue items (without graphs)"""
|
||||
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
|
||||
) -> CursorPaginatedResults[SessionQueueItem]:
|
||||
"""Gets cursor-paginated queue items"""
|
||||
|
||||
return ApiDependencies.invoker.services.session_queue.list_queue_items(
|
||||
queue_id=queue_id, limit=limit, status=status, cursor=cursor, priority=priority
|
||||
)
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.list_queue_items(
|
||||
queue_id=queue_id,
|
||||
limit=limit,
|
||||
status=status,
|
||||
cursor=cursor,
|
||||
priority=priority,
|
||||
destination=destination,
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all items: {e}")
|
||||
|
||||
|
||||
@session_queue_router.get(
|
||||
"/{queue_id}/list_all",
|
||||
operation_id="list_all_queue_items",
|
||||
responses={
|
||||
200: {"model": list[SessionQueueItem]},
|
||||
},
|
||||
)
|
||||
async def list_all_queue_items(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
destination: Optional[str] = Query(default=None, description="The destination of queue items to fetch"),
|
||||
) -> list[SessionQueueItem]:
|
||||
"""Gets all queue items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.list_all_queue_items(
|
||||
queue_id=queue_id,
|
||||
destination=destination,
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while listing all queue items: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -94,7 +128,10 @@ async def resume(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> SessionProcessorStatus:
|
||||
"""Resumes session processor"""
|
||||
return ApiDependencies.invoker.services.session_processor.resume()
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_processor.resume()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while resuming queue: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -106,7 +143,10 @@ async def Pause(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> SessionProcessorStatus:
|
||||
"""Pauses session processor"""
|
||||
return ApiDependencies.invoker.services.session_processor.pause()
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_processor.pause()
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while pausing queue: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -118,7 +158,25 @@ async def cancel_all_except_current(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> CancelAllExceptCurrentResult:
|
||||
"""Immediately cancels all queue items except in-processing items"""
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling all except current: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
"/{queue_id}/delete_all_except_current",
|
||||
operation_id="delete_all_except_current",
|
||||
responses={200: {"model": DeleteAllExceptCurrentResult}},
|
||||
)
|
||||
async def delete_all_except_current(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> DeleteAllExceptCurrentResult:
|
||||
"""Immediately deletes all queue items except in-processing items"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.delete_all_except_current(queue_id=queue_id)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while deleting all except current: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -131,7 +189,12 @@ async def cancel_by_batch_ids(
|
||||
batch_ids: list[str] = Body(description="The list of batch_ids to cancel all queue items for", embed=True),
|
||||
) -> CancelByBatchIDsResult:
|
||||
"""Immediately cancels all queue items from the given batch ids"""
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(queue_id=queue_id, batch_ids=batch_ids)
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_batch_ids(
|
||||
queue_id=queue_id, batch_ids=batch_ids
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by batch id: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -144,9 +207,12 @@ async def cancel_by_destination(
|
||||
destination: str = Query(description="The destination to cancel all queue items for"),
|
||||
) -> CancelByDestinationResult:
|
||||
"""Immediately cancels all queue items with the given origin"""
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.cancel_by_destination(
|
||||
queue_id=queue_id, destination=destination
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while canceling by destination: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -159,7 +225,10 @@ async def retry_items_by_id(
|
||||
item_ids: list[int] = Body(description="The queue item ids to retry"),
|
||||
) -> RetryItemsResult:
|
||||
"""Immediately cancels all queue items with the given origin"""
|
||||
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
|
||||
try:
|
||||
return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Unexpected error while retrying queue items: {e}")
|
||||
|
||||
|
||||
@session_queue_router.put(
|
||||
@@ -173,11 +242,14 @@ async def clear(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
) -> ClearResult:
|
||||
"""Clears the queue entirely, immediately canceling the currently-executing session"""
|
||||
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
if queue_item is not None:
|
||||
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
|
||||
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
|
||||
return clear_result
|
||||
try:
|
||||
queue_item = ApiDependencies.invoker.services.session_queue.get_current(queue_id)
|
||||
if queue_item is not None:
|
||||
ApiDependencies.invoker.services.session_queue.cancel_queue_item(queue_item.item_id)
|
||||
clear_result = ApiDependencies.invoker.services.session_queue.clear(queue_id)
|
||||
return clear_result
|
||||
except Exception as e:
|
||||
+        raise HTTPException(status_code=500, detail=f"Unexpected error while clearing queue: {e}")


 @session_queue_router.put(
@@ -191,7 +263,10 @@ async def prune(
     queue_id: str = Path(description="The queue id to perform this operation on"),
 ) -> PruneResult:
     """Prunes all completed or errored queue items"""
-    return ApiDependencies.invoker.services.session_queue.prune(queue_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.prune(queue_id)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while pruning queue: {e}")


 @session_queue_router.get(
@@ -205,7 +280,10 @@ async def get_current_queue_item(
     queue_id: str = Path(description="The queue id to perform this operation on"),
 ) -> Optional[SessionQueueItem]:
     """Gets the currently execution queue item"""
-    return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.get_current(queue_id)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while getting current queue item: {e}")


 @session_queue_router.get(
@@ -219,7 +297,10 @@ async def get_next_queue_item(
     queue_id: str = Path(description="The queue id to perform this operation on"),
 ) -> Optional[SessionQueueItem]:
     """Gets the next queue item, without executing it"""
-    return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.get_next(queue_id)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while getting next queue item: {e}")


 @session_queue_router.get(
@@ -233,9 +314,12 @@ async def get_queue_status(
     queue_id: str = Path(description="The queue id to perform this operation on"),
 ) -> SessionQueueAndProcessorStatus:
     """Gets the status of the session queue"""
-    queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
-    processor = ApiDependencies.invoker.services.session_processor.get_status()
-    return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
+    try:
+        queue = ApiDependencies.invoker.services.session_queue.get_queue_status(queue_id)
+        processor = ApiDependencies.invoker.services.session_processor.get_status()
+        return SessionQueueAndProcessorStatus(queue=queue, processor=processor)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while getting queue status: {e}")


 @session_queue_router.get(
@@ -250,7 +334,10 @@ async def get_batch_status(
     batch_id: str = Path(description="The batch to get the status of"),
 ) -> BatchStatus:
     """Gets the status of the session queue"""
-    return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.get_batch_status(queue_id=queue_id, batch_id=batch_id)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while getting batch status: {e}")


 @session_queue_router.get(
@@ -266,7 +353,27 @@ async def get_queue_item(
     item_id: int = Path(description="The queue item to get"),
 ) -> SessionQueueItem:
     """Gets a queue item"""
-    return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.get_queue_item(item_id)
+    except SessionQueueItemNotFoundError:
+        raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while fetching queue item: {e}")
+
+
+@session_queue_router.delete(
+    "/{queue_id}/i/{item_id}",
+    operation_id="delete_queue_item",
+)
+async def delete_queue_item(
+    queue_id: str = Path(description="The queue id to perform this operation on"),
+    item_id: int = Path(description="The queue item to delete"),
+) -> None:
+    """Deletes a queue item"""
+    try:
+        ApiDependencies.invoker.services.session_queue.delete_queue_item(item_id)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while deleting queue item: {e}")


 @session_queue_router.put(
@@ -281,8 +388,12 @@ async def cancel_queue_item(
     item_id: int = Path(description="The queue item to cancel"),
 ) -> SessionQueueItem:
     """Deletes a queue item"""
-
-    return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
+    try:
+        return ApiDependencies.invoker.services.session_queue.cancel_queue_item(item_id)
+    except SessionQueueItemNotFoundError:
+        raise HTTPException(status_code=404, detail=f"Queue item with id {item_id} not found in queue {queue_id}")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while canceling queue item: {e}")


 @session_queue_router.get(
@@ -295,6 +406,27 @@ async def counts_by_destination(
     destination: str = Query(description="The destination to query"),
 ) -> SessionQueueCountsByDestination:
     """Gets the counts of queue items by destination"""
-    return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
-        queue_id=queue_id, destination=destination
-    )
+    try:
+        return ApiDependencies.invoker.services.session_queue.get_counts_by_destination(
+            queue_id=queue_id, destination=destination
+        )
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while fetching counts by destination: {e}")
+
+
+@session_queue_router.delete(
+    "/{queue_id}/d/{destination}",
+    operation_id="delete_by_destination",
+    responses={200: {"model": DeleteByDestinationResult}},
+)
+async def delete_by_destination(
+    queue_id: str = Path(description="The queue id to query"),
+    destination: str = Path(description="The destination to query"),
+) -> DeleteByDestinationResult:
+    """Deletes all items with the given destination"""
+    try:
+        return ApiDependencies.invoker.services.session_queue.delete_by_destination(
+            queue_id=queue_id, destination=destination
+        )
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Unexpected error while deleting by destination: {e}")
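All of the session-queue hunks above apply one pattern: the bare service call is wrapped in try/except and re-raised as a FastAPI HTTPException, so failures surface as structured 404/500 responses instead of unhandled tracebacks. A minimal standalone sketch of that pattern (the app, ItemNotFoundError, and get_item below are illustrative stand-ins, not InvokeAI APIs):

from fastapi import FastAPI, HTTPException, Path

app = FastAPI()


class ItemNotFoundError(Exception):  # stand-in for SessionQueueItemNotFoundError
    pass


def get_item(item_id: int) -> dict:  # stand-in for the session-queue service call
    raise ItemNotFoundError


@app.get("/items/{item_id}")
async def read_item(item_id: int = Path(description="The item to fetch")) -> dict:
    try:
        return get_item(item_id)
    except ItemNotFoundError:
        # Known failure mode maps to a specific status code
        raise HTTPException(status_code=404, detail=f"Item {item_id} not found")
    except Exception as e:
        # Anything else becomes a 500 with the error message as the detail string
        raise HTTPException(status_code=500, detail=f"Unexpected error: {e}")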
@@ -158,7 +158,7 @@ web_root_path = Path(list(web_dir.__path__)[0])
 try:
     app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
 except RuntimeError:
-    logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
+    logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")

 app.mount(
     "/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
 )  # docs favicon is in here


@@ -499,7 +499,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

         ui_type = field.json_schema_extra.get("ui_type", None)
         if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
+            logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
             field.json_schema_extra.pop("ui_type")
     return None
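A side note on the recurring logger.warn to logger.warning changes here and in the hunks below: Logger.warn is a deprecated alias of Logger.warning (deprecated since Python 3.3, per the logging docs), so the rename is a correctness fix rather than a style one. Both spellings currently emit the same record:

import logging

logging.basicConfig(format="%(levelname)s:%(name)s:%(message)s", level=logging.WARNING)
logger = logging.getLogger("example")

logger.warning("preferred spelling")  # WARNING:example:preferred spelling
logger.warn("deprecated alias")       # same output, but the alias is deprecated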
@@ -582,6 +582,8 @@ def invocation(

         fields: dict[str, tuple[Any, FieldInfo]] = {}

+        original_model_fields: dict[str, OriginalModelField] = {}
+
         for field_name, field_info in cls.model_fields.items():
             annotation = field_info.annotation
             assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
@@ -589,7 +591,7 @@ def invocation(
                 f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
             )

-            cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
+            original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)

             validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)

@@ -613,7 +615,7 @@ def invocation(
                 raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
             uiconfig["version"] = version
         else:
-            logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
+            logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
             uiconfig["version"] = "1.0.0"

         cls.UIConfig = UIConfigBase(**uiconfig)
@@ -676,6 +678,7 @@ def invocation(
         docstring = cls.__doc__
         new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields)  # type: ignore
         new_class.__doc__ = docstring
+        new_class._original_model_fields = original_model_fields

         InvocationRegistry.register_invocation(new_class)
@@ -114,6 +114,13 @@ class CompelInvocation(BaseInvocation):

             c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)

+        del compel
+        del patched_tokenizer
+        del tokenizer
+        del ti_manager
+        del text_encoder
+        del text_encoder_info
+
         c = c.detach().to("cpu")

         conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
@@ -222,7 +229,10 @@ class SDXLPromptInvocationBase:
             else:
                 c_pooled = None

+        del compel
+        del patched_tokenizer
+        del tokenizer
         del ti_manager
         del text_encoder
         del text_encoder_info
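The added del statements drop the last references to the compel wrapper, tokenizers, and text encoder before the conditioning tensor is copied to the CPU; only once nothing references those objects can their GPU memory actually be reclaimed. A rough illustration of the idea (assumes a CUDA build of torch and an available GPU; the Linear layer stands in for a text encoder):

import torch

assert torch.cuda.is_available()  # this sketch assumes a CUDA device

model = torch.nn.Linear(4096, 4096).to("cuda")  # stand-in for a text encoder
c = model(torch.randn(1, 4096, device="cuda"))

del model                  # drop the last reference so the weights become collectable
c = c.detach().to("cpu")   # move the result off the GPU, as the invocation does
torch.cuda.empty_cache()   # hand freed blocks back to the driver
print(f"{torch.cuda.memory_allocated() / 2**20:.1f} MiB still allocated")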
@@ -64,6 +64,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
     Imagen3Model = "Imagen3ModelField"
     Imagen4Model = "Imagen4ModelField"
     ChatGPT4oModel = "ChatGPT4oModelField"
+    FluxKontextModel = "FluxKontextModelField"
     # endregion

     # region Misc Field Types
@@ -214,6 +215,7 @@ class FieldDescriptions:
     flux_redux_conditioning = "FLUX Redux conditioning tensor"
     vllm_model = "The VLLM model to use"
     flux_fill_conditioning = "FLUX Fill conditioning tensor"
+    flux_kontext_conditioning = "FLUX Kontext conditioning (reference image)"


 class ImageField(BaseModel):
@@ -290,6 +292,12 @@ class FluxFillConditioningField(BaseModel):
     mask: TensorField = Field(description="The FLUX Fill inpaint mask.")


+class FluxKontextConditioningField(BaseModel):
+    """A conditioning field for FLUX Kontext (reference image)."""
+
+    image: ImageField = Field(description="The Kontext reference image.")
+
+
 class SD3ConditioningField(BaseModel):
     """A conditioning tensor primitive value"""

@@ -437,7 +445,7 @@ class WithWorkflow:
     workflow = None

     def __init_subclass__(cls) -> None:
-        logger.warn(
+        logger.warning(
             f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
         )
         super().__init_subclass__()
@@ -578,7 +586,7 @@ def InputField(

     if default_factory is not _Unset and default_factory is not None:
         default = default_factory()
-        logger.warn('"default_factory" is not supported, calling it now to set "default"')
+        logger.warning('"default_factory" is not supported, calling it now to set "default"')

     # These are the args we may wish pass to the pydantic `Field()` function
     field_args = {
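FluxKontextConditioningField is a plain pydantic model, so a Kontext reference ends up as a small JSON payload in the graph. An equivalent standalone sketch (re-declaring simplified models for illustration rather than importing InvokeAI's):

from pydantic import BaseModel, Field


class ImageField(BaseModel):  # mirrors the shape of InvokeAI's ImageField
    image_name: str = Field(description="The name of the image")


class FluxKontextConditioningField(BaseModel):
    """A conditioning field for FLUX Kontext (reference image)."""

    image: ImageField = Field(description="The Kontext reference image.")


field = FluxKontextConditioningField(image=ImageField(image_name="ref_0001.png"))
print(field.model_dump())  # {'image': {'image_name': 'ref_0001.png'}}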
@@ -16,13 +16,12 @@ from invokeai.app.invocations.fields import (
     FieldDescriptions,
     FluxConditioningField,
     FluxFillConditioningField,
+    FluxKontextConditioningField,
     FluxReduxConditioningField,
     ImageField,
     Input,
     InputField,
     LatentsField,
-    WithBoard,
-    WithMetadata,
 )
 from invokeai.app.invocations.flux_controlnet import FluxControlNetField
 from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
@@ -34,6 +33,7 @@ from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXCo
 from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlNetFlux
 from invokeai.backend.flux.denoise import denoise
 from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.kontext_extension import KontextExtension
 from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
@@ -63,9 +63,9 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="3.3.0",
+    version="4.0.0",
 )
-class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
+class FluxDenoiseInvocation(BaseInvocation):
     """Run denoising process with a FLUX transformer model."""

     # If latents is provided, this means we are doing image-to-image.
@@ -145,11 +145,20 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         description=FieldDescriptions.vae,
         input=Input.Connection,
     )
+    # This node accepts a images for features like FLUX Fill, ControlNet, and Kontext, but needs to operate on them in
+    # latent space. We'll run the VAE to encode them in this node instead of requiring the user to run the VAE in
+    # upstream nodes.
     ip_adapter: IPAdapterField | list[IPAdapterField] | None = InputField(
         description=FieldDescriptions.ip_adapter, title="IP-Adapter", default=None, input=Input.Connection
     )
+
+    kontext_conditioning: Optional[FluxKontextConditioningField] = InputField(
+        default=None,
+        description="FLUX Kontext conditioning (reference image).",
+        input=Input.Connection,
+    )

     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         latents = self._run_diffusion(context)
@@ -376,6 +385,27 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             dtype=inference_dtype,
         )

+        kontext_extension = None
+        if self.kontext_conditioning is not None:
+            if not self.controlnet_vae:
+                raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
+
+            kontext_extension = KontextExtension(
+                context=context,
+                kontext_conditioning=self.kontext_conditioning,
+                vae_field=self.controlnet_vae,
+                device=TorchDevice.choose_torch_device(),
+                dtype=inference_dtype,
+            )
+
+        # Prepare Kontext conditioning if provided
+        img_cond_seq = None
+        img_cond_seq_ids = None
+        if kontext_extension is not None:
+            # Ensure batch sizes match
+            kontext_extension.ensure_batch_size(x.shape[0])
+            img_cond_seq, img_cond_seq_ids = kontext_extension.kontext_latents, kontext_extension.kontext_ids
+
         x = denoise(
             model=transformer,
             img=x,
@@ -391,6 +421,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             pos_ip_adapter_extensions=pos_ip_adapter_extensions,
             neg_ip_adapter_extensions=neg_ip_adapter_extensions,
             img_cond=img_cond,
+            img_cond_seq=img_cond_seq,
+            img_cond_seq_ids=img_cond_seq_ids,
         )

         x = unpack(x.float(), self.height, self.width)
@@ -865,7 +897,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

     def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
         def step_callback(state: PipelineIntermediateState) -> None:
-            state.latents = unpack(state.latents.float(), self.height, self.width).squeeze()
+            # The denoise function now handles Kontext conditioning correctly,
+            # so we don't need to slice the latents here
+            latents = state.latents.float()
+            state.latents = unpack(latents, self.height, self.width).squeeze()
             context.util.flux_step_callback(state)

         return step_callback
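The KontextExtension wiring above encodes the reference image with the VAE, then hands the packed reference latents to denoise as an extra token sequence (img_cond_seq) with separate position ids (img_cond_seq_ids). Conceptually, the transformer attends over image tokens and reference tokens jointly, and only the image tokens continue through denoising. A toy shape-level sketch of that concatenate-attend-slice idea (illustrative only, not the real FLUX attention code):

import torch

B, N_img, N_ref, D = 1, 16, 16, 8
img = torch.randn(B, N_img, D)      # packed image latents being denoised
ref = torch.randn(B, N_ref, D)      # packed Kontext reference latents

seq = torch.cat([img, ref], dim=1)  # joint sequence seen by the transformer
attn = torch.softmax(seq @ seq.transpose(1, 2) / D**0.5, dim=-1)
out = attn @ seq

img_out = out[:, :N_img]            # only the image tokens continue through denoising
print(img_out.shape)                # torch.Size([1, 16, 8])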
invokeai/app/invocations/flux_kontext.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxKontextConditioningField,
+    InputField,
+    OutputField,
+)
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.services.shared.invocation_context import InvocationContext
+
+
+@invocation_output("flux_kontext_output")
+class FluxKontextOutput(BaseInvocationOutput):
+    """The conditioning output of a FLUX Kontext invocation."""
+
+    kontext_cond: FluxKontextConditioningField = OutputField(
+        description=FieldDescriptions.flux_kontext_conditioning, title="Kontext Conditioning"
+    )
+
+
+@invocation(
+    "flux_kontext",
+    title="Kontext Conditioning - FLUX",
+    tags=["conditioning", "kontext", "flux"],
+    category="conditioning",
+    version="1.0.0",
+)
+class FluxKontextInvocation(BaseInvocation):
+    """Prepares a reference image for FLUX Kontext conditioning."""
+
+    image: ImageField = InputField(description="The Kontext reference image.")
+
+    def invoke(self, context: InvocationContext) -> FluxKontextOutput:
+        """Packages the provided image into a Kontext conditioning field."""
+        return FluxKontextOutput(kontext_cond=FluxKontextConditioningField(image=self.image))
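The new node is deliberately thin: it wraps the input image in a FluxKontextConditioningField and returns it. Because the output model is plain pydantic, the plumbing should be checkable without an InvocationContext (the image name below is hypothetical):

from invokeai.app.invocations.fields import FluxKontextConditioningField
from invokeai.app.invocations.flux_kontext import FluxKontextOutput
from invokeai.app.invocations.primitives import ImageField

out = FluxKontextOutput(
    kontext_cond=FluxKontextConditioningField(image=ImageField(image_name="ref_0001.png"))
)
print(out.kontext_cond.image.image_name)  # ref_0001.png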
@@ -1,5 +1,5 @@
 from contextlib import ExitStack
-from typing import Iterator, Literal, Optional, Tuple
+from typing import Iterator, Literal, Optional, Tuple, Union

 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast
@@ -111,6 +111,9 @@ class FluxTextEncoderInvocation(BaseInvocation):

         t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

+        if context.config.get().log_tokenization:
+            self._log_t5_tokenization(context, t5_tokenizer)
+
         context.util.signal_progress("Running T5 encoder")
         prompt_embeds = t5_encoder(prompt)

@@ -151,6 +154,9 @@ class FluxTextEncoderInvocation(BaseInvocation):

         clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)

+        if context.config.get().log_tokenization:
+            self._log_clip_tokenization(context, clip_tokenizer)
+
         context.util.signal_progress("Running CLIP encoder")
         pooled_prompt_embeds = clip_encoder(prompt)

@@ -170,3 +176,88 @@ class FluxTextEncoderInvocation(BaseInvocation):
             assert isinstance(lora_info.model, ModelPatchRaw)
             yield (lora_info.model, lora.weight)
             del lora_info
+
+    def _log_t5_tokenization(
+        self,
+        context: InvocationContext,
+        tokenizer: Union[T5Tokenizer, T5TokenizerFast],
+    ) -> None:
+        """Logs the tokenization of a prompt for a T5-based model like FLUX."""
+
+        # Tokenize the prompt using the same parameters as the model's text encoder.
+        # T5 tokenizers add an EOS token (</s>) and then pad to max_length.
+        tokenized_output = tokenizer(
+            self.prompt,
+            padding="max_length",
+            max_length=self.t5_max_seq_len,
+            truncation=True,
+            add_special_tokens=True,  # This is important for T5 to add the EOS token.
+            return_tensors="pt",
+        )
+
+        input_ids = tokenized_output.input_ids[0]
+        tokens = tokenizer.convert_ids_to_tokens(input_ids)
+
+        # The T5 tokenizer uses a space-like character '▁' (U+2581) to denote spaces.
+        # We'll replace it with a regular space for readability.
+        tokens = [t.replace("\u2581", " ") for t in tokens]
+
+        tokenized_str = ""
+        used_tokens = 0
+        for token in tokens:
+            if token == tokenizer.eos_token:
+                tokenized_str += f"\x1b[0;31m{token}\x1b[0m"  # Red for EOS
+                used_tokens += 1
+            elif token == tokenizer.pad_token:
+                # tokenized_str += f"\x1b[0;34m{token}\x1b[0m"  # Blue for PAD
+                continue
+            else:
+                color = (used_tokens % 6) + 1  # Cycle through 6 colors
+                tokenized_str += f"\x1b[0;3{color}m{token}\x1b[0m"
+                used_tokens += 1
+
+        context.logger.info(f">> [T5 TOKENLOG] Tokens ({used_tokens}/{self.t5_max_seq_len}):")
+        context.logger.info(f"{tokenized_str}\x1b[0m")
+
+    def _log_clip_tokenization(
+        self,
+        context: InvocationContext,
+        tokenizer: CLIPTokenizer,
+    ) -> None:
+        """Logs the tokenization of a prompt for a CLIP-based model."""
+        max_length = tokenizer.model_max_length
+
+        tokenized_output = tokenizer(
+            self.prompt,
+            padding="max_length",
+            max_length=max_length,
+            truncation=True,
+            return_tensors="pt",
+        )
+
+        input_ids = tokenized_output.input_ids[0]
+        attention_mask = tokenized_output.attention_mask[0]
+        tokens = tokenizer.convert_ids_to_tokens(input_ids)
+
+        # The CLIP tokenizer uses '</w>' to denote spaces.
+        # We'll replace it with a regular space for readability.
+        tokens = [t.replace("</w>", " ") for t in tokens]
+
+        tokenized_str = ""
+        used_tokens = 0
+        for i, token in enumerate(tokens):
+            if attention_mask[i] == 0:
+                # Do not log padding tokens.
+                continue
+
+            if token == tokenizer.bos_token:
+                tokenized_str += f"\x1b[0;32m{token}\x1b[0m"  # Green for BOS
+            elif token == tokenizer.eos_token:
+                tokenized_str += f"\x1b[0;31m{token}\x1b[0m"  # Red for EOS
+            else:
+                color = (used_tokens % 6) + 1  # Cycle through 6 colors
+                tokenized_str += f"\x1b[0;3{color}m{token}\x1b[0m"
+            used_tokens += 1
+
+        context.logger.info(f">> [CLIP TOKENLOG] Tokens ({used_tokens}/{max_length}):")
+        context.logger.info(f"{tokenized_str}\x1b[0m")
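Both new loggers rely on the same ANSI trick: each token is wrapped in an SGR color escape (\x1b[0;3Nm ... \x1b[0m) with N cycling through six foreground colors, so adjacent tokens stay visually distinct in a terminal. The coloring loop, stripped of the tokenizer plumbing (the token list below is made up):

tokens = ["a", " photo", " of", " a", " cat", "</s>"]
EOS = "</s>"

colored = ""
used = 0
for tok in tokens:
    if tok == EOS:
        colored += f"\x1b[0;31m{tok}\x1b[0m"  # red for the EOS marker
    else:
        color = (used % 6) + 1                # cycle through 6 ANSI colors
        colored += f"\x1b[0;3{color}m{tok}\x1b[0m"
    used += 1

print(f"Tokens ({used}/{len(tokens)}):")
print(colored + "\x1b[0m")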
@@ -430,6 +430,15 @@ class FluxConditioningOutput(BaseInvocationOutput):
         return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))


+@invocation_output("flux_conditioning_collection_output")
+class FluxConditioningCollectionOutput(BaseInvocationOutput):
+    """Base class for nodes that output a collection of conditioning tensors"""
+
+    collection: list[FluxConditioningField] = OutputField(
+        description="The output conditioning tensors",
+    )
+
+
 @invocation_output("sd3_conditioning_output")
 class SD3ConditioningOutput(BaseInvocationOutput):
     """Base class for nodes that output a single SD3 conditioning tensor"""
@@ -14,15 +14,14 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
 class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._conn = db.conn
+        self._db = db

     def add_image_to_board(
         self,
         board_id: str,
         image_name: str,
     ) -> None:
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 INSERT INTO board_images (board_id, image_name)
@@ -31,17 +30,12 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                 """,
                 (board_id, image_name, board_id),
             )
-            self._conn.commit()
-        except sqlite3.Error as e:
-            self._conn.rollback()
-            raise e

     def remove_image_from_board(
         self,
         image_name: str,
     ) -> None:
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 DELETE FROM board_images
@@ -49,10 +43,6 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
                 """,
                 (image_name,),
             )
-            self._conn.commit()
-        except sqlite3.Error as e:
-            self._conn.rollback()
-            raise e

     def get_images_for_board(
         self,
@@ -60,27 +50,26 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         offset: int = 0,
         limit: int = 10,
     ) -> OffsetPaginatedResults[ImageRecord]:
         # TODO: this isn't paginated yet?
-        cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            SELECT images.*
-            FROM board_images
-            INNER JOIN images ON board_images.image_name = images.image_name
-            WHERE board_images.board_id = ?
-            ORDER BY board_images.updated_at DESC;
-            """,
-            (board_id,),
-        )
-        result = cast(list[sqlite3.Row], cursor.fetchall())
-        images = [deserialize_image_record(dict(r)) for r in result]
+        with self._db.transaction() as cursor:
+            cursor.execute(
+                """--sql
+                SELECT images.*
+                FROM board_images
+                INNER JOIN images ON board_images.image_name = images.image_name
+                WHERE board_images.board_id = ?
+                ORDER BY board_images.updated_at DESC;
+                """,
+                (board_id,),
+            )
+            result = cast(list[sqlite3.Row], cursor.fetchall())
+            images = [deserialize_image_record(dict(r)) for r in result]

-        cursor.execute(
-            """--sql
-            SELECT COUNT(*) FROM images WHERE 1=1;
-            """
-        )
-        count = cast(int, cursor.fetchone()[0])
+            cursor.execute(
+                """--sql
+                SELECT COUNT(*) FROM images WHERE 1=1;
+                """
+            )
+            count = cast(int, cursor.fetchone()[0])

         return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)

@@ -90,56 +79,55 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         categories: list[ImageCategory] | None,
         is_intermediate: bool | None,
     ) -> list[str]:
-        params: list[str | bool] = []
+        with self._db.transaction() as cursor:
+            params: list[str | bool] = []

-        # Base query is a join between images and board_images
-        stmt = """
-            SELECT images.image_name
-            FROM images
-            LEFT JOIN board_images ON board_images.image_name = images.image_name
-            WHERE 1=1
-            """
+            # Base query is a join between images and board_images
+            stmt = """
+                SELECT images.image_name
+                FROM images
+                LEFT JOIN board_images ON board_images.image_name = images.image_name
+                WHERE 1=1
+                """

-        # Handle board_id filter
-        if board_id == "none":
-            stmt += """--sql
-            AND board_images.board_id IS NULL
-            """
-        else:
-            stmt += """--sql
-            AND board_images.board_id = ?
-            """
-            params.append(board_id)
+            # Handle board_id filter
+            if board_id == "none":
+                stmt += """--sql
+                AND board_images.board_id IS NULL
+                """
+            else:
+                stmt += """--sql
+                AND board_images.board_id = ?
+                """
+                params.append(board_id)

-        # Add the category filter
-        if categories is not None:
-            # Convert the enum values to unique list of strings
-            category_strings = [c.value for c in set(categories)]
-            # Create the correct length of placeholders
-            placeholders = ",".join("?" * len(category_strings))
-            stmt += f"""--sql
-            AND images.image_category IN ( {placeholders} )
-            """
+            # Add the category filter
+            if categories is not None:
+                # Convert the enum values to unique list of strings
+                category_strings = [c.value for c in set(categories)]
+                # Create the correct length of placeholders
+                placeholders = ",".join("?" * len(category_strings))
+                stmt += f"""--sql
+                AND images.image_category IN ( {placeholders} )
+                """

-            # Unpack the included categories into the query params
-            for c in category_strings:
-                params.append(c)
+                # Unpack the included categories into the query params
+                for c in category_strings:
+                    params.append(c)

-        # Add the is_intermediate filter
-        if is_intermediate is not None:
-            stmt += """--sql
-            AND images.is_intermediate = ?
-            """
-            params.append(is_intermediate)
+            # Add the is_intermediate filter
+            if is_intermediate is not None:
+                stmt += """--sql
+                AND images.is_intermediate = ?
+                """
+                params.append(is_intermediate)

-        # Put a ring on it
-        stmt += ";"
+            # Put a ring on it
+            stmt += ";"

-        # Execute the query
-        cursor = self._conn.cursor()
-        cursor.execute(stmt, params)
+            cursor.execute(stmt, params)

-        result = cast(list[sqlite3.Row], cursor.fetchall())
+            result = cast(list[sqlite3.Row], cursor.fetchall())
         image_names = [r[0] for r in result]
         return image_names

@@ -147,31 +135,31 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
         self,
         image_name: str,
     ) -> Optional[str]:
-        cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            SELECT board_id
-            FROM board_images
-            WHERE image_name = ?;
-            """,
-            (image_name,),
-        )
-        result = cursor.fetchone()
+        with self._db.transaction() as cursor:
+            cursor.execute(
+                """--sql
+                SELECT board_id
+                FROM board_images
+                WHERE image_name = ?;
+                """,
+                (image_name,),
+            )
+            result = cursor.fetchone()
         if result is None:
             return None
         return cast(str, result[0])

     def get_image_count_for_board(self, board_id: str) -> int:
-        cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            SELECT COUNT(*)
-            FROM board_images
-            INNER JOIN images ON board_images.image_name = images.image_name
-            WHERE images.is_intermediate = FALSE
-            AND board_images.board_id = ?;
-            """,
-            (board_id,),
-        )
-        count = cast(int, cursor.fetchone()[0])
+        with self._db.transaction() as cursor:
+            cursor.execute(
+                """--sql
+                SELECT COUNT(*)
+                FROM board_images
+                INNER JOIN images ON board_images.image_name = images.image_name
+                WHERE images.is_intermediate = FALSE
+                AND board_images.board_id = ?;
+                """,
+                (board_id,),
+            )
+            count = cast(int, cursor.fetchone()[0])
         return count
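This file, and the board and image record storage classes below, all swap hand-rolled cursor/commit/rollback bookkeeping for a single `with self._db.transaction() as cursor:` block. The diff never shows SqliteDatabase.transaction itself; a plausible minimal shape is a context manager that yields a cursor, commits when the block exits cleanly, and rolls back on any exception (a sketch only, not the real class, which presumably also carries locking, logging, and migrations):

import sqlite3
from contextlib import contextmanager
from typing import Iterator


class SqliteDatabase:
    """Minimal sketch of a transactional SQLite wrapper."""

    def __init__(self, path: str = ":memory:") -> None:
        self.conn = sqlite3.connect(path)

    @contextmanager
    def transaction(self) -> Iterator[sqlite3.Cursor]:
        cursor = self.conn.cursor()
        try:
            yield cursor
            self.conn.commit()    # the block finished normally
        except Exception:
            self.conn.rollback()  # any failure undoes the whole block
            raise
        finally:
            cursor.close()


db = SqliteDatabase()
with db.transaction() as cur:
    cur.execute("CREATE TABLE boards (board_id TEXT PRIMARY KEY)")
    cur.execute("INSERT INTO boards VALUES ('abc')")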
@@ -20,61 +20,57 @@ from invokeai.app.util.misc import uuid_string
 class SqliteBoardRecordStorage(BoardRecordStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._conn = db.conn
+        self._db = db

     def delete(self, board_id: str) -> None:
-        try:
-            cursor = self._conn.cursor()
-            cursor.execute(
-                """--sql
-                DELETE FROM boards
-                WHERE board_id = ?;
-                """,
-                (board_id,),
-            )
-            self._conn.commit()
-        except Exception as e:
-            self._conn.rollback()
-            raise BoardRecordDeleteException from e
+        with self._db.transaction() as cursor:
+            try:
+                cursor.execute(
+                    """--sql
+                    DELETE FROM boards
+                    WHERE board_id = ?;
+                    """,
+                    (board_id,),
+                )
+            except Exception as e:
+                raise BoardRecordDeleteException from e

     def save(
         self,
         board_name: str,
     ) -> BoardRecord:
-        try:
-            board_id = uuid_string()
-            cursor = self._conn.cursor()
-            cursor.execute(
-                """--sql
-                INSERT OR IGNORE INTO boards (board_id, board_name)
-                VALUES (?, ?);
-                """,
-                (board_id, board_name),
-            )
-            self._conn.commit()
-        except sqlite3.Error as e:
-            self._conn.rollback()
-            raise BoardRecordSaveException from e
+        with self._db.transaction() as cursor:
+            try:
+                board_id = uuid_string()
+                cursor.execute(
+                    """--sql
+                    INSERT OR IGNORE INTO boards (board_id, board_name)
+                    VALUES (?, ?);
+                    """,
+                    (board_id, board_name),
+                )
+            except sqlite3.Error as e:
+                raise BoardRecordSaveException from e
         return self.get(board_id)

     def get(
         self,
         board_id: str,
     ) -> BoardRecord:
-        try:
-            cursor = self._conn.cursor()
-            cursor.execute(
-                """--sql
-                SELECT *
-                FROM boards
-                WHERE board_id = ?;
-                """,
-                (board_id,),
-            )
-
-            result = cast(Union[sqlite3.Row, None], cursor.fetchone())
-        except sqlite3.Error as e:
-            raise BoardRecordNotFoundException from e
+        with self._db.transaction() as cursor:
+            try:
+                cursor.execute(
+                    """--sql
+                    SELECT *
+                    FROM boards
+                    WHERE board_id = ?;
+                    """,
+                    (board_id,),
+                )
+
+                result = cast(Union[sqlite3.Row, None], cursor.fetchone())
+            except sqlite3.Error as e:
+                raise BoardRecordNotFoundException from e
         if result is None:
             raise BoardRecordNotFoundException
         return BoardRecord(**dict(result))
@@ -84,45 +80,43 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         board_id: str,
         changes: BoardChanges,
     ) -> BoardRecord:
-        try:
-            cursor = self._conn.cursor()
-            # Change the name of a board
-            if changes.board_name is not None:
-                cursor.execute(
-                    """--sql
-                    UPDATE boards
-                    SET board_name = ?
-                    WHERE board_id = ?;
-                    """,
-                    (changes.board_name, board_id),
-                )
+        with self._db.transaction() as cursor:
+            try:
+                # Change the name of a board
+                if changes.board_name is not None:
+                    cursor.execute(
+                        """--sql
+                        UPDATE boards
+                        SET board_name = ?
+                        WHERE board_id = ?;
+                        """,
+                        (changes.board_name, board_id),
+                    )

-            # Change the cover image of a board
-            if changes.cover_image_name is not None:
-                cursor.execute(
-                    """--sql
-                    UPDATE boards
-                    SET cover_image_name = ?
-                    WHERE board_id = ?;
-                    """,
-                    (changes.cover_image_name, board_id),
-                )
+                # Change the cover image of a board
+                if changes.cover_image_name is not None:
+                    cursor.execute(
+                        """--sql
+                        UPDATE boards
+                        SET cover_image_name = ?
+                        WHERE board_id = ?;
+                        """,
+                        (changes.cover_image_name, board_id),
+                    )

-            # Change the archived status of a board
-            if changes.archived is not None:
-                cursor.execute(
-                    """--sql
-                    UPDATE boards
-                    SET archived = ?
-                    WHERE board_id = ?;
-                    """,
-                    (changes.archived, board_id),
-                )
+                # Change the archived status of a board
+                if changes.archived is not None:
+                    cursor.execute(
+                        """--sql
+                        UPDATE boards
+                        SET archived = ?
+                        WHERE board_id = ?;
+                        """,
+                        (changes.archived, board_id),
+                    )

-            self._conn.commit()
-        except sqlite3.Error as e:
-            self._conn.rollback()
-            raise BoardRecordSaveException from e
+            except sqlite3.Error as e:
+                raise BoardRecordSaveException from e
         return self.get(board_id)

     def get_many(
@@ -133,78 +127,77 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
         limit: int = 10,
         include_archived: bool = False,
     ) -> OffsetPaginatedResults[BoardRecord]:
-        cursor = self._conn.cursor()
-
-        # Build base query
-        base_query = """
-            SELECT *
-            FROM boards
-            {archived_filter}
-            ORDER BY {order_by} {direction}
-            LIMIT ? OFFSET ?;
-            """
-
-        # Determine archived filter condition
-        archived_filter = "" if include_archived else "WHERE archived = 0"
-
-        final_query = base_query.format(
-            archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
-        )
-
-        # Execute query to fetch boards
-        cursor.execute(final_query, (limit, offset))
-
-        result = cast(list[sqlite3.Row], cursor.fetchall())
-        boards = [deserialize_board_record(dict(r)) for r in result]
-
-        # Determine count query
-        if include_archived:
-            count_query = """
-                SELECT COUNT(*)
-                FROM boards;
-                """
-        else:
-            count_query = """
-                SELECT COUNT(*)
-                FROM boards
-                WHERE archived = 0;
-                """
-
-        # Execute count query
-        cursor.execute(count_query)
-
-        count = cast(int, cursor.fetchone()[0])
+        with self._db.transaction() as cursor:
+            # Build base query
+            base_query = """
+                SELECT *
+                FROM boards
+                {archived_filter}
+                ORDER BY {order_by} {direction}
+                LIMIT ? OFFSET ?;
+                """
+
+            # Determine archived filter condition
+            archived_filter = "" if include_archived else "WHERE archived = 0"
+
+            final_query = base_query.format(
+                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
+            )
+
+            # Execute query to fetch boards
+            cursor.execute(final_query, (limit, offset))
+
+            result = cast(list[sqlite3.Row], cursor.fetchall())
+            boards = [deserialize_board_record(dict(r)) for r in result]
+
+            # Determine count query
+            if include_archived:
+                count_query = """
+                    SELECT COUNT(*)
+                    FROM boards;
+                    """
+            else:
+                count_query = """
+                    SELECT COUNT(*)
+                    FROM boards
+                    WHERE archived = 0;
+                    """
+
+            # Execute count query
+            cursor.execute(count_query)
+
+            count = cast(int, cursor.fetchone()[0])

         return OffsetPaginatedResults[BoardRecord](items=boards, offset=offset, limit=limit, total=count)

     def get_all(
         self, order_by: BoardRecordOrderBy, direction: SQLiteDirection, include_archived: bool = False
     ) -> list[BoardRecord]:
-        cursor = self._conn.cursor()
-        if order_by == BoardRecordOrderBy.Name:
-            base_query = """
-                SELECT *
-                FROM boards
-                {archived_filter}
-                ORDER BY LOWER(board_name) {direction}
-                """
-        else:
-            base_query = """
-                SELECT *
-                FROM boards
-                {archived_filter}
-                ORDER BY {order_by} {direction}
-                """
-
-        archived_filter = "" if include_archived else "WHERE archived = 0"
-
-        final_query = base_query.format(
-            archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
-        )
-
-        cursor.execute(final_query)
-
-        result = cast(list[sqlite3.Row], cursor.fetchall())
+        with self._db.transaction() as cursor:
+            if order_by == BoardRecordOrderBy.Name:
+                base_query = """
+                    SELECT *
+                    FROM boards
+                    {archived_filter}
+                    ORDER BY LOWER(board_name) {direction}
+                    """
+            else:
+                base_query = """
+                    SELECT *
+                    FROM boards
+                    {archived_filter}
+                    ORDER BY {order_by} {direction}
+                    """
+
+            archived_filter = "" if include_archived else "WHERE archived = 0"
+
+            final_query = base_query.format(
+                archived_filter=archived_filter, order_by=order_by.value, direction=direction.value
+            )
+
+            cursor.execute(final_query)
+
+            result = cast(list[sqlite3.Row], cursor.fetchall())
         boards = [deserialize_board_record(dict(r)) for r in result]

         return boards
@@ -24,7 +24,6 @@ from invokeai.frontend.cli.arg_parser import InvokeAIArgs
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
-DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
 PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
 ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
 ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
     vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
     lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
     pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
-    device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+    device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
     precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
     sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
     attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
     pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")

     # DEVICE
-    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
+    device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
     precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")

     # GENERATION
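The device setting moves from a closed Literal, which could only spell out `cuda:1`, to a free-form string validated by the regex `^(auto|cpu|mps|cuda(:\d+)?)$`, so any CUDA device ordinal is accepted. What the pattern does and does not accept:

import re

DEVICE_RE = re.compile(r"^(auto|cpu|mps|cuda(:\d+)?)$")

for candidate in ["auto", "cpu", "mps", "cuda", "cuda:0", "cuda:7", "cuda:", "rocm"]:
    print(candidate, bool(DEVICE_RE.fullmatch(candidate)))
# cuda:N matches for any non-negative integer N; "cuda:" and "rocm" are rejected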
@@ -8,6 +8,7 @@ import time
 import traceback
 from pathlib import Path
 from queue import Empty, PriorityQueue
+from shutil import disk_usage
 from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set

 import requests
@@ -335,6 +336,14 @@ class DownloadQueueService(DownloadQueueServiceBase):

         assert job.download_path

+        free_space = disk_usage(job.download_path.parent).free
+        GB = 2**30
+        self._logger.debug(f"Download is {job.total_bytes / GB:.2f} GB of {free_space / GB:.2f} GB free.")
+        if free_space < job.total_bytes:
+            raise RuntimeError(
+                f"Free disk space {free_space / GB:.2f} GB is not enough for download of {job.total_bytes / GB:.2f} GB."
+            )
+
         # Don't clobber an existing file. See commit 82c2c85202f88c6d24ff84710f297cfc6ae174af
         # for code that instead resumes an interrupted download.
         if job.download_path.exists():
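The new guard compares the free space reported by shutil.disk_usage for the download directory against the job's total size before any bytes are written. The same check in isolation (the path and size below are illustrative):

from pathlib import Path
from shutil import disk_usage

GB = 2**30


def check_free_space(download_path: Path, total_bytes: int) -> None:
    free_space = disk_usage(download_path.parent).free
    print(f"Download is {total_bytes / GB:.2f} GB of {free_space / GB:.2f} GB free.")
    if free_space < total_bytes:
        raise RuntimeError(
            f"Free disk space {free_space / GB:.2f} GB is not enough for download of {total_bytes / GB:.2f} GB."
        )


check_free_space(Path("/tmp/model.safetensors"), total_bytes=5 * GB)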
@@ -5,6 +5,7 @@ from typing import Optional
 from invokeai.app.invocations.fields import MetadataField
 from invokeai.app.services.image_records.image_records_common import (
     ImageCategory,
+    ImageNamesResult,
     ImageRecord,
     ImageRecordChanges,
     ResourceOrigin,
@@ -97,3 +98,17 @@ class ImageRecordStorageBase(ABC):
     def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
         """Gets the most recent image for a board."""
         pass
+
+    @abstractmethod
+    def get_image_names(
+        self,
+        starred_first: bool = True,
+        order_dir: SQLiteDirection = SQLiteDirection.Descending,
+        image_origin: Optional[ResourceOrigin] = None,
+        categories: Optional[list[ImageCategory]] = None,
+        is_intermediate: Optional[bool] = None,
+        board_id: Optional[str] = None,
+        search_term: Optional[str] = None,
+    ) -> ImageNamesResult:
+        """Gets ordered list of image names with metadata for optimistic updates."""
+        pass
@@ -3,7 +3,7 @@ import datetime
 from enum import Enum
 from typing import Optional, Union

-from pydantic import Field, StrictBool, StrictStr
+from pydantic import BaseModel, Field, StrictBool, StrictStr

 from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import get_iso_timestamp
@@ -207,3 +207,16 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
         starred=starred,
         has_workflow=has_workflow,
     )
+
+
+class ImageCollectionCounts(BaseModel):
+    starred_count: int = Field(description="The number of starred images in the collection.")
+    unstarred_count: int = Field(description="The number of unstarred images in the collection.")
+
+
+class ImageNamesResult(BaseModel):
+    """Response containing ordered image names with metadata for optimistic updates."""
+
+    image_names: list[str] = Field(description="Ordered list of image names")
+    starred_count: int = Field(description="Number of starred images (when starred_first=True)")
+    total_count: int = Field(description="Total number of images matching the query")
@@ -7,6 +7,7 @@ from invokeai.app.services.image_records.image_records_base import ImageRecordSt
|
||||
from invokeai.app.services.image_records.image_records_common import (
|
||||
IMAGE_DTO_COLS,
|
||||
ImageCategory,
|
||||
ImageNamesResult,
|
||||
ImageRecord,
|
||||
ImageRecordChanges,
|
||||
ImageRecordDeleteException,
|
||||
@@ -23,22 +24,22 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
def __init__(self, db: SqliteDatabase) -> None:
|
||||
super().__init__()
|
||||
self._conn = db.conn
|
||||
self._db = db
|
||||
|
||||
def get(self, image_name: str) -> ImageRecord:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
f"""--sql
|
||||
SELECT {IMAGE_DTO_COLS} FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
with self._db.transaction() as cursor:
|
||||
try:
|
||||
cursor.execute(
|
||||
f"""--sql
|
||||
SELECT {IMAGE_DTO_COLS} FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
|
||||
result = cast(Optional[sqlite3.Row], cursor.fetchone())
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordNotFoundException from e
|
||||
result = cast(Optional[sqlite3.Row], cursor.fetchone())
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordNotFoundException from e
|
||||
|
||||
if not result:
|
||||
raise ImageRecordNotFoundException
|
||||
@@ -46,17 +47,20 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
return deserialize_image_record(dict(result))
|
||||
|
||||
def get_metadata(self, image_name: str) -> Optional[MetadataField]:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT metadata FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
with self._db.transaction() as cursor:
|
||||
try:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT metadata FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
|
||||
result = cast(Optional[sqlite3.Row], cursor.fetchone())
|
||||
result = cast(Optional[sqlite3.Row], cursor.fetchone())
|
||||
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordNotFoundException from e
|
||||
|
||||
if not result:
|
||||
raise ImageRecordNotFoundException
|
||||
@@ -64,64 +68,60 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
as_dict = dict(result)
|
||||
metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
|
||||
return MetadataFieldValidator.validate_json(metadata_raw) if metadata_raw is not None else None
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordNotFoundException from e
|
||||
|
||||
def update(
|
||||
self,
|
||||
image_name: str,
|
||||
changes: ImageRecordChanges,
|
||||
) -> None:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
# Change the category of the image
|
||||
if changes.image_category is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET image_category = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.image_category, image_name),
|
||||
)
|
||||
with self._db.transaction() as cursor:
|
||||
try:
|
||||
# Change the category of the image
|
||||
if changes.image_category is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET image_category = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.image_category, image_name),
|
||||
)
|
||||
|
||||
# Change the session associated with the image
|
||||
if changes.session_id is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET session_id = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.session_id, image_name),
|
||||
)
|
||||
# Change the session associated with the image
|
||||
if changes.session_id is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET session_id = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.session_id, image_name),
|
||||
)
|
||||
|
||||
# Change the image's `is_intermediate`` flag
|
||||
if changes.is_intermediate is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET is_intermediate = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.is_intermediate, image_name),
|
||||
)
|
||||
# Change the image's `is_intermediate`` flag
|
||||
if changes.is_intermediate is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET is_intermediate = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.is_intermediate, image_name),
|
||||
)
|
||||
|
||||
# Change the image's `starred`` state
|
||||
if changes.starred is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET starred = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.starred, image_name),
|
||||
)
|
||||
# Change the image's `starred`` state
|
||||
if changes.starred is not None:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
UPDATE images
|
||||
SET starred = ?
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(changes.starred, image_name),
|
||||
)
|
||||
|
||||
self._conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise ImageRecordSaveException from e
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordSaveException from e
|
||||
|
||||
def get_many(
|
||||
self,
|
||||
@@ -135,166 +135,162 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
board_id: Optional[str] = None,
|
||||
search_term: Optional[str] = None,
|
||||
) -> OffsetPaginatedResults[ImageRecord]:
|
||||
cursor = self._conn.cursor()
|
||||
|
||||
# Manually build two queries - one for the count, one for the records
|
||||
count_query = """--sql
|
||||
SELECT COUNT(*)
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
images_query = f"""--sql
|
||||
SELECT {IMAGE_DTO_COLS}
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
query_conditions = ""
|
||||
query_params: list[Union[int, str, bool]] = []
|
||||
|
||||
if image_origin is not None:
|
||||
query_conditions += """--sql
|
||||
AND images.image_origin = ?
|
||||
"""
|
||||
query_params.append(image_origin.value)
|
||||
|
||||
if categories is not None:
|
||||
# Convert the enum values to unique list of strings
|
||||
category_strings = [c.value for c in set(categories)]
|
||||
# Create the correct length of placeholders
|
||||
placeholders = ",".join("?" * len(category_strings))
|
||||
|
||||
query_conditions += f"""--sql
|
||||
AND images.image_category IN ( {placeholders} )
|
||||
with self._db.transaction() as cursor:
|
||||
# Manually build two queries - one for the count, one for the records
|
||||
count_query = """--sql
|
||||
SELECT COUNT(*)
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
# Unpack the included categories into the query params
|
||||
for c in category_strings:
|
||||
query_params.append(c)
|
||||
|
||||
if is_intermediate is not None:
|
||||
query_conditions += """--sql
|
||||
AND images.is_intermediate = ?
|
||||
images_query = f"""--sql
|
||||
SELECT {IMAGE_DTO_COLS}
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
query_params.append(is_intermediate)
|
||||
query_conditions = ""
|
||||
query_params: list[Union[int, str, bool]] = []
|
||||
|
||||
# board_id of "none" is reserved for images without a board
|
||||
if board_id == "none":
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id IS NULL
|
||||
"""
|
||||
elif board_id is not None:
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id = ?
|
||||
"""
|
||||
query_params.append(board_id)
|
||||
if image_origin is not None:
|
||||
query_conditions += """--sql
|
||||
AND images.image_origin = ?
|
||||
"""
|
||||
query_params.append(image_origin.value)
|
||||
|
||||
# Search term condition
|
||||
if search_term:
|
||||
query_conditions += """--sql
|
||||
AND images.metadata LIKE ?
|
||||
"""
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
if categories is not None:
|
||||
# Convert the enum values to unique list of strings
|
||||
category_strings = [c.value for c in set(categories)]
|
||||
# Create the correct length of placeholders
|
||||
placeholders = ",".join("?" * len(category_strings))
|
||||
|
||||
if starred_first:
|
||||
query_pagination = f"""--sql
|
||||
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
|
||||
"""
|
||||
else:
|
||||
query_pagination = f"""--sql
|
||||
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
|
||||
"""
|
||||
query_conditions += f"""--sql
|
||||
AND images.image_category IN ( {placeholders} )
|
||||
"""
|
||||
|
||||
# Final images query with pagination
|
||||
images_query += query_conditions + query_pagination + ";"
|
||||
# Add all the parameters
|
||||
images_params = query_params.copy()
|
||||
# Add the pagination parameters
|
||||
images_params.extend([limit, offset])
|
||||
# Unpack the included categories into the query params
|
||||
for c in category_strings:
|
||||
query_params.append(c)
|
||||
|
||||
# Build the list of images, deserializing each row
|
||||
cursor.execute(images_query, images_params)
|
||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
images = [deserialize_image_record(dict(r)) for r in result]
|
||||
if is_intermediate is not None:
|
||||
query_conditions += """--sql
|
||||
AND images.is_intermediate = ?
|
||||
"""
|
||||
|
||||
# Set up and execute the count query, without pagination
|
||||
count_query += query_conditions + ";"
|
||||
count_params = query_params.copy()
|
||||
cursor.execute(count_query, count_params)
|
||||
count = cast(int, cursor.fetchone()[0])
|
||||
query_params.append(is_intermediate)
|
||||
|
||||
# board_id of "none" is reserved for images without a board
|
||||
if board_id == "none":
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id IS NULL
|
||||
"""
|
||||
elif board_id is not None:
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id = ?
|
||||
"""
|
||||
query_params.append(board_id)
|
||||
|
||||
# Search term condition
|
||||
if search_term:
|
||||
query_conditions += """--sql
|
||||
AND (
|
||||
images.metadata LIKE ?
|
||||
OR images.created_at LIKE ?
|
||||
)
|
||||
"""
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
|
||||
if starred_first:
|
||||
query_pagination = f"""--sql
|
||||
ORDER BY images.starred DESC, images.created_at {order_dir.value} LIMIT ? OFFSET ?
|
||||
"""
|
||||
else:
|
||||
query_pagination = f"""--sql
|
||||
ORDER BY images.created_at {order_dir.value} LIMIT ? OFFSET ?
|
||||
"""
|
||||
|
||||
# Final images query with pagination
|
||||
images_query += query_conditions + query_pagination + ";"
|
||||
# Add all the parameters
|
||||
images_params = query_params.copy()
|
||||
# Add the pagination parameters
|
||||
images_params.extend([limit, offset])
|
||||
|
||||
# Build the list of images, deserializing each row
|
||||
cursor.execute(images_query, images_params)
|
||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
|
||||
images = [deserialize_image_record(dict(r)) for r in result]
|
||||
|
||||
# Set up and execute the count query, without pagination
|
||||
count_query += query_conditions + ";"
|
||||
count_params = query_params.copy()
|
||||
cursor.execute(count_query, count_params)
|
||||
count = cast(int, cursor.fetchone()[0])
|
||||
|
||||
return OffsetPaginatedResults(items=images, offset=offset, limit=limit, total=count)
|
||||
|
||||
def delete(self, image_name: str) -> None:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
DELETE FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
self._conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise ImageRecordDeleteException from e
|
||||
with self._db.transaction() as cursor:
|
||||
try:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
DELETE FROM images
|
||||
WHERE image_name = ?;
|
||||
""",
|
||||
(image_name,),
|
||||
)
|
||||
except sqlite3.Error as e:
|
||||
raise ImageRecordDeleteException from e
|
||||
|
||||
    def delete_many(self, image_names: list[str]) -> None:
        with self._db.transaction() as cursor:
            try:
                placeholders = ",".join("?" for _ in image_names)

                # Construct the SQLite query with the placeholders
                query = f"DELETE FROM images WHERE image_name IN ({placeholders})"

                # Execute the query with the list of IDs as parameters
                cursor.execute(query, image_names)
            except sqlite3.Error as e:
                raise ImageRecordDeleteException from e

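Note that only the `?` placeholder string is interpolated into the SQL here; the values themselves stay parameterized, so this is not an injection risk. One edge case worth knowing: an empty list produces `IN ()`, which SQLite rejects as a syntax error. A small self-contained sketch of the same pattern with an empty-list guard (the `items` table and helper name are hypothetical, for illustration only):

# Editor's illustrative sketch (not part of the diff).
import sqlite3


def delete_rows_by_name(conn: sqlite3.Connection, names: list[str]) -> int:
    """Delete rows whose name is in `names`; return the number deleted.

    Guards against the empty list, since `IN ()` is a syntax error in SQLite.
    """
    if not names:
        return 0
    placeholders = ",".join("?" for _ in names)
    cursor = conn.execute(f"DELETE FROM items WHERE name IN ({placeholders})", names)
    conn.commit()
    return cursor.rowcount
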
    def get_intermediates_count(self) -> int:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT COUNT(*) FROM images
                WHERE is_intermediate = TRUE;
                """
            )
            count = cast(int, cursor.fetchone()[0])
        return count

    def delete_intermediates(self) -> list[str]:
        with self._db.transaction() as cursor:
            try:
                cursor.execute(
                    """--sql
                    SELECT image_name FROM images
                    WHERE is_intermediate = TRUE;
                    """
                )
                result = cast(list[sqlite3.Row], cursor.fetchall())
                image_names = [r[0] for r in result]
                cursor.execute(
                    """--sql
                    DELETE FROM images
                    WHERE is_intermediate = TRUE;
                    """
                )
            except sqlite3.Error as e:
                raise ImageRecordDeleteException from e
        return image_names

    def save(
        self,
@@ -310,75 +306,165 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        node_id: Optional[str] = None,
        metadata: Optional[str] = None,
    ) -> datetime:
        with self._db.transaction() as cursor:
            try:
                cursor.execute(
                    """--sql
                    INSERT OR IGNORE INTO images (
                        image_name,
                        image_origin,
                        image_category,
                        width,
                        height,
                        node_id,
                        session_id,
                        metadata,
                        is_intermediate,
                        starred,
                        has_workflow
                    )
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
                    """,
                    (
                        image_name,
                        image_origin.value,
                        image_category.value,
                        width,
                        height,
                        node_id,
                        session_id,
                        metadata,
                        is_intermediate,
                        starred,
                        has_workflow,
                    ),
                )

                cursor.execute(
                    """--sql
                    SELECT created_at
                    FROM images
                    WHERE image_name = ?;
                    """,
                    (image_name,),
                )

                created_at = datetime.fromisoformat(cursor.fetchone()[0])
            except sqlite3.Error as e:
                raise ImageRecordSaveException from e
        return created_at

    def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT images.*
                FROM images
                JOIN board_images ON images.image_name = board_images.image_name
                WHERE board_images.board_id = ?
                AND images.is_intermediate = FALSE
                ORDER BY images.starred DESC, images.created_at DESC
                LIMIT 1;
                """,
                (board_id,),
            )
            result = cast(Optional[sqlite3.Row], cursor.fetchone())

        if result is None:
            return None

        return deserialize_image_record(dict(result))

    def get_image_names(
        self,
        starred_first: bool = True,
        order_dir: SQLiteDirection = SQLiteDirection.Descending,
        image_origin: Optional[ResourceOrigin] = None,
        categories: Optional[list[ImageCategory]] = None,
        is_intermediate: Optional[bool] = None,
        board_id: Optional[str] = None,
        search_term: Optional[str] = None,
    ) -> ImageNamesResult:
        with self._db.transaction() as cursor:
            # Build query conditions (reused for both starred count and image names queries)
            query_conditions = ""
            query_params: list[Union[int, str, bool]] = []

            if image_origin is not None:
                query_conditions += """--sql
                AND images.image_origin = ?
                """
                query_params.append(image_origin.value)

            if categories is not None:
                category_strings = [c.value for c in set(categories)]
                placeholders = ",".join("?" * len(category_strings))
                query_conditions += f"""--sql
                AND images.image_category IN ( {placeholders} )
                """
                for c in category_strings:
                    query_params.append(c)

            if is_intermediate is not None:
                query_conditions += """--sql
                AND images.is_intermediate = ?
                """
                query_params.append(is_intermediate)

            if board_id == "none":
                query_conditions += """--sql
                AND board_images.board_id IS NULL
                """
            elif board_id is not None:
                query_conditions += """--sql
                AND board_images.board_id = ?
                """
                query_params.append(board_id)

            if search_term:
                query_conditions += """--sql
                AND (
                    images.metadata LIKE ?
                    OR images.created_at LIKE ?
                )
                """
                query_params.append(f"%{search_term.lower()}%")
                query_params.append(f"%{search_term.lower()}%")

            # Get starred count if starred_first is enabled
            starred_count = 0
            if starred_first:
                starred_count_query = f"""--sql
                SELECT COUNT(*)
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE images.starred = TRUE AND (1=1{query_conditions})
                """
                cursor.execute(starred_count_query, query_params)
                starred_count = cast(int, cursor.fetchone()[0])

            # Get all image names with proper ordering
            if starred_first:
                names_query = f"""--sql
                SELECT images.image_name
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE 1=1{query_conditions}
                ORDER BY images.starred DESC, images.created_at {order_dir.value}
                """
            else:
                names_query = f"""--sql
                SELECT images.image_name
                FROM images
                LEFT JOIN board_images ON board_images.image_name = images.image_name
                WHERE 1=1{query_conditions}
                ORDER BY images.created_at {order_dir.value}
                """

            cursor.execute(names_query, query_params)
            result = cast(list[sqlite3.Row], cursor.fetchall())
            image_names = [row[0] for row in result]

        return ImageNamesResult(image_names=image_names, starred_count=starred_count, total_count=len(image_names))

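Every condition fragment above begins with `AND ...`, which is why the queries anchor them on a constant-true `WHERE 1=1`: no fragment needs to know whether it happens to be the first condition. A minimal standalone sketch of the same pattern (the `items` table and its columns are hypothetical):

# Editor's illustrative sketch (not part of the diff).
from typing import Optional, Union


def build_query(color: Optional[str] = None, min_size: Optional[int] = None) -> tuple[str, list[Union[str, int]]]:
    """Build a dynamically filtered query using the `WHERE 1=1` anchor."""
    conditions = ""
    params: list[Union[str, int]] = []
    if color is not None:
        conditions += " AND color = ?"
        params.append(color)
    if min_size is not None:
        conditions += " AND size >= ?"
        params.append(min_size)
    return f"SELECT name FROM items WHERE 1=1{conditions}", params
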
@@ -6,6 +6,7 @@ from PIL.Image import Image as PILImageType
from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.image_records.image_records_common import (
    ImageCategory,
    ImageNamesResult,
    ImageRecord,
    ImageRecordChanges,
    ResourceOrigin,
@@ -125,7 +126,7 @@ class ImageServiceABC(ABC):
        board_id: Optional[str] = None,
        search_term: Optional[str] = None,
    ) -> OffsetPaginatedResults[ImageDTO]:
        """Gets a paginated list of image DTOs, with starred images first when starred_first=True."""
        pass

    @abstractmethod
@@ -147,3 +148,17 @@ class ImageServiceABC(ABC):
    def delete_images_on_board(self, board_id: str):
        """Deletes all images on a board."""
        pass

    @abstractmethod
    def get_image_names(
        self,
        starred_first: bool = True,
        order_dir: SQLiteDirection = SQLiteDirection.Descending,
        image_origin: Optional[ResourceOrigin] = None,
        categories: Optional[list[ImageCategory]] = None,
        is_intermediate: Optional[bool] = None,
        board_id: Optional[str] = None,
        search_term: Optional[str] = None,
    ) -> ImageNamesResult:
        """Gets an ordered list of image names with metadata for optimistic updates."""
        pass

@@ -1,6 +1,6 @@
from typing import Optional

from pydantic import BaseModel, Field

from invokeai.app.services.image_records.image_records_common import ImageRecord
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
@@ -39,3 +39,27 @@ def image_record_to_dto(
        thumbnail_url=thumbnail_url,
        board_id=board_id,
    )


class ResultWithAffectedBoards(BaseModel):
    affected_boards: list[str] = Field(description="The ids of boards affected by the operation")


class DeleteImagesResult(ResultWithAffectedBoards):
    deleted_images: list[str] = Field(description="The names of the images that were deleted")


class StarredImagesResult(ResultWithAffectedBoards):
    starred_images: list[str] = Field(description="The names of the images that were starred")


class UnstarredImagesResult(ResultWithAffectedBoards):
    unstarred_images: list[str] = Field(description="The names of the images that were unstarred")


class AddImagesToBoardResult(ResultWithAffectedBoards):
    added_images: list[str] = Field(description="The image names that were added to the board")


class RemoveImagesFromBoardResult(ResultWithAffectedBoards):
    removed_images: list[str] = Field(description="The image names that were removed from their board")

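The shared `ResultWithAffectedBoards` base keeps the bulk-image endpoints' payloads consistent: every operation reports which boards it touched, plus an operation-specific list. A quick sketch of constructing and serializing one of these results (the field values are illustrative, not from the diff):

# Editor's illustrative sketch (not part of the diff).
from pydantic import BaseModel, Field


class ResultWithAffectedBoards(BaseModel):
    affected_boards: list[str] = Field(description="The ids of boards affected by the operation")


class DeleteImagesResult(ResultWithAffectedBoards):
    deleted_images: list[str] = Field(description="The names of the images that were deleted")


result = DeleteImagesResult(
    affected_boards=["board-1", "none"],
    deleted_images=["img-001.png", "img-002.png"],
)
print(result.model_dump_json())
# {"affected_boards":["board-1","none"],"deleted_images":["img-001.png","img-002.png"]}
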
@@ -10,6 +10,7 @@ from invokeai.app.services.image_files.image_files_common import (
)
from invokeai.app.services.image_records.image_records_common import (
    ImageCategory,
    ImageNamesResult,
    ImageRecord,
    ImageRecordChanges,
    ImageRecordDeleteException,
@@ -78,7 +79,7 @@ class ImageService(ImageServiceABC):
                    board_id=board_id, image_name=image_name
                )
            except Exception as e:
                self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
        self.__invoker.services.image_files.save(
            image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
        )
@@ -309,3 +310,27 @@ class ImageService(ImageServiceABC):
        except Exception as e:
            self.__invoker.services.logger.error("Problem getting intermediates count")
            raise e

    def get_image_names(
        self,
        starred_first: bool = True,
        order_dir: SQLiteDirection = SQLiteDirection.Descending,
        image_origin: Optional[ResourceOrigin] = None,
        categories: Optional[list[ImageCategory]] = None,
        is_intermediate: Optional[bool] = None,
        board_id: Optional[str] = None,
        search_term: Optional[str] = None,
    ) -> ImageNamesResult:
        try:
            return self.__invoker.services.image_records.get_image_names(
                starred_first=starred_first,
                order_dir=order_dir,
                image_origin=image_origin,
                categories=categories,
                is_intermediate=is_intermediate,
                board_id=board_id,
                search_term=search_term,
            )
        except Exception as e:
            self.__invoker.services.logger.error("Problem getting image names")
            raise e

@@ -51,6 +51,7 @@ from invokeai.backend.model_manager.metadata import (
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant, ModelSourceType
from invokeai.backend.model_manager.util.lora_metadata_extractor import apply_lora_metadata
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice
@@ -148,7 +149,7 @@ class ModelInstallService(ModelInstallServiceBase):
    def _clear_pending_jobs(self) -> None:
        for job in self.list_jobs():
            if not job.in_terminal_state:
                self._logger.warning(f"Cancelling job {job.id}")
                self.cancel_job(job)
        while True:
            try:
@@ -667,6 +668,10 @@ class ModelInstallService(ModelInstallServiceBase):

        info = info or self._probe(model_path, config)

        # Apply LoRA metadata if applicable
        model_images_path = self.app_config.models_path / "model_images"
        apply_lora_metadata(info, model_path.resolve(), model_images_path)

        model_path = model_path.resolve()

        # Models in the Invoke-managed models dir should use relative paths.

@@ -78,11 +78,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        self._db = db
        self._logger = logger

    def add_model(self, config: AnyModelConfig) -> AnyModelConfig:
        """
        Add a model to the database.
@@ -93,38 +88,33 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Can raise DuplicateModelException and InvalidModelConfigException exceptions.
        """
        with self._db.transaction() as cursor:
            try:
                cursor.execute(
                    """--sql
                    INSERT INTO models (
                        id,
                        config
                    )
                    VALUES (?,?);
                    """,
                    (
                        config.key,
                        config.model_dump_json(),
                    ),
                )
            except sqlite3.IntegrityError as e:
                if "UNIQUE constraint failed" in str(e):
                    if "models.path" in str(e):
                        msg = f"A model with path '{config.path}' is already installed"
                    elif "models.name" in str(e):
                        msg = f"A model with name='{config.name}', type='{config.type}', base='{config.base}' is already installed"
                    else:
                        msg = f"A model with key '{config.key}' is already installed"
                    raise DuplicateModelException(msg) from e
                else:
                    raise e

        return self.get_model(config.key)

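The duplicate-detection above leans on the text of SQLite's `IntegrityError`, which names the violated constraint (for example, `UNIQUE constraint failed: models.path`), and maps it onto `DuplicateModelException` with a friendlier message. A self-contained sketch of that behavior, using a toy table in place of the real `models` schema:

# Editor's illustrative sketch (not part of the diff).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE models (id TEXT PRIMARY KEY, config TEXT)")
conn.execute("INSERT INTO models (id, config) VALUES (?, ?)", ("key-1", "{}"))

try:
    # Second insert with the same primary key trips the UNIQUE constraint.
    conn.execute("INSERT INTO models (id, config) VALUES (?, ?)", ("key-1", "{}"))
except sqlite3.IntegrityError as e:
    # sqlite reports e.g. "UNIQUE constraint failed: models.id", so the
    # handler can inspect str(e) to decide which message to raise.
    if "UNIQUE constraint failed" in str(e):
        print(f"duplicate detected: {e}")
    else:
        raise
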
@@ -136,8 +126,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Can raise an UnknownModelException
        """
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                DELETE FROM models
@@ -147,22 +136,17 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            )
            if cursor.rowcount == 0:
                raise UnknownModelException("model not found")

    def update_model(self, key: str, changes: ModelRecordChanges) -> AnyModelConfig:
        with self._db.transaction() as cursor:
            record = self.get_model(key)

            # Model configs use pydantic's `validate_assignment`, so each change is validated by pydantic.
            for field_name in changes.model_fields_set:
                setattr(record, field_name, getattr(changes, field_name))

            json_serialized = record.model_dump_json()

            cursor.execute(
                """--sql
                UPDATE models
@@ -174,10 +158,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            )
            if cursor.rowcount == 0:
                raise UnknownModelException("model not found")

        return self.get_model(key)

@@ -189,30 +169,30 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        Exceptions: UnknownModelException
        """
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE id=?;
                """,
                (key,),
            )
            rows = cursor.fetchone()
            if not rows:
                raise UnknownModelException("model not found")
            model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
        return model

    def get_model_by_hash(self, hash: str) -> AnyModelConfig:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE hash=?;
                """,
                (hash,),
            )
            rows = cursor.fetchone()
            if not rows:
                raise UnknownModelException("model not found")
            model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
@@ -224,15 +204,15 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

        :param key: Unique key for the model to be deleted
        """
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                select count(*) FROM models
                WHERE id=?;
                """,
                (key,),
            )
            count = cursor.fetchone()[0]
        return count > 0

    def search_by_attr(
@@ -255,43 +235,42 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        If none of the optional filters are passed, will return all
        models in the database.
        """
        with self._db.transaction() as cursor:
            assert isinstance(order_by, ModelRecordOrderBy)
            ordering = {
                ModelRecordOrderBy.Default: "type, base, name, format",
                ModelRecordOrderBy.Type: "type",
                ModelRecordOrderBy.Base: "base",
                ModelRecordOrderBy.Name: "name",
                ModelRecordOrderBy.Format: "format",
            }

            where_clause: list[str] = []
            bindings: list[str] = []
            if model_name:
                where_clause.append("name=?")
                bindings.append(model_name)
            if base_model:
                where_clause.append("base=?")
                bindings.append(base_model)
            if model_type:
                where_clause.append("type=?")
                bindings.append(model_type)
            if model_format:
                where_clause.append("format=?")
                bindings.append(model_format)
            where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""

            cursor.execute(
                f"""--sql
                SELECT config, strftime('%s',updated_at)
                FROM models
                {where}
                ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
                """,
                tuple(bindings),
            )
            result = cursor.fetchall()

        # Parse the model configs.
        results: list[AnyModelConfig] = []
@@ -313,69 +292,68 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):

    def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
        """Return models with the indicated path."""
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE path=?;
                """,
                (str(path),),
            )
            results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
        return results

    def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
        """Return models with the indicated hash."""
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT config, strftime('%s',updated_at) FROM models
                WHERE hash=?;
                """,
                (hash,),
            )
            results = [ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in cursor.fetchall()]
        return results

    def list_models(
        self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
    ) -> PaginatedResults[ModelSummary]:
        """Return a paginated summary listing of each model in the database."""
        with self._db.transaction() as cursor:
            assert isinstance(order_by, ModelRecordOrderBy)
            ordering = {
                ModelRecordOrderBy.Default: "type, base, name, format",
                ModelRecordOrderBy.Type: "type",
                ModelRecordOrderBy.Base: "base",
                ModelRecordOrderBy.Name: "name",
                ModelRecordOrderBy.Format: "format",
            }

            # Lock so that the database isn't updated while we're doing the two queries.
            # query1: get the total number of model configs
            cursor.execute(
                """--sql
                select count(*) from models;
                """,
                (),
            )
            total = int(cursor.fetchone()[0])

            # query2: fetch key fields
            cursor.execute(
                f"""--sql
                SELECT config
                FROM models
                ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
                LIMIT ?
                OFFSET ?;
                """,
                (
                    per_page,
                    page * per_page,
                ),
            )
            rows = cursor.fetchall()
        items = [ModelSummary.model_validate(dict(x)) for x in rows]
        return PaginatedResults(page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items)

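The page count here is `ceil(total / per_page)`, with pages zero-indexed and each page fetched via `LIMIT per_page OFFSET page * per_page`. A quick worked sketch of that arithmetic:

# Editor's illustrative sketch (not part of the diff): with 23 models and
# 10 per page, pages = ceil(23 / 10) = 3, and page 2 (zero-indexed) is
# fetched with LIMIT 10 OFFSET 20, returning the final 3 rows.
from math import ceil

total, per_page = 23, 10
pages = ceil(total / per_page)
assert pages == 3
for page in range(pages):
    print(f"page {page}: LIMIT {per_page} OFFSET {page * per_page}")
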
@@ -1,5 +1,3 @@
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
    ModelRelationshipRecordStorageBase,
)
@@ -9,58 +7,49 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase

class SqliteModelRelationshipRecordStorage(ModelRelationshipRecordStorageBase):
    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._db = db

    def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
        with self._db.transaction() as cursor:
            if model_key_1 == model_key_2:
                raise ValueError("Cannot relate a model to itself.")
            a, b = sorted([model_key_1, model_key_2])
            cursor.execute(
                "INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
                (a, b),
            )

    def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
        with self._db.transaction() as cursor:
            a, b = sorted([model_key_1, model_key_2])
            cursor.execute(
                "DELETE FROM model_relationships WHERE model_key_1 = ? AND model_key_2 = ?",
                (a, b),
            )

    def get_related_model_keys(self, model_key: str) -> list[str]:
        with self._db.transaction() as cursor:
            cursor.execute(
                """
                SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
                UNION
                SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
                """,
                (model_key, model_key),
            )
            result = [row[0] for row in cursor.fetchall()]
        return result

    def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
        with self._db.transaction() as cursor:
            key_list = ",".join("?" for _ in model_keys)
            cursor.execute(
                f"""
                SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
                UNION
                SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
                """,
                model_keys + model_keys,
            )
            result = [row[0] for row in cursor.fetchall()]
        return result

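Relationships are symmetric but stored only once, with the pair's keys sorted, so lookups must `UNION` both directions; the batch variant passes the key list twice because the placeholder list appears in both `SELECT`s. A runnable reproduction of that query shape:

# Editor's illustrative sketch (not part of the diff).
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE model_relationships (model_key_1 TEXT, model_key_2 TEXT)")
for pair in [("a", "b"), ("a", "c"), ("b", "d")]:
    conn.execute("INSERT INTO model_relationships VALUES (?, ?)", tuple(sorted(pair)))

keys = ["a"]
placeholders = ",".join("?" for _ in keys)
rows = conn.execute(
    f"""
    SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({placeholders})
    UNION
    SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({placeholders})
    """,
    keys + keys,  # the parameter list is consumed once per placeholder group
).fetchall()
print(sorted(row[0] for row in rows))  # ['b', 'c']
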
@@ -1,3 +1,4 @@
import gc
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Thread
@@ -439,6 +440,12 @@ class DefaultSessionProcessor(SessionProcessorBase):
                    poll_now_event.wait(self._polling_interval)
                    continue

                # GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
                # Most queue items take seconds to execute, so the relative cost of a GC is very small.
                # Python rarely returns allocated memory to the OS, so anything we can do to reduce the peak
                # allocation is well worth it.
                gc.collect()

                self._invoker.services.logger.info(
                    f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
                )

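The reasoning in that comment generalizes: when each unit of work takes seconds, an explicit collection between units costs milliseconds but trims the peak heap before the next big allocation. A minimal sketch of the same placement (`do_work` is a stand-in for queue-item execution, not real InvokeAI code):

# Editor's illustrative sketch (not part of the diff).
import gc
import time


def do_work(item: int) -> list[int]:
    # Stand-in for executing a queue item; allocates a transient buffer.
    return [item] * 1_000_000


def process_items(items: list[int]) -> None:
    """Collect between long-running work items, where milliseconds of GC
    are negligible next to seconds of work."""
    for item in items:
        t0 = time.perf_counter()
        gc.collect()  # free cyclic garbage before the next peak allocation
        do_work(item)
        print(f"item {item}: {time.perf_counter() - t0:.3f}s (incl. gc)")


process_items([1, 2, 3])
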
@@ -10,6 +10,8 @@ from invokeai.app.services.session_queue.session_queue_common import (
    CancelByDestinationResult,
    CancelByQueueIDResult,
    ClearResult,
    DeleteAllExceptCurrentResult,
    DeleteByDestinationResult,
    EnqueueBatchResult,
    IsEmptyResult,
    IsFullResult,
@@ -17,7 +19,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
    RetryItemsResult,
    SessionQueueCountsByDestination,
    SessionQueueItem,
    SessionQueueStatus,
)
from invokeai.app.services.shared.graph import GraphExecutionState
@@ -92,6 +93,11 @@ class SessionQueueBase(ABC):
        """Cancels a session queue item"""
        pass

    @abstractmethod
    def delete_queue_item(self, item_id: int) -> None:
        """Deletes a session queue item"""
        pass

    @abstractmethod
    def fail_queue_item(
        self, item_id: int, error_type: str, error_message: str, error_traceback: str
@@ -109,6 +115,11 @@ class SessionQueueBase(ABC):
        """Cancels all queue items with the given batch destination"""
        pass

    @abstractmethod
    def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
        """Deletes all queue items with the given batch destination"""
        pass

    @abstractmethod
    def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
        """Cancels all queue items with matching queue ID"""
@@ -119,6 +130,11 @@ class SessionQueueBase(ABC):
        """Cancels all queue items except in-progress items"""
        pass

    @abstractmethod
    def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
        """Deletes all queue items except in-progress items"""
        pass

    @abstractmethod
    def list_queue_items(
        self,
@@ -127,10 +143,20 @@ class SessionQueueBase(ABC):
        priority: int,
        cursor: Optional[int] = None,
        status: Optional[QUEUE_ITEM_STATUS] = None,
        destination: Optional[str] = None,
    ) -> CursorPaginatedResults[SessionQueueItem]:
        """Gets a page of session queue items"""
        pass

    @abstractmethod
    def list_all_queue_items(
        self,
        queue_id: str,
        destination: Optional[str] = None,
    ) -> list[SessionQueueItem]:
        """Gets all queue items that match the given parameters"""
        pass

    @abstractmethod
    def get_queue_item(self, item_id: int) -> SessionQueueItem:
        """Gets a session queue item by ID"""

@@ -205,9 +205,10 @@ class FieldIdentifier(BaseModel):
    kind: Literal["input", "output"] = Field(description="The kind of field")
    node_id: str = Field(description="The ID of the node")
    field_name: str = Field(description="The name of the field")
    user_label: str | None = Field(description="The user label of the field, if any")


class SessionQueueItem(BaseModel):
    """Session queue item without the full graph. Used for serialization."""

    item_id: int = Field(description="The identifier of the session queue item")
@@ -251,42 +252,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
        default=None,
        description="The ID of the published workflow associated with this queue item",
    )
    api_input_fields: Optional[list[FieldIdentifier]] = Field(
        default=None, description="The fields that were used as input to the API"
    )
    api_output_fields: Optional[list[FieldIdentifier]] = Field(
        default=None, description="The nodes that were used as output from the API"
    )
    credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")

    model_config = ConfigDict(
        json_schema_extra={
            "required": [
                "item_id",
                "status",
                "batch_id",
                "queue_id",
                "session_id",
                "priority",
                "created_at",
                "updated_at",
            ]
        }
    )

    session: GraphExecutionState = Field(description="The fully-populated session to be executed")
    workflow: Optional[WorkflowWithoutID] = Field(
        default=None, description="The workflow associated with this queue item"
@@ -366,6 +332,7 @@ class EnqueueBatchResult(BaseModel):
    requested: int = Field(description="The total number of queue items requested to be enqueued")
    batch: Batch = Field(description="The batch that was enqueued")
    priority: int = Field(description="The priority of the enqueued batch")
    item_ids: list[int] = Field(description="The IDs of the queue items that were enqueued")


class RetryItemsResult(BaseModel):
@@ -397,6 +364,18 @@ class CancelByDestinationResult(CancelByBatchIDsResult):
    pass


class DeleteByDestinationResult(BaseModel):
    """Result of deleting by a destination"""

    deleted: int = Field(..., description="Number of queue items deleted")


class DeleteAllExceptCurrentResult(DeleteByDestinationResult):
    """Result of deleting all except current"""

    pass


class CancelByQueueIDResult(CancelByBatchIDsResult):
    """Result of canceling by queue id"""

@@ -17,6 +17,8 @@ from invokeai.app.services.session_queue.session_queue_common import (
    CancelByDestinationResult,
    CancelByQueueIDResult,
    ClearResult,
    DeleteAllExceptCurrentResult,
    DeleteByDestinationResult,
    EnqueueBatchResult,
    IsEmptyResult,
    IsFullResult,
@@ -24,7 +26,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
    RetryItemsResult,
    SessionQueueCountsByDestination,
    SessionQueueItem,
    SessionQueueItemNotFoundError,
    SessionQueueStatus,
    ValueToInsertTuple,
@@ -46,22 +47,17 @@ class SqliteSessionQueue(SessionQueueBase):
        clear_result = self.clear(DEFAULT_QUEUE_ID)
        if clear_result.deleted > 0:
            self.__invoker.services.logger.info(f"Cleared all {clear_result.deleted} queue items")
        else:
            prune_result = self.prune(DEFAULT_QUEUE_ID)
            if prune_result.deleted > 0:
                self.__invoker.services.logger.info(f"Pruned {prune_result.deleted} finished queue items")

    def __init__(self, db: SqliteDatabase) -> None:
        super().__init__()
        self._db = db

    def _set_in_progress_to_canceled(self) -> None:
        """
        Sets all in_progress queue items to canceled. Run on app startup, not associated with any queue.
        This is necessary because the invoker may have been killed while processing a queue item.
        """
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                UPDATE session_queue
@@ -69,102 +65,104 @@ class SqliteSessionQueue(SessionQueueBase):
                WHERE status = 'in_progress';
                """
            )

    def _get_current_queue_size(self, queue_id: str) -> int:
        """Gets the current number of pending queue items"""
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT count(*)
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'pending'
                """,
                (queue_id,),
            )
            count = cast(int, cursor.fetchone()[0])
        return count

    def _get_highest_priority(self, queue_id: str) -> int:
        """Gets the highest priority value in the queue"""
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT MAX(priority)
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'pending'
                """,
                (queue_id,),
            )
            priority = cast(Union[int, None], cursor.fetchone()[0]) or 0
        return priority

    async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
        # TODO: how does this work in a multi-user scenario?
        current_queue_size = self._get_current_queue_size(queue_id)
        max_queue_size = self.__invoker.services.configuration.max_queue_size
        max_new_queue_items = max_queue_size - current_queue_size

        priority = 0
        if prepend:
            priority = self._get_highest_priority(queue_id) + 1

        requested_count = await asyncio.to_thread(
            calc_session_count,
            batch=batch,
        )
        values_to_insert = await asyncio.to_thread(
            prepare_values_to_insert,
            queue_id=queue_id,
            batch=batch,
            priority=priority,
            max_new_queue_items=max_new_queue_items,
        )
        enqueued_count = len(values_to_insert)

        if requested_count > enqueued_count:
            values_to_insert = values_to_insert[:max_new_queue_items]

        with self._db.transaction() as cursor:
            cursor.executemany(
                """--sql
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                values_to_insert,
            )
            cursor.execute(
                """--sql
                SELECT item_id
                FROM session_queue
                WHERE batch_id = ?
                ORDER BY item_id DESC;
                """,
                (batch.batch_id,),
            )
            item_ids = [row[0] for row in cursor.fetchall()]
        enqueue_result = EnqueueBatchResult(
            queue_id=queue_id,
            requested=requested_count,
            enqueued=enqueued_count,
            batch=batch,
            priority=priority,
            item_ids=item_ids,
        )
        self.__invoker.services.events.emit_batch_enqueued(enqueue_result)
        return enqueue_result

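The enqueue path now runs `calc_session_count` and `prepare_values_to_insert` through `asyncio.to_thread`, so preparing a large batch no longer blocks the event loop. A minimal standalone sketch of that pattern (`expensive_prep` is a stand-in, not an InvokeAI function):

# Editor's illustrative sketch (not part of the diff).
import asyncio
import time


def expensive_prep(n: int) -> list[int]:
    # Stand-in for calc_session_count / prepare_values_to_insert:
    # blocking work that should not run on the event loop.
    time.sleep(0.1)
    return list(range(n))


async def enqueue(n: int) -> int:
    # Off-load the blocking call to a worker thread; the event loop
    # stays free to service other requests while it runs.
    values = await asyncio.to_thread(expensive_prep, n)
    return len(values)


print(asyncio.run(enqueue(5)))  # 5
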
    def dequeue(self) -> Optional[SessionQueueItem]:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE status = 'pending'
                ORDER BY
                    priority DESC,
                    item_id ASC
                LIMIT 1
                """
            )
            result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        if result is None:
            return None
        queue_item = SessionQueueItem.queue_item_from_dict(dict(result))
@@ -172,40 +170,40 @@ class SqliteSessionQueue(SessionQueueBase):
        return queue_item

    def get_next(self, queue_id: str) -> Optional[SessionQueueItem]:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'pending'
                ORDER BY
                    priority DESC,
                    created_at ASC
                LIMIT 1
                """,
                (queue_id,),
            )
            result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        if result is None:
            return None
        return SessionQueueItem.queue_item_from_dict(dict(result))

    def get_current(self, queue_id: str) -> Optional[SessionQueueItem]:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT *
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND status = 'in_progress'
                LIMIT 1
                """,
                (queue_id,),
            )
            result = cast(Union[sqlite3.Row, None], cursor.fetchone())
        if result is None:
            return None
        return SessionQueueItem.queue_item_from_dict(dict(result))
@@ -218,8 +216,23 @@ class SqliteSessionQueue(SessionQueueBase):
        error_message: Optional[str] = None,
        error_traceback: Optional[str] = None,
    ) -> SessionQueueItem:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT status FROM session_queue WHERE item_id = ?
                """,
                (item_id,),
            )
            row = cursor.fetchone()
            if row is None:
                raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
            current_status = row[0]

        # Only update if not already finished (completed, failed or canceled)
        if current_status in ("completed", "failed", "canceled"):
            return self.get_queue_item(item_id)

        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                UPDATE session_queue
@@ -228,10 +241,7 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (status, error_type, error_message, error_traceback, item_id),
            )

        queue_item = self.get_queue_item(item_id)
        batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
        queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
@@ -239,35 +249,34 @@ class SqliteSessionQueue(SessionQueueBase):
        return queue_item

    def is_empty(self, queue_id: str) -> IsEmptyResult:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT count(*)
                FROM session_queue
                WHERE queue_id = ?
                """,
                (queue_id,),
            )
            is_empty = cast(int, cursor.fetchone()[0]) == 0
        return IsEmptyResult(is_empty=is_empty)

    def is_full(self, queue_id: str) -> IsFullResult:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT count(*)
                FROM session_queue
                WHERE queue_id = ?
                """,
                (queue_id,),
            )
            max_queue_size = self.__invoker.services.configuration.max_queue_size
            is_full = cast(int, cursor.fetchone()[0]) >= max_queue_size
        return IsFullResult(is_full=is_full)

    def clear(self, queue_id: str) -> ClearResult:
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                SELECT COUNT(*)
@@ -285,24 +294,19 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
        self.__invoker.services.events.emit_queue_cleared(queue_id)
        return ClearResult(deleted=count)

    def prune(self, queue_id: str) -> PruneResult:
        with self._db.transaction() as cursor:
            where = """--sql
                WHERE
                    queue_id = ?
                    AND (
                        status = 'completed'
                        OR status = 'failed'
                        OR status = 'canceled'
                    )
                """
            cursor.execute(
                f"""--sql
@@ -321,16 +325,28 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                (queue_id,),
            )
        return PruneResult(deleted=count)

    def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
        queue_item = self._set_queue_item_status(item_id=item_id, status="canceled")
        return queue_item

    def delete_queue_item(self, item_id: int) -> None:
        """Deletes a session queue item"""
        try:
            self.cancel_queue_item(item_id)
        except SessionQueueItemNotFoundError:
            pass
        with self._db.transaction() as cursor:
            cursor.execute(
                """--sql
                DELETE
                FROM session_queue
                WHERE item_id = ?
                """,
                (item_id,),
            )

    def complete_queue_item(self, item_id: int) -> SessionQueueItem:
        queue_item = self._set_queue_item_status(item_id=item_id, status="completed")
        return queue_item
@@ -352,8 +368,7 @@ class SqliteSessionQueue(SessionQueueBase):
        return queue_item

    def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
        with self._db.transaction() as cursor:
            current_queue_item = self.get_current(queue_id)
            placeholders = ", ".join(["?" for _ in batch_ids])
            where = f"""--sql
@@ -363,6 +378,8 @@ class SqliteSessionQueue(SessionQueueBase):
                AND status != 'canceled'
                AND status != 'completed'
                AND status != 'failed'
                -- We will cancel the current item separately below - skip it here
                AND status != 'in_progress'
                """
            params = [queue_id] + batch_ids
            cursor.execute(
@@ -382,17 +399,14 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )

        if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
            self._set_queue_item_status(current_queue_item.item_id, "canceled")

        return CancelByBatchIDsResult(canceled=count)

    def cancel_by_destination(self, queue_id: str, destination: str) -> CancelByDestinationResult:
        with self._db.transaction() as cursor:
            current_queue_item = self.get_current(queue_id)
            where = """--sql
                WHERE
@@ -401,6 +415,8 @@ class SqliteSessionQueue(SessionQueueBase):
                AND status != 'canceled'
                AND status != 'completed'
                AND status != 'failed'
                -- We will cancel the current item separately below - skip it here
                AND status != 'in_progress'
                """
            params = (queue_id, destination)
            cursor.execute(
@@ -420,17 +436,67 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                params,
            )
        if current_queue_item is not None and current_queue_item.destination == destination:
            self._set_queue_item_status(current_queue_item.item_id, "canceled")
        return CancelByDestinationResult(canceled=count)

    def delete_by_destination(self, queue_id: str, destination: str) -> DeleteByDestinationResult:
        with self._db.transaction() as cursor:
            current_queue_item = self.get_current(queue_id)
            if current_queue_item is not None and current_queue_item.destination == destination:
                self.cancel_queue_item(current_queue_item.item_id)
            params = (queue_id, destination)
            cursor.execute(
                """--sql
                SELECT COUNT(*)
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND destination = ?;
                """,
                params,
            )
            count = cursor.fetchone()[0]
            cursor.execute(
                """--sql
                DELETE
                FROM session_queue
                WHERE
                    queue_id = ?
                    AND destination = ?;
                """,
                params,
            )
        return DeleteByDestinationResult(deleted=count)

    def delete_all_except_current(self, queue_id: str) -> DeleteAllExceptCurrentResult:
        with self._db.transaction() as cursor:
            where = """--sql
                WHERE
                    queue_id == ?
                    AND status == 'pending'
                """
            cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
                {where};
                """,
                (queue_id,),
            )
            count = cursor.fetchone()[0]
            cursor.execute(
                f"""--sql
                DELETE
                FROM session_queue
                {where};
                """,
                (queue_id,),
            )
        return DeleteAllExceptCurrentResult(deleted=count)

    def cancel_by_queue_id(self, queue_id: str) -> CancelByQueueIDResult:
        with self._db.transaction() as cursor:
            current_queue_item = self.get_current(queue_id)
            where = """--sql
                WHERE
@@ -438,6 +504,8 @@ class SqliteSessionQueue(SessionQueueBase):
                AND status != 'canceled'
                AND status != 'completed'
                AND status != 'failed'
                -- We will cancel the current item separately below - skip it here
                AND status != 'in_progress'
                """
            params = [queue_id]
            cursor.execute(
@@ -457,21 +525,13 @@ class SqliteSessionQueue(SessionQueueBase):
                """,
                tuple(params),
            )

        if current_queue_item is not None and current_queue_item.queue_id == queue_id:
            self._set_queue_item_status(current_queue_item.item_id, "canceled")
        return CancelByQueueIDResult(canceled=count)

def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
with self._db.transaction() as cursor:
|
||||
where = """--sql
|
||||
WHERE
|
||||
queue_id == ?
|
||||
@@ -494,30 +554,25 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
""",
|
||||
(queue_id,),
|
||||
)
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
return CancelAllExceptCurrentResult(canceled=count)
|
||||
|
||||
def get_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT * FROM session_queue
|
||||
WHERE
|
||||
item_id = ?
|
||||
""",
|
||||
(item_id,),
|
||||
)
|
||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT * FROM session_queue
|
||||
WHERE
|
||||
item_id = ?
|
||||
""",
|
||||
(item_id,),
|
||||
)
|
||||
result = cast(Union[sqlite3.Row, None], cursor.fetchone())
|
||||
if result is None:
|
||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
with self._db.transaction() as cursor:
|
||||
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
||||
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
||||
# during execution.
|
||||
@@ -530,10 +585,6 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
""",
|
||||
(session_json, item_id),
|
||||
)
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
return self.get_queue_item(item_id)
|
||||
|
||||
def list_queue_items(
|
||||
@@ -543,53 +594,45 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
priority: int,
|
||||
cursor: Optional[int] = None,
|
||||
status: Optional[QUEUE_ITEM_STATUS] = None,
|
||||
) -> CursorPaginatedResults[SessionQueueItemDTO]:
|
||||
cursor_ = self._conn.cursor()
|
||||
item_id = cursor
|
||||
query = """--sql
|
||||
SELECT item_id,
|
||||
status,
|
||||
priority,
|
||||
field_values,
|
||||
error_type,
|
||||
error_message,
|
||||
error_traceback,
|
||||
created_at,
|
||||
updated_at,
|
||||
completed_at,
|
||||
started_at,
|
||||
session_id,
|
||||
batch_id,
|
||||
queue_id,
|
||||
origin,
|
||||
destination
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
"""
|
||||
params: list[Union[str, int]] = [queue_id]
|
||||
|
||||
if status is not None:
|
||||
query += """--sql
|
||||
AND status = ?
|
||||
"""
|
||||
params.append(status)
|
||||
|
||||
if item_id is not None:
|
||||
query += """--sql
|
||||
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
||||
"""
|
||||
params.extend([priority, priority, item_id])
|
||||
|
||||
query += """--sql
|
||||
ORDER BY
|
||||
priority DESC,
|
||||
item_id ASC
|
||||
LIMIT ?
|
||||
destination: Optional[str] = None,
|
||||
) -> CursorPaginatedResults[SessionQueueItem]:
|
||||
with self._db.transaction() as cursor_:
|
||||
item_id = cursor
|
||||
query = """--sql
|
||||
SELECT *
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
"""
|
||||
params.append(limit + 1)
|
||||
cursor_.execute(query, params)
|
||||
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
||||
items = [SessionQueueItemDTO.queue_item_dto_from_dict(dict(result)) for result in results]
|
||||
params: list[Union[str, int]] = [queue_id]
|
||||
|
||||
if status is not None:
|
||||
query += """--sql
|
||||
AND status = ?
|
||||
"""
|
||||
params.append(status)
|
||||
|
||||
if destination is not None:
|
||||
query += """---sql
|
||||
AND destination = ?
|
||||
"""
|
||||
params.append(destination)
|
||||
|
||||
if item_id is not None:
|
||||
query += """--sql
|
||||
AND (priority < ?) OR (priority = ? AND item_id > ?)
|
||||
"""
|
||||
params.extend([priority, priority, item_id])
|
||||
|
||||
query += """--sql
|
||||
ORDER BY
|
||||
priority DESC,
|
||||
item_id ASC
|
||||
LIMIT ?
|
||||
"""
|
||||
params.append(limit + 1)
|
||||
cursor_.execute(query, params)
|
||||
results = cast(list[sqlite3.Row], cursor_.fetchall())
|
||||
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
||||
has_more = False
|
||||
if len(items) > limit:
|
||||
# remove the extra item
|
||||
@@ -597,21 +640,52 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
has_more = True
|
||||
return CursorPaginatedResults(items=items, limit=limit, has_more=has_more)
|
||||
|
||||
def list_all_queue_items(
|
||||
self,
|
||||
queue_id: str,
|
||||
destination: Optional[str] = None,
|
||||
) -> list[SessionQueueItem]:
|
||||
"""Gets all queue items that match the given parameters"""
|
||||
with self._db.transaction() as cursor:
|
||||
query = """--sql
|
||||
SELECT *
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
"""
|
||||
params: list[Union[str, int]] = [queue_id]
|
||||
|
||||
if destination is not None:
|
||||
query += """---sql
|
||||
AND destination = ?
|
||||
"""
|
||||
params.append(destination)
|
||||
|
||||
query += """--sql
|
||||
ORDER BY
|
||||
priority DESC,
|
||||
item_id ASC
|
||||
;
|
||||
"""
|
||||
cursor.execute(query, params)
|
||||
results = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
items = [SessionQueueItem.queue_item_from_dict(dict(result)) for result in results]
|
||||
return items
|
||||
|
||||
def get_queue_status(self, queue_id: str) -> SessionQueueStatus:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id,),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id,),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
|
||||
current_item = self.get_current(queue_id=queue_id)
|
||||
total = sum(row[1] for row in counts_result)
|
||||
total = sum(row[1] or 0 for row in counts_result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
|
||||
return SessionQueueStatus(
|
||||
queue_id=queue_id,
|
||||
@@ -627,20 +701,20 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
|
||||
def get_batch_status(self, queue_id: str, batch_id: str) -> BatchStatus:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*), origin, destination
|
||||
FROM session_queue
|
||||
WHERE
|
||||
queue_id = ?
|
||||
AND batch_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, batch_id),
|
||||
)
|
||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
total = sum(row[1] for row in result)
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*), origin, destination
|
||||
FROM session_queue
|
||||
WHERE
|
||||
queue_id = ?
|
||||
AND batch_id = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, batch_id),
|
||||
)
|
||||
result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
total = sum(row[1] or 0 for row in result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in result}
|
||||
origin = result[0]["origin"] if result else None
|
||||
destination = result[0]["destination"] if result else None
|
||||
@@ -659,20 +733,20 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
|
||||
def get_counts_by_destination(self, queue_id: str, destination: str) -> SessionQueueCountsByDestination:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
AND destination = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, destination),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
with self._db.transaction() as cursor:
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
SELECT status, count(*)
|
||||
FROM session_queue
|
||||
WHERE queue_id = ?
|
||||
AND destination = ?
|
||||
GROUP BY status
|
||||
""",
|
||||
(queue_id, destination),
|
||||
)
|
||||
counts_result = cast(list[sqlite3.Row], cursor.fetchall())
|
||||
|
||||
total = sum(row[1] for row in counts_result)
|
||||
total = sum(row[1] or 0 for row in counts_result)
|
||||
counts: dict[str, int] = {row[0]: row[1] for row in counts_result}
|
||||
|
||||
return SessionQueueCountsByDestination(
|
||||
@@ -688,8 +762,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
|
||||
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
|
||||
"""Retries the given queue items"""
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
with self._db.transaction() as cursor:
|
||||
values_to_insert: list[ValueToInsertTuple] = []
|
||||
retried_item_ids: list[int] = []
|
||||
|
||||
@@ -740,10 +813,6 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
values_to_insert,
|
||||
)
|
||||
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
retry_result = RetryItemsResult(
|
||||
queue_id=queue_id,
|
||||
retried_item_ids=retried_item_ids,
|
||||
|
||||
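
A note on the cursor pagination in list_queue_items above: the query fetches limit + 1 rows so it can report has_more without a second COUNT, and it pages by (priority, item_id) rather than OFFSET. A minimal standalone sketch of the same keyset technique, using a toy table ("items" and its columns are illustrative, not InvokeAI's actual schema):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    "CREATE TABLE items (item_id INTEGER PRIMARY KEY, priority INTEGER);"
    "INSERT INTO items (priority) VALUES (1), (1), (0), (0), (0);"
)

def page(limit: int, cursor: tuple[int, int] | None):
    # Fetch limit+1 rows so we can tell whether another page exists.
    sql = "SELECT item_id, priority FROM items"
    params: list[int] = []
    if cursor is not None:
        prio, item_id = cursor
        # Resume strictly after the cursor row: lower priority, or the same
        # priority with a higher item_id.
        sql += " WHERE (priority < ?) OR (priority = ? AND item_id > ?)"
        params += [prio, prio, item_id]
    sql += " ORDER BY priority DESC, item_id ASC LIMIT ?"
    rows = conn.execute(sql, params + [limit + 1]).fetchall()
    has_more = len(rows) > limit
    return rows[:limit], has_more

rows, has_more = page(2, None)
print(rows, has_more)  # first page of 2 rows, has_more=True

Unlike OFFSET pagination, this stays correct and cheap when rows are inserted or deleted between pages, which matters for a live queue.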
@@ -2,11 +2,12 @@

 import copy
 import itertools
-from typing import Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
+from typing import Any, Optional, TypeVar, Union, get_args, get_origin

 import networkx as nx
 from pydantic import (
     BaseModel,
+    ConfigDict,
     GetCoreSchemaHandler,
     GetJsonSchemaHandler,
     ValidationError,
@@ -57,17 +58,32 @@ class Edge(BaseModel):


 def get_output_field_type(node: BaseInvocation, field: str) -> Any:
-    node_type = type(node)
-    node_outputs = get_type_hints(node_type.get_output_annotation())
-    node_output_field = node_outputs.get(field) or None
-    return node_output_field
+    # TODO(psyche): This is awkward - if field_info is None, it means the field is not defined in the output, which
+    # really should raise. The consumers of this utility expect it to never raise, and return None instead. Fixing this
+    # would require some fairly significant changes and I don't want to risk breaking anything.
+    try:
+        invocation_class = type(node)
+        invocation_output_class = invocation_class.get_output_annotation()
+        field_info = invocation_output_class.model_fields.get(field)
+        assert field_info is not None, f"Output field '{field}' not found in {invocation_output_class.get_type()}"
+        output_field_type = field_info.annotation
+        return output_field_type
+    except Exception:
+        return None


 def get_input_field_type(node: BaseInvocation, field: str) -> Any:
-    node_type = type(node)
-    node_inputs = get_type_hints(node_type)
-    node_input_field = node_inputs.get(field) or None
-    return node_input_field
+    # TODO(psyche): This is awkward - if field_info is None, it means the field is not defined on the node, which
+    # really should raise. The consumers of this utility expect it to never raise, and return None instead. Fixing this
+    # would require some fairly significant changes and I don't want to risk breaking anything.
+    try:
+        invocation_class = type(node)
+        field_info = invocation_class.model_fields.get(field)
+        assert field_info is not None, f"Input field '{field}' not found in {invocation_class.get_type()}"
+        input_field_type = field_info.annotation
+        return input_field_type
+    except Exception:
+        return None


 def is_union_subtype(t1, t2):
@@ -787,6 +803,22 @@ class GraphExecutionState(BaseModel):
         default_factory=dict,
     )

+    model_config = ConfigDict(
+        json_schema_extra={
+            "required": [
+                "id",
+                "graph",
+                "execution_graph",
+                "executed",
+                "executed_history",
+                "results",
+                "errors",
+                "prepared_source_mapping",
+                "source_prepared_mapping",
+            ]
+        }
+    )
+
     @field_validator("graph")
     def graph_is_valid(cls, v: Graph):
         """Validates that the graph is valid"""
@@ -975,10 +1007,11 @@ class GraphExecutionState(BaseModel):
         new_node_ids = []
         if isinstance(next_node, CollectInvocation):
             # Collapse all iterator input mappings and create a single execution node for the collect invocation
-            all_iteration_mappings = list(
-                itertools.chain(*(((s, p) for p in self.source_prepared_mapping[s]) for s in next_node_parents))
-            )
-            # all_iteration_mappings = list(set(itertools.chain(*prepared_parent_mappings)))
+            all_iteration_mappings = []
+            for source_node_id in next_node_parents:
+                prepared_nodes = self.source_prepared_mapping[source_node_id]
+                all_iteration_mappings.extend([(source_node_id, p) for p in prepared_nodes])

             create_results = self._create_execution_node(next_node_id, all_iteration_mappings)
             if create_results is not None:
                 new_node_ids.extend(create_results)
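
The rewritten helpers above read field types from pydantic's model_fields metadata instead of typing.get_type_hints. A minimal sketch of the difference, using a stand-in model rather than an actual invocation class:

from typing import Optional, get_type_hints
from pydantic import BaseModel

# Stand-in model for illustration only - not an actual invocation class.
class Example(BaseModel):
    width: Optional[int] = None

# get_type_hints resolves the raw class annotation...
print(get_type_hints(Example)["width"])          # typing.Optional[int]
# ...while model_fields goes through pydantic's own field metadata, which is
# what the new get_input_field_type/get_output_field_type rely on.
print(Example.model_fields["width"].annotation)  # typing.Optional[int]
print(Example.model_fields.get("missing"))       # None - the helpers turn this into a returned None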
@@ -1,4 +1,7 @@
 import sqlite3
+import threading
+from collections.abc import Generator
+from contextlib import contextmanager
 from logging import Logger
 from pathlib import Path

@@ -26,46 +29,65 @@ class SqliteDatabase:

     def __init__(self, db_path: Path | None, logger: Logger, verbose: bool = False) -> None:
         """Initializes the database. This is used internally by the class constructor."""
-        self.logger = logger
-        self.db_path = db_path
-        self.verbose = verbose
+        self._logger = logger
+        self._db_path = db_path
+        self._verbose = verbose
+        self._lock = threading.RLock()

-        if not self.db_path:
+        if not self._db_path:
             logger.info("Initializing in-memory database")
         else:
-            self.db_path.parent.mkdir(parents=True, exist_ok=True)
-            self.logger.info(f"Initializing database at {self.db_path}")
+            self._db_path.parent.mkdir(parents=True, exist_ok=True)
+            self._logger.info(f"Initializing database at {self._db_path}")

-        self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
-        self.conn.row_factory = sqlite3.Row
+        self._conn = sqlite3.connect(database=self._db_path or sqlite_memory, check_same_thread=False)
+        self._conn.row_factory = sqlite3.Row

-        if self.verbose:
-            self.conn.set_trace_callback(self.logger.debug)
+        if self._verbose:
+            self._conn.set_trace_callback(self._logger.debug)

         # Enable foreign key constraints
-        self.conn.execute("PRAGMA foreign_keys = ON;")
+        self._conn.execute("PRAGMA foreign_keys = ON;")

         # Enable Write-Ahead Logging (WAL) mode for better concurrency
-        self.conn.execute("PRAGMA journal_mode = WAL;")
+        self._conn.execute("PRAGMA journal_mode = WAL;")

         # Set a busy timeout to prevent database lockups during writes
-        self.conn.execute("PRAGMA busy_timeout = 5000;")  # 5 seconds
+        self._conn.execute("PRAGMA busy_timeout = 5000;")  # 5 seconds

     def clean(self) -> None:
         """
         Cleans the database by running the VACUUM command, reporting on the freed space.
         """
         # No need to clean in-memory database
-        if not self.db_path:
+        if not self._db_path:
             return
         try:
-            initial_db_size = Path(self.db_path).stat().st_size
-            self.conn.execute("VACUUM;")
-            self.conn.commit()
-            final_db_size = Path(self.db_path).stat().st_size
-            freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
-            if freed_space_in_mb > 0:
-                self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
+            with self._conn as conn:
+                initial_db_size = Path(self._db_path).stat().st_size
+                conn.execute("VACUUM;")
+                conn.commit()
+                final_db_size = Path(self._db_path).stat().st_size
+                freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
+                if freed_space_in_mb > 0:
+                    self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
         except Exception as e:
-            self.logger.error(f"Error cleaning database: {e}")
+            self._logger.error(f"Error cleaning database: {e}")
             raise

+    @contextmanager
+    def transaction(self) -> Generator[sqlite3.Cursor, None, None]:
+        """
+        Thread-safe context manager for DB work.
+        Acquires the RLock, yields a Cursor, then commits or rolls back.
+        """
+        with self._lock:
+            cursor = self._conn.cursor()
+            try:
+                yield cursor
+                self._conn.commit()
+            except:
+                self._conn.rollback()
+                raise
+            finally:
+                cursor.close()
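
A minimal usage sketch of the transaction() helper above, assuming only what the diff shows (the constructor signature and the commit/rollback semantics); the table name is illustrative:

import logging
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase

db = SqliteDatabase(db_path=None, logger=logging.getLogger(__name__))  # in-memory

with db.transaction() as cursor:
    cursor.execute("CREATE TABLE IF NOT EXISTS demo (id INTEGER PRIMARY KEY);")
    cursor.execute("INSERT INTO demo DEFAULT VALUES;")
# On normal exit the transaction is committed; if the block raises, it is
# rolled back and the exception propagates. The RLock serializes access across
# threads and, being re-entrant, tolerates nested use on the same thread.

This is the pattern every service below is migrated onto, replacing the hand-rolled try/commit/except/rollback blocks.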
@@ -32,7 +32,7 @@ class SqliteMigrator:

     def __init__(self, db: SqliteDatabase) -> None:
         self._db = db
-        self._logger = db.logger
+        self._logger = db._logger
         self._migration_set = MigrationSet()
         self._backup_path: Optional[Path] = None

@@ -45,7 +45,7 @@ class SqliteMigrator:
         """Migrates the database to the latest version."""
         # This throws if there is a problem.
         self._migration_set.validate_migration_chain()
-        cursor = self._db.conn.cursor()
+        cursor = self._db._conn.cursor()
         self._create_migrations_table(cursor=cursor)

         if self._migration_set.count == 0:
@@ -59,13 +59,13 @@ class SqliteMigrator:
             self._logger.info("Database update needed")

             # Make a backup of the db if it needs to be updated and is a file db
-            if self._db.db_path is not None:
+            if self._db._db_path is not None:
                 timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
-                self._backup_path = self._db.db_path.parent / f"{self._db.db_path.stem}_backup_{timestamp}.db"
+                self._backup_path = self._db._db_path.parent / f"{self._db._db_path.stem}_backup_{timestamp}.db"
                 self._logger.info(f"Backing up database to {str(self._backup_path)}")
                 # Use SQLite to do the backup
                 with closing(sqlite3.connect(self._backup_path)) as backup_conn:
-                    self._db.conn.backup(backup_conn)
+                    self._db._conn.backup(backup_conn)
             else:
                 self._logger.info("Using in-memory database, no backup needed")

@@ -81,7 +81,7 @@ class SqliteMigrator:
         try:
             # Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
             # exception is raised.
-            with self._db.conn as conn:
+            with self._db._conn as conn:
                 cursor = conn.cursor()
                 if self._get_current_version(cursor) != migration.from_version:
                     raise MigrationError(
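
The migrator snapshots the file database with sqlite3's online backup API before applying migrations. A standalone sketch of that call (the paths and table are illustrative):

import sqlite3
from contextlib import closing

src = sqlite3.connect(":memory:")
src.execute("CREATE TABLE t (x);")

# Connection.backup copies the live database into another connection, page by
# page, without requiring exclusive access - this is how the migrator takes
# its pre-migration snapshot.
with closing(sqlite3.connect("backup_example.db")) as backup_conn:
    src.backup(backup_conn)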
@@ -17,7 +17,7 @@ from invokeai.app.util.misc import uuid_string
 class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._conn = db.conn
+        self._db = db

     def start(self, invoker: Invoker) -> None:
         self._invoker = invoker
@@ -25,24 +25,23 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):

     def get(self, style_preset_id: str) -> StylePresetRecordDTO:
         """Gets a style preset by ID."""
-        cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            SELECT *
-            FROM style_presets
-            WHERE id = ?;
-            """,
-            (style_preset_id,),
-        )
-        row = cursor.fetchone()
+        with self._db.transaction() as cursor:
+            cursor.execute(
+                """--sql
+                SELECT *
+                FROM style_presets
+                WHERE id = ?;
+                """,
+                (style_preset_id,),
+            )
+            row = cursor.fetchone()
         if row is None:
             raise StylePresetNotFoundError(f"Style preset with id {style_preset_id} not found")
         return StylePresetRecordDTO.from_dict(dict(row))

     def create(self, style_preset: StylePresetWithoutId) -> StylePresetRecordDTO:
         style_preset_id = uuid_string()
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 INSERT OR IGNORE INTO style_presets (
@@ -60,16 +59,11 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
                     style_preset.type,
                 ),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return self.get(style_preset_id)

     def create_many(self, style_presets: list[StylePresetWithoutId]) -> None:
         style_preset_ids = []
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             for style_preset in style_presets:
                 style_preset_id = uuid_string()
                 style_preset_ids.append(style_preset_id)
@@ -90,16 +84,11 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
                         style_preset.type,
                     ),
                 )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise

         return None

     def update(self, style_preset_id: str, changes: StylePresetChanges) -> StylePresetRecordDTO:
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             # Change the name of a style preset
             if changes.name is not None:
                 cursor.execute(
@@ -122,15 +111,10 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
                     (changes.preset_data.model_dump_json(), style_preset_id),
                 )

-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return self.get(style_preset_id)

     def delete(self, style_preset_id: str) -> None:
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 DELETE from style_presets
@@ -138,51 +122,41 @@ class SqliteStylePresetRecordsStorage(StylePresetRecordsStorageBase):
                 """,
                 (style_preset_id,),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return None

     def get_many(self, type: PresetType | None = None) -> list[StylePresetRecordDTO]:
-        main_query = """
-            SELECT
-            *
-            FROM style_presets
-            """
-
-        if type is not None:
-            main_query += "WHERE type = ? "
-
-        main_query += "ORDER BY LOWER(name) ASC"
-
-        cursor = self._conn.cursor()
-        if type is not None:
-            cursor.execute(main_query, (type,))
-        else:
-            cursor.execute(main_query)
-
-        rows = cursor.fetchall()
+        with self._db.transaction() as cursor:
+            main_query = """
+                SELECT
+                *
+                FROM style_presets
+                """
+
+            if type is not None:
+                main_query += "WHERE type = ? "
+
+            main_query += "ORDER BY LOWER(name) ASC"
+
+            if type is not None:
+                cursor.execute(main_query, (type,))
+            else:
+                cursor.execute(main_query)
+
+            rows = cursor.fetchall()
         style_presets = [StylePresetRecordDTO.from_dict(dict(row)) for row in rows]

         return style_presets

     def _sync_default_style_presets(self) -> None:
         """Syncs default style presets to the database. Internal use only."""

-        # First delete all existing default style presets
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
+            # First delete all existing default style presets
             cursor.execute(
                 """--sql
                 DELETE FROM style_presets
                 WHERE type = "default";
                 """
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         # Next, parse and create the default style presets
         with open(Path(__file__).parent / Path("default_style_presets.json"), "r") as file:
             presets = json.load(file)
@@ -25,7 +25,7 @@ SQL_TIME_FORMAT = "%Y-%m-%d %H:%M:%f"
 class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
     def __init__(self, db: SqliteDatabase) -> None:
         super().__init__()
-        self._conn = db.conn
+        self._db = db

     def start(self, invoker: Invoker) -> None:
         self._invoker = invoker
@@ -33,16 +33,16 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):

     def get(self, workflow_id: str) -> WorkflowRecordDTO:
         """Gets a workflow by ID. Updates the opened_at column."""
-        cursor = self._conn.cursor()
-        cursor.execute(
-            """--sql
-            SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
-            FROM workflow_library
-            WHERE workflow_id = ?;
-            """,
-            (workflow_id,),
-        )
-        row = cursor.fetchone()
+        with self._db.transaction() as cursor:
+            cursor.execute(
+                """--sql
+                SELECT workflow_id, workflow, name, created_at, updated_at, opened_at
+                FROM workflow_library
+                WHERE workflow_id = ?;
+                """,
+                (workflow_id,),
+            )
+            row = cursor.fetchone()
         if row is None:
             raise WorkflowNotFoundError(f"Workflow with id {workflow_id} not found")
         return WorkflowRecordDTO.from_dict(dict(row))
@@ -51,9 +51,8 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         if workflow.meta.category is WorkflowCategory.Default:
             raise ValueError("Default workflows cannot be created via this method")

-        try:
+        with self._db.transaction() as cursor:
             workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
-            cursor = self._conn.cursor()
             cursor.execute(
                 """--sql
                 INSERT OR IGNORE INTO workflow_library (
@@ -64,18 +63,13 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 """,
                 (workflow_with_id.id, workflow_with_id.model_dump_json()),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return self.get(workflow_with_id.id)

     def update(self, workflow: Workflow) -> WorkflowRecordDTO:
         if workflow.meta.category is WorkflowCategory.Default:
             raise ValueError("Default workflows cannot be updated")

-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 UPDATE workflow_library
@@ -84,18 +78,13 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 """,
                 (workflow.model_dump_json(), workflow.id),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return self.get(workflow.id)

     def delete(self, workflow_id: str) -> None:
         if self.get(workflow_id).workflow.meta.category is WorkflowCategory.Default:
             raise ValueError("Default workflows cannot be deleted")

-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 """--sql
                 DELETE from workflow_library
@@ -103,10 +92,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 """,
                 (workflow_id,),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
         return None

     def get_many(
@@ -121,108 +106,108 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         has_been_opened: Optional[bool] = None,
         is_published: Optional[bool] = None,
     ) -> PaginatedResults[WorkflowRecordListItemDTO]:
-        # sanitize!
-        assert order_by in WorkflowRecordOrderBy
-        assert direction in SQLiteDirection
-
-        # We will construct the query dynamically based on the query params
-
-        # The main query to get the workflows / counts
-        main_query = """
-            SELECT
-                workflow_id,
-                category,
-                name,
-                description,
-                created_at,
-                updated_at,
-                opened_at,
-                tags
-            FROM workflow_library
-            """
-        count_query = "SELECT COUNT(*) FROM workflow_library"
-
-        # Start with an empty list of conditions and params
-        conditions: list[str] = []
-        params: list[str | int] = []
-
-        if categories:
-            # Categories is a list of WorkflowCategory enum values, and a single string in the DB
-
-            # Ensure all categories are valid (is this necessary?)
-            assert all(c in WorkflowCategory for c in categories)
-
-            # Construct a placeholder string for the number of categories
-            placeholders = ", ".join("?" for _ in categories)
-
-            # Construct the condition string & params
-            category_condition = f"category IN ({placeholders})"
-            category_params = [category.value for category in categories]
-
-            conditions.append(category_condition)
-            params.extend(category_params)
-
-        if tags:
-            # Tags is a list of strings, and a single string in the DB
-            # The string in the DB has no guaranteed format
-
-            # Construct a list of conditions for each tag
-            tags_conditions = ["tags LIKE ?" for _ in tags]
-            tags_conditions_joined = " OR ".join(tags_conditions)
-            tags_condition = f"({tags_conditions_joined})"
-
-            # And the params for the tags, case-insensitive
-            tags_params = [f"%{t.strip()}%" for t in tags]
-
-            conditions.append(tags_condition)
-            params.extend(tags_params)
-
-        if has_been_opened:
-            conditions.append("opened_at IS NOT NULL")
-        elif has_been_opened is False:
-            conditions.append("opened_at IS NULL")
-
-        # Ignore whitespace in the query
-        stripped_query = query.strip() if query else None
-        if stripped_query:
-            # Construct a wildcard query for the name, description, and tags
-            wildcard_query = "%" + stripped_query + "%"
-            query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"
-
-            conditions.append(query_condition)
-            params.extend([wildcard_query, wildcard_query, wildcard_query])
-
-        if conditions:
-            # If there are conditions, add a WHERE clause and then join the conditions
-            main_query += " WHERE "
-            count_query += " WHERE "
-
-            all_conditions = " AND ".join(conditions)
-            main_query += all_conditions
-            count_query += all_conditions
-
-        # After this point, the query and params differ for the main query and the count query
-        main_params = params.copy()
-        count_params = params.copy()
-
-        # Main query also gets ORDER BY and LIMIT/OFFSET
-        main_query += f" ORDER BY {order_by.value} {direction.value}"
-
-        if per_page:
-            main_query += " LIMIT ? OFFSET ?"
-            main_params.extend([per_page, page * per_page])
-
-        # Put a ring on it
-        main_query += ";"
-        count_query += ";"
-
-        cursor = self._conn.cursor()
-        cursor.execute(main_query, main_params)
-        rows = cursor.fetchall()
-        workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
-
-        cursor.execute(count_query, count_params)
-        total = cursor.fetchone()[0]
+        with self._db.transaction() as cursor:
+            # sanitize!
+            assert order_by in WorkflowRecordOrderBy
+            assert direction in SQLiteDirection
+
+            # We will construct the query dynamically based on the query params
+
+            # The main query to get the workflows / counts
+            main_query = """
+                SELECT
+                    workflow_id,
+                    category,
+                    name,
+                    description,
+                    created_at,
+                    updated_at,
+                    opened_at,
+                    tags
+                FROM workflow_library
+                """
+            count_query = "SELECT COUNT(*) FROM workflow_library"
+
+            # Start with an empty list of conditions and params
+            conditions: list[str] = []
+            params: list[str | int] = []
+
+            if categories:
+                # Categories is a list of WorkflowCategory enum values, and a single string in the DB
+
+                # Ensure all categories are valid (is this necessary?)
+                assert all(c in WorkflowCategory for c in categories)
+
+                # Construct a placeholder string for the number of categories
+                placeholders = ", ".join("?" for _ in categories)
+
+                # Construct the condition string & params
+                category_condition = f"category IN ({placeholders})"
+                category_params = [category.value for category in categories]
+
+                conditions.append(category_condition)
+                params.extend(category_params)
+
+            if tags:
+                # Tags is a list of strings, and a single string in the DB
+                # The string in the DB has no guaranteed format
+
+                # Construct a list of conditions for each tag
+                tags_conditions = ["tags LIKE ?" for _ in tags]
+                tags_conditions_joined = " OR ".join(tags_conditions)
+                tags_condition = f"({tags_conditions_joined})"
+
+                # And the params for the tags, case-insensitive
+                tags_params = [f"%{t.strip()}%" for t in tags]
+
+                conditions.append(tags_condition)
+                params.extend(tags_params)
+
+            if has_been_opened:
+                conditions.append("opened_at IS NOT NULL")
+            elif has_been_opened is False:
+                conditions.append("opened_at IS NULL")
+
+            # Ignore whitespace in the query
+            stripped_query = query.strip() if query else None
+            if stripped_query:
+                # Construct a wildcard query for the name, description, and tags
+                wildcard_query = "%" + stripped_query + "%"
+                query_condition = "(name LIKE ? OR description LIKE ? OR tags LIKE ?)"
+
+                conditions.append(query_condition)
+                params.extend([wildcard_query, wildcard_query, wildcard_query])
+
+            if conditions:
+                # If there are conditions, add a WHERE clause and then join the conditions
+                main_query += " WHERE "
+                count_query += " WHERE "
+
+                all_conditions = " AND ".join(conditions)
+                main_query += all_conditions
+                count_query += all_conditions
+
+            # After this point, the query and params differ for the main query and the count query
+            main_params = params.copy()
+            count_params = params.copy()
+
+            # Main query also gets ORDER BY and LIMIT/OFFSET
+            main_query += f" ORDER BY {order_by.value} {direction.value}"
+
+            if per_page:
+                main_query += " LIMIT ? OFFSET ?"
+                main_params.extend([per_page, page * per_page])
+
+            # Put a ring on it
+            main_query += ";"
+            count_query += ";"
+
+            cursor.execute(main_query, main_params)
+            rows = cursor.fetchall()
+            workflows = [WorkflowRecordListItemDTOValidator.validate_python(dict(row)) for row in rows]
+
+            cursor.execute(count_query, count_params)
+            total = cursor.fetchone()[0]

         if per_page:
             pages = total // per_page + (total % per_page > 0)
@@ -247,46 +232,46 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         if not tags:
             return {}

-        cursor = self._conn.cursor()
-        result: dict[str, int] = {}
-        # Base conditions for categories and selected tags
-        base_conditions: list[str] = []
-        base_params: list[str | int] = []
-
-        # Add category conditions
-        if categories:
-            assert all(c in WorkflowCategory for c in categories)
-            placeholders = ", ".join("?" for _ in categories)
-            base_conditions.append(f"category IN ({placeholders})")
-            base_params.extend([category.value for category in categories])
-
-        if has_been_opened:
-            base_conditions.append("opened_at IS NOT NULL")
-        elif has_been_opened is False:
-            base_conditions.append("opened_at IS NULL")
-
-        # For each tag to count, run a separate query
-        for tag in tags:
-            # Start with the base conditions
-            conditions = base_conditions.copy()
-            params = base_params.copy()
-
-            # Add this specific tag condition
-            conditions.append("tags LIKE ?")
-            params.append(f"%{tag.strip()}%")
-
-            # Construct the full query
-            stmt = """--sql
-                SELECT COUNT(*)
-                FROM workflow_library
-            """
-
-            if conditions:
-                stmt += " WHERE " + " AND ".join(conditions)
-
-            cursor.execute(stmt, params)
-            count = cursor.fetchone()[0]
-            result[tag] = count
+        with self._db.transaction() as cursor:
+            result: dict[str, int] = {}
+            # Base conditions for categories and selected tags
+            base_conditions: list[str] = []
+            base_params: list[str | int] = []
+
+            # Add category conditions
+            if categories:
+                assert all(c in WorkflowCategory for c in categories)
+                placeholders = ", ".join("?" for _ in categories)
+                base_conditions.append(f"category IN ({placeholders})")
+                base_params.extend([category.value for category in categories])
+
+            if has_been_opened:
+                base_conditions.append("opened_at IS NOT NULL")
+            elif has_been_opened is False:
+                base_conditions.append("opened_at IS NULL")
+
+            # For each tag to count, run a separate query
+            for tag in tags:
+                # Start with the base conditions
+                conditions = base_conditions.copy()
+                params = base_params.copy()
+
+                # Add this specific tag condition
+                conditions.append("tags LIKE ?")
+                params.append(f"%{tag.strip()}%")
+
+                # Construct the full query
+                stmt = """--sql
+                    SELECT COUNT(*)
+                    FROM workflow_library
+                """
+
+                if conditions:
+                    stmt += " WHERE " + " AND ".join(conditions)
+
+                cursor.execute(stmt, params)
+                count = cursor.fetchone()[0]
+                result[tag] = count

         return result

@@ -296,52 +281,51 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         has_been_opened: Optional[bool] = None,
         is_published: Optional[bool] = None,
     ) -> dict[str, int]:
-        cursor = self._conn.cursor()
-        result: dict[str, int] = {}
-        # Base conditions for categories
-        base_conditions: list[str] = []
-        base_params: list[str | int] = []
-
-        # Add category conditions
-        if categories:
-            assert all(c in WorkflowCategory for c in categories)
-            placeholders = ", ".join("?" for _ in categories)
-            base_conditions.append(f"category IN ({placeholders})")
-            base_params.extend([category.value for category in categories])
-
-        if has_been_opened:
-            base_conditions.append("opened_at IS NOT NULL")
-        elif has_been_opened is False:
-            base_conditions.append("opened_at IS NULL")
-
-        # For each category to count, run a separate query
-        for category in categories:
-            # Start with the base conditions
-            conditions = base_conditions.copy()
-            params = base_params.copy()
-
-            # Add this specific category condition
-            conditions.append("category = ?")
-            params.append(category.value)
-
-            # Construct the full query
-            stmt = """--sql
-                SELECT COUNT(*)
-                FROM workflow_library
-            """
-
-            if conditions:
-                stmt += " WHERE " + " AND ".join(conditions)
-
-            cursor.execute(stmt, params)
-            count = cursor.fetchone()[0]
-            result[category.value] = count
+        with self._db.transaction() as cursor:
+            result: dict[str, int] = {}
+            # Base conditions for categories
+            base_conditions: list[str] = []
+            base_params: list[str | int] = []
+
+            # Add category conditions
+            if categories:
+                assert all(c in WorkflowCategory for c in categories)
+                placeholders = ", ".join("?" for _ in categories)
+                base_conditions.append(f"category IN ({placeholders})")
+                base_params.extend([category.value for category in categories])
+
+            if has_been_opened:
+                base_conditions.append("opened_at IS NOT NULL")
+            elif has_been_opened is False:
+                base_conditions.append("opened_at IS NULL")
+
+            # For each category to count, run a separate query
+            for category in categories:
+                # Start with the base conditions
+                conditions = base_conditions.copy()
+                params = base_params.copy()
+
+                # Add this specific category condition
+                conditions.append("category = ?")
+                params.append(category.value)
+
+                # Construct the full query
+                stmt = """--sql
+                    SELECT COUNT(*)
+                    FROM workflow_library
+                """
+
+                if conditions:
+                    stmt += " WHERE " + " AND ".join(conditions)
+
+                cursor.execute(stmt, params)
+                count = cursor.fetchone()[0]
+                result[category.value] = count

         return result

     def update_opened_at(self, workflow_id: str) -> None:
-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             cursor.execute(
                 f"""--sql
                 UPDATE workflow_library
@@ -350,10 +334,6 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 """,
                 (workflow_id,),
             )
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise

     def _sync_default_workflows(self) -> None:
         """Syncs default workflows to the database. Internal use only."""
@@ -368,8 +348,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
         meaningless, as they are overwritten every time the server starts.
         """

-        try:
-            cursor = self._conn.cursor()
+        with self._db.transaction() as cursor:
             workflows_from_file: list[Workflow] = []
             workflows_to_update: list[Workflow] = []
             workflows_to_add: list[Workflow] = []
@@ -449,8 +428,3 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 """,
                 (w.model_dump_json(), w.id),
             )
-
-            self._conn.commit()
-        except Exception:
-            self._conn.rollback()
-            raise
@@ -123,7 +123,11 @@ def calc_percentage(intermediate_state: PipelineIntermediateState) -> float:
     if total_steps == 0:
         return 0.0
     if order == 2:
-        return floor(step / 2) / floor(total_steps / 2)
+        # Prevent division by zero when total_steps is 1 or 2
+        denominator = floor(total_steps / 2)
+        if denominator == 0:
+            return 0.0
+        return floor(step / 2) / denominator
     # order == 1
     return step / total_steps
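
The guard matters for second-order schedulers with very few steps: with total_steps = 1, floor(total_steps / 2) is 0 and the old code divided by zero. A quick standalone check of the fixed arithmetic (simplified signature for illustration; the real function takes a PipelineIntermediateState):

from math import floor

def calc_percentage(step: int, total_steps: int, order: int) -> float:
    # Mirrors the fixed logic in the hunk above.
    if total_steps == 0:
        return 0.0
    if order == 2:
        denominator = floor(total_steps / 2)
        if denominator == 0:
            return 0.0  # total_steps is 1: avoid ZeroDivisionError
        return floor(step / 2) / denominator
    return step / total_steps

print(calc_percentage(1, 1, order=2))  # 0.0 instead of raising
print(calc_percentage(2, 4, order=2))  # 0.5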
@@ -30,8 +30,11 @@ def denoise(
     controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension],
     pos_ip_adapter_extensions: list[XLabsIPAdapterExtension],
     neg_ip_adapter_extensions: list[XLabsIPAdapterExtension],
-    # extra img tokens
+    # extra img tokens (channel-wise)
     img_cond: torch.Tensor | None,
+    # extra img tokens (sequence-wise) - for Kontext conditioning
+    img_cond_seq: torch.Tensor | None = None,
+    img_cond_seq_ids: torch.Tensor | None = None,
 ):
     # step 0 is the initial state
     total_steps = len(timesteps) - 1
@@ -46,6 +49,10 @@ def denoise(
     )
     # guidance_vec is ignored for schnell.
     guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
+
+    # Store original sequence length for slicing predictions
+    original_seq_len = img.shape[1]
+
     for step_index, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
         t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)

@@ -71,10 +78,26 @@ def denoise(
         # controlnet_residuals datastructure is efficient in that it likely contains multiple references to the same
         # tensors. Calculating the sum materializes each tensor into its own instance.
         merged_controlnet_residuals = sum_controlnet_flux_outputs(controlnet_residuals)
-        pred_img = torch.cat((img, img_cond), dim=-1) if img_cond is not None else img
+
+        # Prepare input for model - concatenate fresh each step
+        img_input = img
+        img_input_ids = img_ids
+
+        # Add channel-wise conditioning (for ControlNet, FLUX Fill, etc.)
+        if img_cond is not None:
+            img_input = torch.cat((img_input, img_cond), dim=-1)
+
+        # Add sequence-wise conditioning (for Kontext)
+        if img_cond_seq is not None:
+            assert img_cond_seq_ids is not None, (
+                "You need to provide either both or neither of the sequence conditioning"
+            )
+            img_input = torch.cat((img_input, img_cond_seq), dim=1)
+            img_input_ids = torch.cat((img_input_ids, img_cond_seq_ids), dim=1)

         pred = model(
-            img=pred_img,
-            img_ids=img_ids,
+            img=img_input,
+            img_ids=img_input_ids,
             txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
             txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
             y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
@@ -88,6 +111,10 @@ def denoise(
             regional_prompting_extension=pos_regional_prompting_extension,
         )

+        # Slice prediction to only include the main image tokens
+        if img_input_ids is not None:
+            pred = pred[:, :original_seq_len]
+
         step_cfg_scale = cfg_scale[step_index]

         # If step_cfg_scale is 1.0, then we don't need to run the negative prediction.
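
A shape sketch of the two conditioning paths introduced above, with illustrative tensor sizes (not taken from the diff):

import torch

b, seq, ch = 1, 4096, 64
img = torch.zeros(b, seq, ch)

# Channel-wise (FLUX Fill / ControlNet): widens each token's feature vector.
img_cond = torch.zeros(b, seq, ch)
print(torch.cat((img, img_cond), dim=-1).shape)  # torch.Size([1, 4096, 128])

# Sequence-wise (Kontext): appends reference-image tokens after the main
# tokens, which is why the prediction is later sliced back to original_seq_len.
img_cond_seq = torch.zeros(b, 1024, ch)
full = torch.cat((img, img_cond_seq), dim=1)
print(full.shape)           # torch.Size([1, 5120, 64])
print(full[:, :seq].shape)  # torch.Size([1, 4096, 64]) - main tokens only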
invokeai/backend/flux/extensions/kontext_extension.py (new file, 149 lines)
@@ -0,0 +1,149 @@
import einops
import numpy as np
import torch
from einops import repeat
from PIL import Image

from invokeai.app.invocations.fields import FluxKontextConditioningField
from invokeai.app.invocations.flux_vae_encode import FluxVaeEncodeInvocation
from invokeai.app.invocations.model import VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.sampling_utils import pack
from invokeai.backend.flux.util import PREFERED_KONTEXT_RESOLUTIONS


def generate_img_ids_with_offset(
    latent_height: int,
    latent_width: int,
    batch_size: int,
    device: torch.device,
    dtype: torch.dtype,
    idx_offset: int = 0,
) -> torch.Tensor:
    """Generate tensor of image position ids with an optional offset.

    Args:
        latent_height (int): Height of image in latent space (after packing, this becomes h//2).
        latent_width (int): Width of image in latent space (after packing, this becomes w//2).
        batch_size (int): Number of images in the batch.
        device (torch.device): Device to create tensors on.
        dtype (torch.dtype): Data type for the tensors.
        idx_offset (int): Offset to add to the first dimension of the image ids.

    Returns:
        torch.Tensor: Image position ids with shape [batch_size, (latent_height//2 * latent_width//2), 3].
    """

    if device.type == "mps":
        orig_dtype = dtype
        dtype = torch.float16

    # After packing, the spatial dimensions are halved due to the 2x2 patch structure
    packed_height = latent_height // 2
    packed_width = latent_width // 2

    # Create base tensor for position IDs with shape [packed_height, packed_width, 3]
    # The 3 channels represent: [batch_offset, y_position, x_position]
    img_ids = torch.zeros(packed_height, packed_width, 3, device=device, dtype=dtype)

    # Set the batch offset for all positions
    img_ids[..., 0] = idx_offset

    # Create y-coordinate indices (vertical positions)
    y_indices = torch.arange(packed_height, device=device, dtype=dtype)
    # Broadcast y_indices to match the spatial dimensions [packed_height, 1]
    img_ids[..., 1] = y_indices[:, None]

    # Create x-coordinate indices (horizontal positions)
    x_indices = torch.arange(packed_width, device=device, dtype=dtype)
    # Broadcast x_indices to match the spatial dimensions [1, packed_width]
    img_ids[..., 2] = x_indices[None, :]

    # Expand to include batch dimension: [batch_size, (packed_height * packed_width), 3]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)

    if device.type == "mps":
        img_ids = img_ids.to(orig_dtype)

    return img_ids


class KontextExtension:
    """Applies FLUX Kontext (reference image) conditioning."""

    def __init__(
        self,
        kontext_conditioning: FluxKontextConditioningField,
        context: InvocationContext,
        vae_field: VAEField,
        device: torch.device,
        dtype: torch.dtype,
    ):
        """
        Initializes the KontextExtension, pre-processing the reference image
        into latents and positional IDs.
        """
        self._context = context
        self._device = device
        self._dtype = dtype
        self._vae_field = vae_field
        self.kontext_conditioning = kontext_conditioning

        # Pre-process and cache the kontext latents and ids upon initialization.
        self.kontext_latents, self.kontext_ids = self._prepare_kontext()

    def _prepare_kontext(self) -> tuple[torch.Tensor, torch.Tensor]:
        """Encodes the reference image and prepares its latents and IDs."""
        image = self._context.images.get_pil(self.kontext_conditioning.image.image_name)

        # Calculate aspect ratio of input image
        width, height = image.size
        aspect_ratio = width / height

        # Find the closest preferred resolution by aspect ratio
        _, target_width, target_height = min(
            ((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS), key=lambda x: x[0]
        )

        # Apply BFL's scaling formula
        # This ensures compatibility with the model's training
        scaled_width = 2 * int(target_width / 16)
        scaled_height = 2 * int(target_height / 16)

        # Resize to the exact resolution used during training
        image = image.convert("RGB")
        final_width = 8 * scaled_width
        final_height = 8 * scaled_height
        image = image.resize((final_width, final_height), Image.Resampling.LANCZOS)

        # Convert to tensor with same normalization as BFL
        image_np = np.array(image)
        image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0
        image_tensor = einops.rearrange(image_tensor, "h w c -> 1 c h w")
        image_tensor = image_tensor.to(self._device)

        # Continue with VAE encoding
        vae_info = self._context.models.load(self._vae_field.vae)
        kontext_latents_unpacked = FluxVaeEncodeInvocation.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

        # Extract tensor dimensions
        batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape

        # Pack the latents and generate IDs
        kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)
        kontext_ids = generate_img_ids_with_offset(
            latent_height=latent_height,
            latent_width=latent_width,
            batch_size=batch_size,
            device=self._device,
            dtype=self._dtype,
            idx_offset=1,
        )

        return kontext_latents_packed, kontext_ids

    def ensure_batch_size(self, target_batch_size: int) -> None:
        """Ensures the kontext latents and IDs match the target batch size by repeating if necessary."""
        if self.kontext_latents.shape[0] != target_batch_size:
            self.kontext_latents = self.kontext_latents.repeat(target_batch_size, 1, 1)
            self.kontext_ids = self.kontext_ids.repeat(target_batch_size, 1, 1)
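
A minimal sketch of the id-offset scheme this file introduces: main image tokens carry 0 in the first id channel while Kontext reference tokens carry 1 (idx_offset=1 above), so the model can tell the two token streams apart after they are concatenated. The helper below is a stripped-down stand-in, not the project's function:

import torch
from einops import repeat

def ids(h: int, w: int, offset: int) -> torch.Tensor:
    # [batch_offset, y, x] per packed 2x2 patch, as in generate_img_ids_with_offset.
    grid = torch.zeros(h // 2, w // 2, 3)
    grid[..., 0] = offset
    grid[..., 1] = torch.arange(h // 2)[:, None].float()
    grid[..., 2] = torch.arange(w // 2)[None, :].float()
    return repeat(grid, "h w c -> b (h w) c", b=1)

main_ids = ids(8, 8, offset=0)
kontext_ids = ids(8, 8, offset=1)
print(main_ids[0, 0], kontext_ids[0, 0])  # tensor([0., 0., 0.]) tensor([1., 0., 0.])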
@@ -174,11 +174,13 @@ def generate_img_ids(h: int, w: int, batch_size: int, device: torch.device, dtyp
         dtype = torch.float16

     img_ids = torch.zeros(h // 2, w // 2, 3, device=device, dtype=dtype)
+    # Set batch offset to 0 for main image tokens
+    img_ids[..., 0] = 0
     img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=device, dtype=dtype)[:, None]
     img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=device, dtype=dtype)[None, :]
     img_ids = repeat(img_ids, "h w c -> b (h w) c", b=batch_size)

     if device.type == "mps":
-        img_ids.to(orig_dtype)
+        img_ids = img_ids.to(orig_dtype)

     return img_ids
@@ -18,6 +18,29 @@ class ModelSpec:
     repo_ae: str | None


+# Preferred resolutions for Kontext models to avoid tiling artifacts
+# These are the specific resolutions the model was trained on
+PREFERED_KONTEXT_RESOLUTIONS = [
+    (672, 1568),
+    (688, 1504),
+    (720, 1456),
+    (752, 1392),
+    (800, 1328),
+    (832, 1248),
+    (880, 1184),
+    (944, 1104),
+    (1024, 1024),
+    (1104, 944),
+    (1184, 880),
+    (1248, 832),
+    (1328, 800),
+    (1392, 752),
+    (1456, 720),
+    (1504, 688),
+    (1568, 672),
+]
+
+
 max_seq_lengths: Dict[str, Literal[256, 512]] = {
     "flux-dev": 512,
     "flux-dev-fill": 512,
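
A worked example of the aspect-ratio match that _prepare_kontext runs against this table: pick the preferred resolution whose w/h ratio is closest to the input image's. The input size and the abbreviated table below are illustrative:

PREFERED_KONTEXT_RESOLUTIONS = [(672, 1568), (1024, 1024), (1568, 672)]  # abbreviated

width, height = 1600, 800  # illustrative input size, aspect ratio 2.0
aspect_ratio = width / height
_, target_w, target_h = min(
    ((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS),
    key=lambda x: x[0],
)
print(target_w, target_h)  # 1568 672 - the ratio closest to 2.0 in this subset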
@@ -42,4 +42,5 @@ IP-Adapters:
 - [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
 - [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
 - [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
 - [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
+- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)
@@ -37,6 +37,7 @@ from invokeai.app.util.misc import uuid_string
|
||||
from invokeai.backend.model_hash.hash_validator import validate_hash
|
||||
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
|
||||
from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
|
||||
from invokeai.backend.model_manager.omi import flux_dev_1_lora, stable_diffusion_xl_1_lora
|
||||
from invokeai.backend.model_manager.taxonomy import (
|
||||
AnyVariant,
|
||||
BaseModelType,
|
||||
@@ -186,7 +187,7 @@ class ModelConfigBase(ABC, BaseModel):
|
||||
else:
|
||||
return config_cls.from_model_on_disk(mod, **overrides)
|
||||
|
||||
raise InvalidModelConfigException("No valid config found")
|
||||
raise InvalidModelConfigException("Unable to determine model type")
|
||||
|
||||
@classmethod
|
||||
def get_tag(cls) -> Tag:
|
||||
@@ -296,7 +297,7 @@ class LoRAConfigBase(ABC, BaseModel):
         from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict

         sd = mod.load_state_dict(mod.path)
-        value = flux_format_from_state_dict(sd)
+        value = flux_format_from_state_dict(sd, mod.metadata())
         mod.cache[key] = value
         return value
@@ -334,6 +335,36 @@ class T5EncoderBnbQuantizedLlmInt8bConfig(T5EncoderConfigBase, LegacyProbeMixin,
     format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = ModelFormat.BnbQuantizedLlmInt8b


+class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
+    format: Literal[ModelFormat.OMI] = ModelFormat.OMI
+
+    @classmethod
+    def matches(cls, mod: ModelOnDisk) -> bool:
+        if mod.path.is_dir():
+            return False
+
+        metadata = mod.metadata()
+        return (
+            metadata.get("modelspec.sai_model_spec")
+            and metadata.get("ot_branch") == "omi_format"
+            and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
+        )
+
+    @classmethod
+    def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
+        metadata = mod.metadata()
+        architecture = metadata["modelspec.architecture"]
+
+        if architecture == stable_diffusion_xl_1_lora:
+            base = BaseModelType.StableDiffusionXL
+        elif architecture == flux_dev_1_lora:
+            base = BaseModelType.Flux
+        else:
+            raise InvalidModelConfigException(f"Unrecognised/unsupported architecture for OMI LoRA: {architecture}")
+
+        return {"base": base}
+
+
 class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):
     """Model config for LoRA/Lycoris models."""
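Note: matches inspects the safetensors header metadata rather than the tensors themselves. A minimal illustration of metadata that would satisfy the checks above (values other than the required ones are made up):

# Illustrative metadata dict, shaped the way matches() expects it.
metadata = {
    "modelspec.sai_model_spec": "1.0.0",         # presence is all matches() checks
    "ot_branch": "omi_format",                   # must equal "omi_format"
    "modelspec.architecture": "Flux.1-dev/lora", # "<base>/lora" -> split("/")[1] == "lora"
}

is_omi_lora = (
    metadata.get("modelspec.sai_model_spec")
    and metadata.get("ot_branch") == "omi_format"
    and metadata["modelspec.architecture"].split("/")[1].lower() == "lora"
)
print(bool(is_omi_lora))  # True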
@@ -350,7 +381,7 @@ class LoRALyCORISConfig(LoRAConfigBase, ModelConfigBase):

         state_dict = mod.load_state_dict()
         for key in state_dict.keys():
-            if type(key) is int:
+            if isinstance(key, int):
                 continue

             if key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")):
@@ -668,6 +699,7 @@ AnyModelConfig = Annotated[
     Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
     Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
     Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
+    Annotated[LoRAOmiConfig, LoRAOmiConfig.get_tag()],
     Annotated[ControlLoRALyCORISConfig, ControlLoRALyCORISConfig.get_tag()],
     Annotated[ControlLoRADiffusersConfig, ControlLoRADiffusersConfig.get_tag()],
     Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
@@ -7,7 +7,14 @@ from typing import Optional
 import accelerate
 import torch
 from safetensors.torch import load_file
-from transformers import AutoConfig, AutoModelForTextEncoding, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
+from transformers import (
+    AutoConfig,
+    AutoModelForTextEncoding,
+    CLIPTextModel,
+    CLIPTokenizer,
+    T5EncoderModel,
+    T5TokenizerFast,
+)

 from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.flux.controlnet.instantx_controlnet_flux import InstantXControlNetFlux
@@ -139,7 +146,7 @@ class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
         )
         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 te2_model_path = Path(config.path) / "text_encoder_2"
                 model_config = AutoConfig.from_pretrained(te2_model_path)
@@ -183,7 +190,7 @@ class T5EncoderCheckpointModel(ModelLoader):

         match submodel_type:
             case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
-                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
+                return T5TokenizerFast.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
             case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                 return T5EncoderModel.from_pretrained(
                     Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True
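Note: T5TokenizerFast is the tokenizers-backed implementation of the same tokenizer (the slow T5Tokenizer depends on sentencepiece); both expose the same from_pretrained interface, so call sites are otherwise unchanged. A quick sketch with a hypothetical local path:

from pathlib import Path

from transformers import T5TokenizerFast

# Hypothetical local model directory following the layout used above.
tokenizer = T5TokenizerFast.from_pretrained(Path("/models/t5_encoder") / "tokenizer_2", max_length=512)
print(tokenizer("a photo of a cat").input_ids[:5])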
@@ -13,6 +13,7 @@ from invokeai.backend.model_manager.config import AnyModelConfig
 from invokeai.backend.model_manager.load.load_default import ModelLoader
 from invokeai.backend.model_manager.load.model_cache.model_cache import ModelCache
 from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
+from invokeai.backend.model_manager.omi.omi import convert_from_omi
 from invokeai.backend.model_manager.taxonomy import (
     AnyModel,
     BaseModelType,
@@ -20,6 +21,10 @@ from invokeai.backend.model_manager.taxonomy import (
     ModelType,
     SubModelType,
 )
+from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
+    is_state_dict_likely_in_flux_aitoolkit_format,
+    lora_model_from_flux_aitoolkit_state_dict,
+)
 from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
     is_state_dict_likely_flux_control,
     lora_model_from_flux_control_state_dict,
@@ -39,6 +44,8 @@ from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import l
 from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format


+@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.LoRA, format=ModelFormat.OMI)
+@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.LoRA, format=ModelFormat.OMI)
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers)
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.LyCORIS)
 @ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.ControlLoRa, format=ModelFormat.LyCORIS)
@@ -73,12 +80,23 @@ class LoRALoader(ModelLoader):
         else:
             state_dict = torch.load(model_path, map_location="cpu")

         # Strip 'bundle_emb' keys - these are unused and currently cause downstream errors.
         # To revisit later to determine if they're needed/useful.
         state_dict = {k: v for k, v in state_dict.items() if not k.startswith("bundle_emb")}

+        # At the time of writing, we support the OMI standard for base models Flux and SDXL
+        if config.format == ModelFormat.OMI and self._model_base in [
+            BaseModelType.StableDiffusionXL,
+            BaseModelType.Flux,
+        ]:
+            state_dict = convert_from_omi(state_dict, config.base)  # type: ignore
+
         # Apply state_dict key conversions, if necessary.
         if self._model_base == BaseModelType.StableDiffusionXL:
             state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
             model = lora_model_from_sd_state_dict(state_dict=state_dict)
         elif self._model_base == BaseModelType.Flux:
-            if config.format == ModelFormat.Diffusers:
+            if config.format in [ModelFormat.Diffusers, ModelFormat.OMI]:
                 # HACK(ryand): We set alpha=None for diffusers PEFT format models. These models are typically
                 # distributed as a single file without the associated metadata containing the alpha value. We chose
                 # alpha=None, because this is treated as alpha=rank internally in `LoRALayerBase.scale()`. alpha=rank
@@ -92,8 +110,10 @@ class LoRALoader(ModelLoader):
                     model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
                 elif is_state_dict_likely_flux_control(state_dict=state_dict):
                     model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
+                elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
+                    model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
                 else:
-                    raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
+                    raise ValueError("LoRA model is in unsupported FLUX format")
             else:
                 raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
         elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:
invokeai/backend/model_manager/omi/__init__.py (Normal file, 7 lines)
@@ -0,0 +1,7 @@
from invokeai.backend.model_manager.omi.omi import convert_from_omi
from invokeai.backend.model_manager.omi.vendor.model_spec.architecture import (
    flux_dev_1_lora,
    stable_diffusion_xl_1_lora,
)

__all__ = ["flux_dev_1_lora", "stable_diffusion_xl_1_lora", "convert_from_omi"]
invokeai/backend/model_manager/omi/omi.py (Normal file, 21 lines)
@@ -0,0 +1,21 @@
from invokeai.backend.model_manager.model_on_disk import StateDict
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
    convert_flux_lora as omi_flux,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
    convert_lora_util as lora_util,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora import (
    convert_sdxl_lora as omi_sdxl,
)
from invokeai.backend.model_manager.taxonomy import BaseModelType


def convert_from_omi(weights_sd: StateDict, base: BaseModelType):
    keyset = {
        BaseModelType.Flux: omi_flux.convert_flux_lora_key_sets(),
        BaseModelType.StableDiffusionXL: omi_sdxl.convert_sdxl_lora_key_sets(),
    }[base]
    source = "omi"
    target = "legacy_diffusers"
    return lora_util.__convert(weights_sd, keyset, source, target)  # type: ignore
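Note: a usage sketch (the file path is hypothetical): load an OMI-format FLUX LoRA and convert its keys to the legacy-diffusers naming that the rest of InvokeAI's LoRA pipeline expects.

from safetensors.torch import load_file

from invokeai.backend.model_manager.omi import convert_from_omi
from invokeai.backend.model_manager.taxonomy import BaseModelType

state_dict = load_file("/models/loras/my_omi_flux_lora.safetensors")  # hypothetical path
converted = convert_from_omi(state_dict, BaseModelType.Flux)
print(next(iter(converted)))  # keys now use the lora_transformer_* / lora_te*_ prefixes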
invokeai/backend/model_manager/omi/vendor/__init__.py (vendored, Normal file, 0 lines)
invokeai/backend/model_manager/omi/vendor/convert/__init__.py (vendored, Normal file, 0 lines)
invokeai/backend/model_manager/omi/vendor/convert/lora/__init__.py (vendored, Normal file, 0 lines)
invokeai/backend/model_manager/omi/vendor/convert/lora/convert_clip.py (vendored, Normal file, 20 lines)
@@ -0,0 +1,20 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
    LoraConversionKeySet,
    map_prefix_range,
)


def map_clip(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("text_projection", "text_projection", parent=key_prefix)]

    for k in map_prefix_range("text_model.encoder.layers", "text_model.encoder.layers", parent=key_prefix):
        keys += [LoraConversionKeySet("mlp.fc1", "mlp.fc1", parent=k)]
        keys += [LoraConversionKeySet("mlp.fc2", "mlp.fc2", parent=k)]
        keys += [LoraConversionKeySet("self_attn.k_proj", "self_attn.k_proj", parent=k)]
        keys += [LoraConversionKeySet("self_attn.out_proj", "self_attn.out_proj", parent=k)]
        keys += [LoraConversionKeySet("self_attn.q_proj", "self_attn.q_proj", parent=k)]
        keys += [LoraConversionKeySet("self_attn.v_proj", "self_attn.v_proj", parent=k)]

    return keys
invokeai/backend/model_manager/omi/vendor/convert/lora/convert_flux_lora.py (vendored, Normal file, 84 lines)
@@ -0,0 +1,84 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_clip import map_clip
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
    LoraConversionKeySet,
    map_prefix_range,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_t5 import map_t5


def __map_double_transformer_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("img_attn.qkv.0", "attn.to_q", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_attn.qkv.1", "attn.to_k", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_attn.qkv.2", "attn.to_v", parent=key_prefix)]

    keys += [LoraConversionKeySet("txt_attn.qkv.0", "attn.add_q_proj", parent=key_prefix)]
    keys += [LoraConversionKeySet("txt_attn.qkv.1", "attn.add_k_proj", parent=key_prefix)]
    keys += [LoraConversionKeySet("txt_attn.qkv.2", "attn.add_v_proj", parent=key_prefix)]

    keys += [LoraConversionKeySet("img_attn.proj", "attn.to_out.0", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_mlp.0", "ff.net.0.proj", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_mlp.2", "ff.net.2", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_mod.lin", "norm1.linear", parent=key_prefix)]

    keys += [LoraConversionKeySet("txt_attn.proj", "attn.to_add_out", parent=key_prefix)]
    keys += [LoraConversionKeySet("txt_mlp.0", "ff_context.net.0.proj", parent=key_prefix)]
    keys += [LoraConversionKeySet("txt_mlp.2", "ff_context.net.2", parent=key_prefix)]
    keys += [LoraConversionKeySet("txt_mod.lin", "norm1_context.linear", parent=key_prefix)]

    return keys


def __map_single_transformer_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("linear1.0", "attn.to_q", parent=key_prefix)]
    keys += [LoraConversionKeySet("linear1.1", "attn.to_k", parent=key_prefix)]
    keys += [LoraConversionKeySet("linear1.2", "attn.to_v", parent=key_prefix)]
    keys += [LoraConversionKeySet("linear1.3", "proj_mlp", parent=key_prefix)]

    keys += [LoraConversionKeySet("linear2", "proj_out", parent=key_prefix)]
    keys += [LoraConversionKeySet("modulation.lin", "norm.linear", parent=key_prefix)]

    return keys


def __map_transformer(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("txt_in", "context_embedder", parent=key_prefix)]
    keys += [
        LoraConversionKeySet("final_layer.adaLN_modulation.1", "norm_out.linear", parent=key_prefix, swap_chunks=True)
    ]
    keys += [LoraConversionKeySet("final_layer.linear", "proj_out", parent=key_prefix)]
    keys += [
        LoraConversionKeySet("guidance_in.in_layer", "time_text_embed.guidance_embedder.linear_1", parent=key_prefix)
    ]
    keys += [
        LoraConversionKeySet("guidance_in.out_layer", "time_text_embed.guidance_embedder.linear_2", parent=key_prefix)
    ]
    keys += [LoraConversionKeySet("vector_in.in_layer", "time_text_embed.text_embedder.linear_1", parent=key_prefix)]
    keys += [LoraConversionKeySet("vector_in.out_layer", "time_text_embed.text_embedder.linear_2", parent=key_prefix)]
    keys += [LoraConversionKeySet("time_in.in_layer", "time_text_embed.timestep_embedder.linear_1", parent=key_prefix)]
    keys += [LoraConversionKeySet("time_in.out_layer", "time_text_embed.timestep_embedder.linear_2", parent=key_prefix)]
    keys += [LoraConversionKeySet("img_in.proj", "x_embedder", parent=key_prefix)]

    for k in map_prefix_range("double_blocks", "transformer_blocks", parent=key_prefix):
        keys += __map_double_transformer_block(k)

    for k in map_prefix_range("single_blocks", "single_transformer_blocks", parent=key_prefix):
        keys += __map_single_transformer_block(k)

    return keys


def convert_flux_lora_key_sets() -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("bundle_emb", "bundle_emb")]
    keys += __map_transformer(LoraConversionKeySet("transformer", "lora_transformer"))
    keys += map_clip(LoraConversionKeySet("clip_l", "lora_te1"))
    keys += map_t5(LoraConversionKeySet("t5", "lora_te2"))

    return keys
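Note: each LoraConversionKeySet rewrites one module path. An illustrative trace of a single OMI key through the FLUX table (a sketch, not code from this commit):

from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_flux_lora import (
    convert_flux_lora_key_sets,
)

omi_key = "transformer.double_blocks.0.img_attn.qkv.0.lora_down.weight"
for ks in convert_flux_lora_key_sets():
    if omi_key.startswith(ks.omi_prefix):
        print(ks.get_key(ks.omi_prefix, omi_key, "legacy_diffusers"))
        # -> lora_transformer_transformer_blocks_0_attn_to_q.lora_down.weight
        break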
invokeai/backend/model_manager/omi/vendor/convert/lora/convert_lora_util.py (vendored, Normal file, 217 lines)
@@ -0,0 +1,217 @@
import torch
from torch import Tensor
from typing_extensions import Self


class LoraConversionKeySet:
    def __init__(
        self,
        omi_prefix: str,
        diffusers_prefix: str,
        legacy_diffusers_prefix: str | None = None,
        parent: Self | None = None,
        swap_chunks: bool = False,
        filter_is_last: bool | None = None,
        next_omi_prefix: str | None = None,
        next_diffusers_prefix: str | None = None,
    ):
        if parent is not None:
            self.omi_prefix = combine(parent.omi_prefix, omi_prefix)
            self.diffusers_prefix = combine(parent.diffusers_prefix, diffusers_prefix)
        else:
            self.omi_prefix = omi_prefix
            self.diffusers_prefix = diffusers_prefix

        if legacy_diffusers_prefix is None:
            self.legacy_diffusers_prefix = self.diffusers_prefix.replace(".", "_")
        elif parent is not None:
            self.legacy_diffusers_prefix = combine(parent.legacy_diffusers_prefix, legacy_diffusers_prefix).replace(
                ".", "_"
            )
        else:
            self.legacy_diffusers_prefix = legacy_diffusers_prefix

        self.parent = parent
        self.swap_chunks = swap_chunks
        self.filter_is_last = filter_is_last
        self.prefix = parent

        if next_omi_prefix is None and parent is not None:
            self.next_omi_prefix = parent.next_omi_prefix
            self.next_diffusers_prefix = parent.next_diffusers_prefix
            self.next_legacy_diffusers_prefix = parent.next_legacy_diffusers_prefix
        elif next_omi_prefix is not None and parent is not None:
            self.next_omi_prefix = combine(parent.omi_prefix, next_omi_prefix)
            self.next_diffusers_prefix = combine(parent.diffusers_prefix, next_diffusers_prefix)
            self.next_legacy_diffusers_prefix = combine(parent.legacy_diffusers_prefix, next_diffusers_prefix).replace(
                ".", "_"
            )
        elif next_omi_prefix is not None and parent is None:
            self.next_omi_prefix = next_omi_prefix
            self.next_diffusers_prefix = next_diffusers_prefix
            self.next_legacy_diffusers_prefix = next_diffusers_prefix.replace(".", "_")
        else:
            self.next_omi_prefix = None
            self.next_diffusers_prefix = None
            self.next_legacy_diffusers_prefix = None

    def __get_omi(self, in_prefix: str, key: str) -> str:
        return self.omi_prefix + key.removeprefix(in_prefix)

    def __get_diffusers(self, in_prefix: str, key: str) -> str:
        return self.diffusers_prefix + key.removeprefix(in_prefix)

    def __get_legacy_diffusers(self, in_prefix: str, key: str) -> str:
        key = self.legacy_diffusers_prefix + key.removeprefix(in_prefix)

        suffix = key[key.rfind(".") :]
        if suffix not in [".alpha", ".dora_scale"]:  # some keys only have a single . in the suffix
            suffix = key[key.removesuffix(suffix).rfind(".") :]
        key = key.removesuffix(suffix)

        return key.replace(".", "_") + suffix

    def get_key(self, in_prefix: str, key: str, target: str) -> str:
        if target == "omi":
            return self.__get_omi(in_prefix, key)
        elif target == "diffusers":
            return self.__get_diffusers(in_prefix, key)
        elif target == "legacy_diffusers":
            return self.__get_legacy_diffusers(in_prefix, key)
        return key

    def __str__(self) -> str:
        return f"omi: {self.omi_prefix}, diffusers: {self.diffusers_prefix}, legacy: {self.legacy_diffusers_prefix}"


def combine(left: str, right: str) -> str:
    left = left.rstrip(".")
    right = right.lstrip(".")
    if left == "" or left is None:
        return right
    elif right == "" or right is None:
        return left
    else:
        return left + "." + right


def map_prefix_range(
    omi_prefix: str,
    diffusers_prefix: str,
    parent: LoraConversionKeySet,
) -> list[LoraConversionKeySet]:
    # 100 should be a safe upper bound. increase if it's not enough in the future
    return [
        LoraConversionKeySet(
            omi_prefix=f"{omi_prefix}.{i}",
            diffusers_prefix=f"{diffusers_prefix}.{i}",
            parent=parent,
            next_omi_prefix=f"{omi_prefix}.{i + 1}",
            next_diffusers_prefix=f"{diffusers_prefix}.{i + 1}",
        )
        for i in range(100)
    ]


def __convert(
    state_dict: dict[str, Tensor],
    key_sets: list[LoraConversionKeySet],
    source: str,
    target: str,
) -> dict[str, Tensor]:
    out_states = {}

    if source == target:
        return dict(state_dict)

    # TODO: maybe replace with a non O(n^2) algorithm
    for key, tensor in state_dict.items():
        for key_set in key_sets:
            in_prefix = ""

            if source == "omi":
                in_prefix = key_set.omi_prefix
            elif source == "diffusers":
                in_prefix = key_set.diffusers_prefix
            elif source == "legacy_diffusers":
                in_prefix = key_set.legacy_diffusers_prefix

            if not key.startswith(in_prefix):
                continue

            if key_set.filter_is_last is not None:
                next_prefix = None
                if source == "omi":
                    next_prefix = key_set.next_omi_prefix
                elif source == "diffusers":
                    next_prefix = key_set.next_diffusers_prefix
                elif source == "legacy_diffusers":
                    next_prefix = key_set.next_legacy_diffusers_prefix

                is_last = not any(k.startswith(next_prefix) for k in state_dict)
                if key_set.filter_is_last != is_last:
                    continue

            name = key_set.get_key(in_prefix, key, target)

            can_swap_chunks = target == "omi" or source == "omi"
            if key_set.swap_chunks and name.endswith(".lora_up.weight") and can_swap_chunks:
                chunk_0, chunk_1 = tensor.chunk(2, dim=0)
                tensor = torch.cat([chunk_1, chunk_0], dim=0)

            out_states[name] = tensor

            break  # only map the first matching key set

    return out_states


def __detect_source(
    state_dict: dict[str, Tensor],
    key_sets: list[LoraConversionKeySet],
) -> str:
    omi_count = 0
    diffusers_count = 0
    legacy_diffusers_count = 0

    for key in state_dict:
        for key_set in key_sets:
            if key.startswith(key_set.omi_prefix):
                omi_count += 1
            if key.startswith(key_set.diffusers_prefix):
                diffusers_count += 1
            if key.startswith(key_set.legacy_diffusers_prefix):
                legacy_diffusers_count += 1

    if omi_count > diffusers_count and omi_count > legacy_diffusers_count:
        return "omi"
    if diffusers_count > omi_count and diffusers_count > legacy_diffusers_count:
        return "diffusers"
    if legacy_diffusers_count > omi_count and legacy_diffusers_count > diffusers_count:
        return "legacy_diffusers"

    return ""


def convert_to_omi(
    state_dict: dict[str, Tensor],
    key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
    source = __detect_source(state_dict, key_sets)
    return __convert(state_dict, key_sets, source, "omi")


def convert_to_diffusers(
    state_dict: dict[str, Tensor],
    key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
    source = __detect_source(state_dict, key_sets)
    return __convert(state_dict, key_sets, source, "diffusers")


def convert_to_legacy_diffusers(
    state_dict: dict[str, Tensor],
    key_sets: list[LoraConversionKeySet],
) -> dict[str, Tensor]:
    source = __detect_source(state_dict, key_sets)
    return __convert(state_dict, key_sets, source, "legacy_diffusers")
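Note: the public entry points auto-detect the naming scheme by counting prefix matches, then convert. A small self-contained sketch using the FLUX key sets (tensor values are placeholders):

import torch

from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_flux_lora import (
    convert_flux_lora_key_sets,
)
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
    convert_to_legacy_diffusers,
)

# Tiny fake OMI-format state dict.
state_dict = {
    "transformer.single_blocks.0.linear2.lora_down.weight": torch.zeros(4, 8),
    "transformer.single_blocks.0.linear2.lora_up.weight": torch.zeros(8, 4),
}
converted = convert_to_legacy_diffusers(state_dict, convert_flux_lora_key_sets())
print(sorted(converted))
# ['lora_transformer_single_transformer_blocks_0_proj_out.lora_down.weight',
#  'lora_transformer_single_transformer_blocks_0_proj_out.lora_up.weight']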
invokeai/backend/model_manager/omi/vendor/convert/lora/convert_sdxl_lora.py (vendored, Normal file, 125 lines)
@@ -0,0 +1,125 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_clip import map_clip
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
    LoraConversionKeySet,
    map_prefix_range,
)


def __map_unet_resnet_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("emb_layers.1", "time_emb_proj", parent=key_prefix)]
    keys += [LoraConversionKeySet("in_layers.2", "conv1", parent=key_prefix)]
    keys += [LoraConversionKeySet("out_layers.3", "conv2", parent=key_prefix)]
    keys += [LoraConversionKeySet("skip_connection", "conv_shortcut", parent=key_prefix)]

    return keys


def __map_unet_attention_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("proj_in", "proj_in", parent=key_prefix)]
    keys += [LoraConversionKeySet("proj_out", "proj_out", parent=key_prefix)]
    for k in map_prefix_range("transformer_blocks", "transformer_blocks", parent=key_prefix):
        keys += [LoraConversionKeySet("attn1.to_q", "attn1.to_q", parent=k)]
        keys += [LoraConversionKeySet("attn1.to_k", "attn1.to_k", parent=k)]
        keys += [LoraConversionKeySet("attn1.to_v", "attn1.to_v", parent=k)]
        keys += [LoraConversionKeySet("attn1.to_out.0", "attn1.to_out.0", parent=k)]
        keys += [LoraConversionKeySet("attn2.to_q", "attn2.to_q", parent=k)]
        keys += [LoraConversionKeySet("attn2.to_k", "attn2.to_k", parent=k)]
        keys += [LoraConversionKeySet("attn2.to_v", "attn2.to_v", parent=k)]
        keys += [LoraConversionKeySet("attn2.to_out.0", "attn2.to_out.0", parent=k)]
        keys += [LoraConversionKeySet("ff.net.0.proj", "ff.net.0.proj", parent=k)]
        keys += [LoraConversionKeySet("ff.net.2", "ff.net.2", parent=k)]

    return keys


def __map_unet_down_blocks(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += __map_unet_resnet_block(LoraConversionKeySet("1.0", "0.resnets.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("2.0", "0.resnets.1", parent=key_prefix))
    keys += [LoraConversionKeySet("3.0.op", "0.downsamplers.0.conv", parent=key_prefix)]

    keys += __map_unet_resnet_block(LoraConversionKeySet("4.0", "1.resnets.0", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("4.1", "1.attentions.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("5.0", "1.resnets.1", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("5.1", "1.attentions.1", parent=key_prefix))
    keys += [LoraConversionKeySet("6.0.op", "1.downsamplers.0.conv", parent=key_prefix)]

    keys += __map_unet_resnet_block(LoraConversionKeySet("7.0", "2.resnets.0", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("7.1", "2.attentions.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("8.0", "2.resnets.1", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("8.1", "2.attentions.1", parent=key_prefix))

    return keys


def __map_unet_mid_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += __map_unet_resnet_block(LoraConversionKeySet("0", "resnets.0", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("1", "attentions.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("2", "resnets.1", parent=key_prefix))

    return keys


def __map_unet_up_block(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += __map_unet_resnet_block(LoraConversionKeySet("0.0", "0.resnets.0", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("0.1", "0.attentions.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("1.0", "0.resnets.1", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("1.1", "0.attentions.1", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("2.0", "0.resnets.2", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("2.1", "0.attentions.2", parent=key_prefix))
    keys += [LoraConversionKeySet("2.2.conv", "0.upsamplers.0.conv", parent=key_prefix)]

    keys += __map_unet_resnet_block(LoraConversionKeySet("3.0", "1.resnets.0", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("3.1", "1.attentions.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("4.0", "1.resnets.1", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("4.1", "1.attentions.1", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("5.0", "1.resnets.2", parent=key_prefix))
    keys += __map_unet_attention_block(LoraConversionKeySet("5.1", "1.attentions.2", parent=key_prefix))
    keys += [LoraConversionKeySet("5.2.conv", "1.upsamplers.0.conv", parent=key_prefix)]

    keys += __map_unet_resnet_block(LoraConversionKeySet("6.0", "2.resnets.0", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("7.0", "2.resnets.1", parent=key_prefix))
    keys += __map_unet_resnet_block(LoraConversionKeySet("8.0", "2.resnets.2", parent=key_prefix))

    return keys


def __map_unet(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("input_blocks.0.0", "conv_in", parent=key_prefix)]

    keys += [LoraConversionKeySet("time_embed.0", "time_embedding.linear_1", parent=key_prefix)]
    keys += [LoraConversionKeySet("time_embed.2", "time_embedding.linear_2", parent=key_prefix)]

    keys += [LoraConversionKeySet("label_emb.0.0", "add_embedding.linear_1", parent=key_prefix)]
    keys += [LoraConversionKeySet("label_emb.0.2", "add_embedding.linear_2", parent=key_prefix)]

    keys += __map_unet_down_blocks(LoraConversionKeySet("input_blocks", "down_blocks", parent=key_prefix))
    keys += __map_unet_mid_block(LoraConversionKeySet("middle_block", "mid_block", parent=key_prefix))
    keys += __map_unet_up_block(LoraConversionKeySet("output_blocks", "up_blocks", parent=key_prefix))

    keys += [LoraConversionKeySet("out.0", "conv_norm_out", parent=key_prefix)]
    keys += [LoraConversionKeySet("out.2", "conv_out", parent=key_prefix)]

    return keys


def convert_sdxl_lora_key_sets() -> list[LoraConversionKeySet]:
    keys = []

    keys += [LoraConversionKeySet("bundle_emb", "bundle_emb")]
    keys += __map_unet(LoraConversionKeySet("unet", "lora_unet"))
    keys += map_clip(LoraConversionKeySet("clip_l", "lora_te1"))
    keys += map_clip(LoraConversionKeySet("clip_g", "lora_te2"))

    return keys
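Note: the SDXL table works the same way. An illustrative trace of one OMI key (a sketch, not code from this commit):

from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_sdxl_lora import (
    convert_sdxl_lora_key_sets,
)

omi_key = "unet.input_blocks.1.0.emb_layers.1.lora_down.weight"
for ks in convert_sdxl_lora_key_sets():
    if omi_key.startswith(ks.omi_prefix):
        print(ks.get_key(ks.omi_prefix, omi_key, "legacy_diffusers"))
        # -> lora_unet_down_blocks_0_resnets_0_time_emb_proj.lora_down.weight
        break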
invokeai/backend/model_manager/omi/vendor/convert/lora/convert_t5.py (vendored, Normal file, 19 lines)
@@ -0,0 +1,19 @@
from invokeai.backend.model_manager.omi.vendor.convert.lora.convert_lora_util import (
    LoraConversionKeySet,
    map_prefix_range,
)


def map_t5(key_prefix: LoraConversionKeySet) -> list[LoraConversionKeySet]:
    keys = []

    for k in map_prefix_range("encoder.block", "encoder.block", parent=key_prefix):
        keys += [LoraConversionKeySet("layer.0.SelfAttention.k", "layer.0.SelfAttention.k", parent=k)]
        keys += [LoraConversionKeySet("layer.0.SelfAttention.o", "layer.0.SelfAttention.o", parent=k)]
        keys += [LoraConversionKeySet("layer.0.SelfAttention.q", "layer.0.SelfAttention.q", parent=k)]
        keys += [LoraConversionKeySet("layer.0.SelfAttention.v", "layer.0.SelfAttention.v", parent=k)]
        keys += [LoraConversionKeySet("layer.1.DenseReluDense.wi_0", "layer.1.DenseReluDense.wi_0", parent=k)]
        keys += [LoraConversionKeySet("layer.1.DenseReluDense.wi_1", "layer.1.DenseReluDense.wi_1", parent=k)]
        keys += [LoraConversionKeySet("layer.1.DenseReluDense.wo", "layer.1.DenseReluDense.wo", parent=k)]

    return keys
invokeai/backend/model_manager/omi/vendor/model_spec/__init__.py (vendored, Normal file, 0 lines)
invokeai/backend/model_manager/omi/vendor/model_spec/architecture.py (vendored, Normal file, 31 lines)
@@ -0,0 +1,31 @@
stable_diffusion_1_lora = "stable-diffusion-v1/lora"
stable_diffusion_1_inpainting_lora = "stable-diffusion-v1-inpainting/lora"

stable_diffusion_2_512_lora = "stable-diffusion-v2-512/lora"
stable_diffusion_2_768_v_lora = "stable-diffusion-v2-768-v/lora"
stable_diffusion_2_depth_lora = "stable-diffusion-v2-depth/lora"
stable_diffusion_2_inpainting_lora = "stable-diffusion-v2-inpainting/lora"

stable_diffusion_3_medium_lora = "stable-diffusion-v3-medium/lora"
stable_diffusion_35_medium_lora = "stable-diffusion-v3.5-medium/lora"
stable_diffusion_35_large_lora = "stable-diffusion-v3.5-large/lora"

stable_diffusion_xl_1_lora = "stable-diffusion-xl-v1-base/lora"
stable_diffusion_xl_1_inpainting_lora = "stable-diffusion-xl-v1-base-inpainting/lora"

wuerstchen_2_lora = "wuerstchen-v2-prior/lora"
stable_cascade_1_stage_a_lora = "stable-cascade-v1-stage-a/lora"
stable_cascade_1_stage_b_lora = "stable-cascade-v1-stage-b/lora"
stable_cascade_1_stage_c_lora = "stable-cascade-v1-stage-c/lora"

pixart_alpha_lora = "pixart-alpha/lora"
pixart_sigma_lora = "pixart-sigma/lora"

flux_dev_1_lora = "Flux.1-dev/lora"
flux_fill_dev_1_lora = "Flux.1-fill-dev/lora"

sana_lora = "sana/lora"

hunyuan_video_lora = "hunyuan-video/lora"

hi_dream_i1_lora = "hidream-i1/lora"
@@ -23,7 +23,7 @@ class StarterModel(StarterModelWithoutDependencies):
     dependencies: Optional[list[StarterModelWithoutDependencies]] = None


-class StarterModelBundles(BaseModel):
+class StarterModelBundle(BaseModel):
     name: str
     models: list[StarterModel]
@@ -109,7 +109,7 @@ flux_vae = StarterModel(

 # region: Main
 flux_schnell_quantized = StarterModel(
-    name="FLUX Schnell (Quantized)",
+    name="FLUX.1 schnell (quantized)",
     base=BaseModelType.Flux,
     source="InvokeAI/flux_schnell::transformer/bnb_nf4/flux1-schnell-bnb_nf4.safetensors",
     description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
@@ -117,7 +117,7 @@ flux_schnell_quantized = StarterModel(
     dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
 )
 flux_dev_quantized = StarterModel(
-    name="FLUX Dev (Quantized)",
+    name="FLUX.1 dev (quantized)",
     base=BaseModelType.Flux,
     source="InvokeAI/flux_dev::transformer/bnb_nf4/flux1-dev-bnb_nf4.safetensors",
     description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
@@ -125,7 +125,7 @@ flux_dev_quantized = StarterModel(
     dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
 )
 flux_schnell = StarterModel(
-    name="FLUX Schnell",
+    name="FLUX.1 schnell",
     base=BaseModelType.Flux,
     source="InvokeAI/flux_schnell::transformer/base/flux1-schnell.safetensors",
     description="FLUX schnell transformer in bfloat16. Total size with dependencies: ~33GB",
@@ -133,13 +133,29 @@ flux_schnell = StarterModel(
     dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
 )
 flux_dev = StarterModel(
-    name="FLUX Dev",
+    name="FLUX.1 dev",
     base=BaseModelType.Flux,
     source="InvokeAI/flux_dev::transformer/base/flux1-dev.safetensors",
     description="FLUX dev transformer in bfloat16. Total size with dependencies: ~33GB",
     type=ModelType.Main,
     dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
 )
+flux_kontext = StarterModel(
+    name="FLUX.1 Kontext dev",
+    base=BaseModelType.Flux,
+    source="https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev/resolve/main/flux1-kontext-dev.safetensors",
+    description="FLUX.1 Kontext dev transformer in bfloat16. Total size with dependencies: ~33GB",
+    type=ModelType.Main,
+    dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
+)
+flux_kontext_quantized = StarterModel(
+    name="FLUX.1 Kontext dev (Quantized)",
+    base=BaseModelType.Flux,
+    source="https://huggingface.co/unsloth/FLUX.1-Kontext-dev-GGUF/resolve/main/flux1-kontext-dev-Q4_K_M.gguf",
+    description="FLUX.1 Kontext dev quantized (q4_k_m). Total size with dependencies: ~14GB",
+    type=ModelType.Main,
+    dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
+)
 sd35_medium = StarterModel(
     name="SD3.5 Medium",
     base=BaseModelType.StableDiffusion3,
@@ -297,6 +313,15 @@ ip_adapter_sdxl = StarterModel(
     dependencies=[ip_adapter_sdxl_image_encoder],
     previous_names=["IP Adapter SDXL"],
 )
+ip_adapter_plus_sdxl = StarterModel(
+    name="Precise Reference (IP Adapter Plus ViT-H)",
+    base=BaseModelType.StableDiffusionXL,
+    source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
+    description="References images with a higher degree of precision.",
+    type=ModelType.IPAdapter,
+    dependencies=[ip_adapter_sdxl_image_encoder],
+    previous_names=["IP Adapter Plus SDXL"],
+)
 ip_adapter_flux = StarterModel(
     name="Standard Reference (XLabs FLUX IP-Adapter v2)",
     base=BaseModelType.Flux,
@@ -647,6 +672,7 @@ flux_fill = StarterModel(
 # List of starter models, displayed on the frontend.
 # The order/sort of this list is not changed by the frontend - set it how you want it here.
 STARTER_MODELS: list[StarterModel] = [
+    flux_kontext_quantized,
     flux_schnell_quantized,
     flux_dev_quantized,
     flux_schnell,
@@ -672,6 +698,7 @@ STARTER_MODELS: list[StarterModel] = [
     ip_adapter_plus_sd1,
     ip_adapter_plus_face_sd1,
     ip_adapter_sdxl,
+    ip_adapter_plus_sdxl,
     ip_adapter_flux,
     qr_code_cnet_sd1,
     qr_code_cnet_sdxl,
@@ -744,6 +771,7 @@ sdxl_bundle: list[StarterModel] = [
     juggernaut_sdxl,
     sdxl_fp16_vae_fix,
     ip_adapter_sdxl,
+    ip_adapter_plus_sdxl,
     canny_sdxl,
     depth_sdxl,
     softedge_sdxl,
@@ -765,12 +793,13 @@ flux_bundle: list[StarterModel] = [
     flux_depth_control_lora,
     flux_redux,
     flux_fill,
+    flux_kontext_quantized,
 ]

-STARTER_BUNDLES: dict[str, list[StarterModel]] = {
-    BaseModelType.StableDiffusion1: sd1_bundle,
-    BaseModelType.StableDiffusionXL: sdxl_bundle,
-    BaseModelType.Flux: flux_bundle,
+STARTER_BUNDLES: dict[str, StarterModelBundle] = {
+    BaseModelType.StableDiffusion1: StarterModelBundle(name="Stable Diffusion 1.5", models=sd1_bundle),
+    BaseModelType.StableDiffusionXL: StarterModelBundle(name="SDXL", models=sdxl_bundle),
+    BaseModelType.Flux: StarterModelBundle(name="FLUX.1 dev", models=flux_bundle),
 }

 assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
@@ -29,6 +29,7 @@ class BaseModelType(str, Enum):
     Imagen3 = "imagen3"
     Imagen4 = "imagen4"
     ChatGPT4o = "chatgpt-4o"
+    FluxKontext = "flux-kontext"


 class ModelType(str, Enum):
@@ -88,6 +89,7 @@ class ModelVariantType(str, Enum):
 class ModelFormat(str, Enum):
     """Storage format of model."""

+    OMI = "omi"
     Diffusers = "diffusers"
     Checkpoint = "checkpoint"
     LyCORIS = "lycoris"
@@ -137,6 +139,7 @@ class FluxLoRAFormat(str, Enum):
     Kohya = "flux.kohya"
     OneTrainer = "flux.onetrainer"
     Control = "flux.control"
+    AIToolkit = "flux.aitoolkit"


 AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
invokeai/backend/model_manager/util/lora_metadata_extractor.py (Normal file, 145 lines)
@@ -0,0 +1,145 @@
"""Utility functions for extracting metadata from LoRA model files."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Set, Tuple
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.app.util.thumbnails import make_thumbnail
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def extract_lora_metadata(
|
||||
model_path: Path, model_key: str, model_images_path: Path
|
||||
) -> Tuple[Optional[str], Optional[Set[str]]]:
|
||||
"""
|
||||
Extract metadata for a LoRA model from associated JSON and image files.
|
||||
|
||||
Args:
|
||||
model_path: Path to the LoRA model file
|
||||
model_key: Unique key for the model
|
||||
model_images_path: Path to the model images directory
|
||||
|
||||
Returns:
|
||||
Tuple of (description, trigger_phrases)
|
||||
"""
|
||||
model_stem = model_path.stem
|
||||
model_dir = model_path.parent
|
||||
|
||||
# Find and process preview image
|
||||
_process_preview_image(model_stem, model_dir, model_key, model_images_path)
|
||||
|
||||
# Extract metadata from JSON
|
||||
description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)
|
||||
|
||||
return description, trigger_phrases
|
||||
|
||||
|
||||
def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
|
||||
"""Find and process a preview image for the model, saving it to the model images store."""
|
||||
image_extensions = [".png", ".jpg", ".jpeg", ".webp"]
|
||||
|
||||
for ext in image_extensions:
|
||||
image_path = model_dir / f"{model_stem}{ext}"
|
||||
if image_path.exists():
|
||||
try:
|
||||
# Open the image
|
||||
with Image.open(image_path) as img:
|
||||
# Create thumbnail and save to model images directory
|
||||
thumbnail = make_thumbnail(img, 256)
|
||||
thumbnail_path = model_images_path / f"{model_key}.webp"
|
||||
thumbnail.save(thumbnail_path, format="webp")
|
||||
|
||||
logger.info(f"Processed preview image {image_path.name} for model {model_key}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to process preview image {image_path.name}: {e}")
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
|
||||
"""Extract metadata from a JSON file with the same name as the model."""
|
||||
json_path = model_dir / f"{model_stem}.json"
|
||||
|
||||
if not json_path.exists():
|
||||
return None, None
|
||||
|
||||
try:
|
||||
with open(json_path, "r", encoding="utf-8") as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
# Extract description
|
||||
description = _build_description(metadata)
|
||||
|
||||
# Extract trigger phrases
|
||||
trigger_phrases = _extract_trigger_phrases(metadata)
|
||||
|
||||
if description or trigger_phrases:
|
||||
logger.info(f"Applied metadata from {json_path.name}")
|
||||
|
||||
return description, trigger_phrases
|
||||
|
||||
except (json.JSONDecodeError, IOError, Exception) as e:
|
||||
logger.warning(f"Failed to read metadata from {json_path}: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
|
||||
"""Build a description from metadata fields."""
|
||||
description_parts = []
|
||||
|
||||
if description := metadata.get("description"):
|
||||
description_parts.append(str(description).strip())
|
||||
|
||||
if notes := metadata.get("notes"):
|
||||
description_parts.append(str(notes).strip())
|
||||
|
||||
return " | ".join(description_parts) if description_parts else None
|
||||
|
||||
|
||||
def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
|
||||
"""Extract trigger phrases from metadata."""
|
||||
if not (activation_text := metadata.get("activation text")):
|
||||
return None
|
||||
|
||||
activation_text = str(activation_text).strip()
|
||||
if not activation_text:
|
||||
return None
|
||||
|
||||
# Split on commas and clean up each phrase
|
||||
phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]
|
||||
|
||||
return set(phrases) if phrases else None
|
||||
|
||||
|
||||
def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
|
||||
"""
|
||||
Apply extracted metadata to a LoRA model configuration.
|
||||
|
||||
Args:
|
||||
info: The model configuration to update
|
||||
model_path: Path to the LoRA model file
|
||||
model_images_path: Path to the model images directory
|
||||
"""
|
||||
# Only process LoRA models
|
||||
if info.type != ModelType.LoRA:
|
||||
return
|
||||
|
||||
# Extract and apply metadata
|
||||
description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)
|
||||
|
||||
# We don't set cover_image path in the config anymore since images are stored
|
||||
# separately in the model images store by model key
|
||||
|
||||
if description:
|
||||
info.description = description
|
||||
|
||||
if trigger_phrases:
|
||||
info.trigger_phrases = trigger_phrases
|
||||
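Note: a sketch of the sidecar convention this utility reads: for a hypothetical my_lora.safetensors, a my_lora.json next to it supplies the description and trigger phrases, and a my_lora.png (or .jpg/.jpeg/.webp) becomes the 256px preview thumbnail.

import json

# Hypothetical contents of my_lora.json, sitting next to my_lora.safetensors:
sidecar = json.loads(
    '{"description": "Watercolor style", "notes": "Works best at weight 0.7", "activation text": "watercolor, soft wash"}'
)

# What _build_description and _extract_trigger_phrases would derive from it:
description = " | ".join(p for p in (sidecar.get("description"), sidecar.get("notes")) if p)
trigger_phrases = {p.strip() for p in sidecar["activation text"].split(",") if p.strip()}
print(description)      # Watercolor style | Works best at weight 0.7
print(trigger_phrases)  # {'watercolor', 'soft wash'}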
@@ -46,6 +46,10 @@ class ModelPatcher:
         text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
         ti_list: List[Tuple[str, TextualInversionModelRaw]],
     ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
+        if len(ti_list) == 0:
+            yield tokenizer, TextualInversionManager(tokenizer)
+            return
+
         init_tokens_count = None
         new_tokens_added = None
@@ -0,0 +1,63 @@
import json
from dataclasses import dataclass, field
from typing import Any

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.util import InvokeAILogger


def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] = None) -> bool:
    if metadata:
        try:
            software = json.loads(metadata.get("software", "{}"))
        except json.JSONDecodeError:
            return False
        return software.get("name") == "ai-toolkit"
    # metadata got lost somewhere
    return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())


@dataclass
class GroupedStateDict:
    transformer: dict[str, Any] = field(default_factory=dict)
    # might also grow CLIP and T5 submodels


def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
    logger = InvokeAILogger.get_logger()
    grouped = GroupedStateDict()
    for key, value in state_dict.items():
        submodel_name, param_name = key.split(".", 1)
        match submodel_name:
            case "diffusion_model":
                grouped.transformer[param_name] = value
            case _:
                logger.warning(f"Unexpected submodel name: {submodel_name}")
    return grouped


def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Renames keys from the PEFT LoRA format to the InvokeAI format."""
    renamed_state_dict = {}
    for key, value in state_dict.items():
        renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
        renamed_state_dict[renamed_key] = value
    return renamed_state_dict


def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
    state_dict = _rename_peft_lora_keys(state_dict)
    by_layer = _group_by_layer(state_dict)
    by_model = _group_state_by_submodel(by_layer)

    layers: dict[str, BaseLayerPatch] = {}
    for layer_key, layer_state_dict in by_model.transformer.items():
        layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)

    return ModelPatchRaw(layers=layers)
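Note: this new module (imported elsewhere in this diff as invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils) handles ai-toolkit exports, which use PEFT-style lora_A/lora_B keys under a diffusion_model prefix and record the exporter in the safetensors header. An illustrative sketch of both signals:

# A key as exported by ai-toolkit (PEFT naming, values elided).
key = "diffusion_model.single_blocks.0.linear2.lora_A.weight"

# Detection without metadata: the top-level prefix is "diffusion_model".
print(key.split(".", 1)[0] == "diffusion_model")  # True

# Detection with metadata, as stored in the safetensors header.
metadata = {"software": '{"name": "ai-toolkit"}'}

# _rename_peft_lora_keys maps PEFT names onto InvokeAI's internal ones:
print(key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up."))
# -> diffusion_model.single_blocks.0.linear2.lora_down.weight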
@@ -1,4 +1,7 @@
 from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
+from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
+    is_state_dict_likely_in_flux_aitoolkit_format,
+)
 from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
 from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
     is_state_dict_likely_in_flux_diffusers_format,
@@ -11,7 +14,7 @@ from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_u
 )


-def flux_format_from_state_dict(state_dict):
+def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
     if is_state_dict_likely_in_flux_kohya_format(state_dict):
         return FluxLoRAFormat.Kohya
     elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):
@@ -20,5 +23,7 @@ def flux_format_from_state_dict(state_dict):
         return FluxLoRAFormat.Diffusers
     elif is_state_dict_likely_flux_control(state_dict):
         return FluxLoRAFormat.Control
+    elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
+        return FluxLoRAFormat.AIToolkit
     else:
         return None
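Note: a usage sketch (the path is hypothetical): the probe now forwards the safetensors header metadata so ai-toolkit LoRAs can be identified even when the key layout alone is ambiguous.

from safetensors import safe_open
from safetensors.torch import load_file

from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict

path = "/models/loras/flux_lora.safetensors"  # hypothetical
state_dict = load_file(path)
with safe_open(path, framework="pt") as f:
    metadata = f.metadata()

print(flux_format_from_state_dict(state_dict, metadata))  # e.g. FluxLoRAFormat.AIToolkit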
@@ -1,10 +0,0 @@
dist/
static/
.husky/
node_modules/
patches/
stats.html
index.html
.yarn/
*.scss
src/services/api/schema.ts
@@ -1,46 +0,0 @@
module.exports = {
  extends: ['@invoke-ai/eslint-config-react'],
  plugins: ['path', 'i18next'],
  rules: {
    // TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
    'react-refresh/only-export-components': 'off',
    // TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
    '@typescript-eslint/consistent-type-assertions': 'off',
    // https://github.com/qdanik/eslint-plugin-path
    'path/no-relative-imports': ['error', { maxDepth: 0 }],
    // https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
    'i18next/no-literal-string': 'error',
    // https://eslint.org/docs/latest/rules/no-console
    'no-console': 'error',
    // https://eslint.org/docs/latest/rules/no-promise-executor-return
    'no-promise-executor-return': 'error',
    // https://eslint.org/docs/latest/rules/require-await
    'require-await': 'error',
    'no-restricted-properties': [
      'error',
      {
        object: 'crypto',
        property: 'randomUUID',
        message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
      },
      {
        object: 'navigator',
        property: 'clipboard',
        message:
          'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
      },
    ],
  },
  overrides: [
    /**
     * Overrides for stories
     */
    {
      files: ['*.stories.tsx'],
      rules: {
        // We may not have i18n available in stories.
        'i18next/no-literal-string': 'off',
      },
    },
  ],
};
@@ -14,3 +14,4 @@ static/
 src/theme/css/overlayscrollbars.css
 src/theme_/css/overlayscrollbars.css
 pnpm-lock.yaml
+.claude
@@ -1,11 +0,0 @@
module.exports = {
  ...require('@invoke-ai/prettier-config-react'),
  overrides: [
    {
      files: ['public/locales/*.json'],
      options: {
        tabWidth: 4,
      },
    },
  ],
};
invokeai/frontend/web/.prettierrc.json (Normal file, 17 lines)
@@ -0,0 +1,17 @@
{
  "$schema": "http://json.schemastore.org/prettierrc",
  "trailingComma": "es5",
  "printWidth": 120,
  "tabWidth": 2,
  "semi": true,
  "singleQuote": true,
  "endOfLine": "auto",
  "overrides": [
    {
      "files": ["public/locales/*.json"],
      "options": {
        "tabWidth": 4
      }
    }
  ]
}
@@ -1,21 +1,23 @@
-import { PropsWithChildren, memo, useEffect } from 'react';
-import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
-import { useAppDispatch } from '../src/app/store/storeHooks';
 import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
+import type { PropsWithChildren } from 'react';
+import { memo, useEffect } from 'react';
+
+import { useAppDispatch } from '../src/app/store/storeHooks';
+import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';

 /**
  * Initializes some state for storybook. Must be in a different component
  * so that it is run inside the redux context.
  */
-export const ReduxInit = memo((props: PropsWithChildren) => {
+export const ReduxInit = memo(({ children }: PropsWithChildren) => {
   const dispatch = useAppDispatch();
   useGlobalModifiersInit();
   useEffect(() => {
     dispatch(
       modelChanged({ model: { key: 'test_model', hash: 'some_hash', name: 'some name', base: 'sd-1', type: 'main' } })
     );
-  }, []);
+  }, [dispatch]);

-  return props.children;
+  return children;
 });

 ReduxInit.displayName = 'ReduxInit';
@@ -2,19 +2,13 @@ import type { StorybookConfig } from '@storybook/react-vite';

 const config: StorybookConfig = {
   stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
-  addons: [
-    '@storybook/addon-links',
-    '@storybook/addon-essentials',
-    '@storybook/addon-interactions',
-    '@storybook/addon-storysource',
-  ],
+  addons: ['@storybook/addon-links', '@storybook/addon-docs'],
   framework: {
     name: '@storybook/react-vite',
     options: {},
   },
-  docs: {
-    autodocs: 'tag',
-  },
   core: {
     disableTelemetry: true,
   },
@@ -1,5 +1,5 @@
-import { addons } from '@storybook/manager-api';
-import { themes } from '@storybook/theming';
+import { addons } from 'storybook/manager-api';
+import { themes } from 'storybook/theming';

 addons.setConfig({
   theme: themes.dark,
@@ -1,17 +1,18 @@
|
||||
import { Preview } from '@storybook/react';
|
||||
import { themes } from '@storybook/theming';
|
||||
import type { Preview } from '@storybook/react-vite';
|
||||
import { themes } from 'storybook/theming';
|
||||
import { $store } from 'app/store/nanostores/store';
|
||||
import i18n from 'i18next';
|
||||
import { initReactI18next } from 'react-i18next';
|
||||
import { Provider } from 'react-redux';
|
||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||
import { createStore } from '../src/app/store/store';
|
||||
|
||||
// TODO: Disabled for IDE performance issues with our translation JSON
|
||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||
// @ts-ignore
|
||||
import translationEN from '../public/locales/en.json';
|
||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||
import { createStore } from '../src/app/store/store';
|
||||
import { ReduxInit } from './ReduxInit';
|
||||
import { $store } from 'app/store/nanostores/store';
|
||||
|
||||
i18n.use(initReactI18next).init({
|
||||
lng: 'en',
|
||||
@@ -46,6 +47,7 @@ const preview: Preview = {
|
||||
parameters: {
|
||||
docs: {
|
||||
theme: themes.dark,
|
||||
codePanel: true,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
invokeai/frontend/web/eslint.config.mjs (new file, 242 lines)
@@ -0,0 +1,242 @@
import js from '@eslint/js';
import typescriptEslint from '@typescript-eslint/eslint-plugin';
import typescriptParser from '@typescript-eslint/parser';
import pluginI18Next from 'eslint-plugin-i18next';
import pluginImport from 'eslint-plugin-import';
import pluginPath from 'eslint-plugin-path';
import pluginReact from 'eslint-plugin-react';
import pluginReactHooks from 'eslint-plugin-react-hooks';
import pluginReactRefresh from 'eslint-plugin-react-refresh';
import pluginSimpleImportSort from 'eslint-plugin-simple-import-sort';
import pluginStorybook from 'eslint-plugin-storybook';
import pluginUnusedImports from 'eslint-plugin-unused-imports';
import globals from 'globals';

export default [
  js.configs.recommended,

  {
    languageOptions: {
      parser: typescriptParser,
      parserOptions: {
        ecmaFeatures: {
          jsx: true,
        },
      },
      globals: {
        ...globals.browser,
        ...globals.node,
        GlobalCompositeOperation: 'readonly',
        RequestInit: 'readonly',
      },
    },

    files: ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx'],

    plugins: {
      react: pluginReact,
      '@typescript-eslint': typescriptEslint,
      'react-hooks': pluginReactHooks,
      import: pluginImport,
      'unused-imports': pluginUnusedImports,
      'simple-import-sort': pluginSimpleImportSort,
      'react-refresh': pluginReactRefresh.configs.vite,
      path: pluginPath,
      i18next: pluginI18Next,
      storybook: pluginStorybook,
    },

    rules: {
      ...typescriptEslint.configs.recommended.rules,
      ...pluginReact.configs.recommended.rules,
      ...pluginReact.configs['jsx-runtime'].rules,
      ...pluginReactHooks.configs.recommended.rules,
      ...pluginStorybook.configs.recommended.rules,

      'react/jsx-no-bind': [
        'error',
        {
          allowBind: true,
        },
      ],

      'react/jsx-curly-brace-presence': [
        'error',
        {
          props: 'never',
          children: 'never',
        },
      ],

      'react-hooks/exhaustive-deps': 'error',

      curly: 'error',
      'no-var': 'error',
      'brace-style': 'error',
      'prefer-template': 'error',
      radix: 'error',
      'space-before-blocks': 'error',
      eqeqeq: 'error',
      'one-var': ['error', 'never'],
      'no-eval': 'error',
      'no-extend-native': 'error',
      'no-implied-eval': 'error',
      'no-label-var': 'error',
      'no-return-assign': 'error',
      'no-sequences': 'error',
      'no-template-curly-in-string': 'error',
      'no-throw-literal': 'error',
      'no-unmodified-loop-condition': 'error',
      'import/no-duplicates': 'error',
      'import/prefer-default-export': 'off',
      'unused-imports/no-unused-imports': 'error',

      'unused-imports/no-unused-vars': [
        'error',
        {
          vars: 'all',
          varsIgnorePattern: '^_',
          args: 'after-used',
          argsIgnorePattern: '^_',
        },
      ],

      'simple-import-sort/imports': 'error',
      'simple-import-sort/exports': 'error',
      '@typescript-eslint/no-unused-vars': 'off',

      '@typescript-eslint/ban-ts-comment': [
        'error',
        {
          'ts-expect-error': 'allow-with-description',
          'ts-ignore': true,
          'ts-nocheck': true,
          'ts-check': false,
          minimumDescriptionLength: 10,
        },
      ],

      '@typescript-eslint/no-empty-interface': [
        'error',
        {
          allowSingleExtends: true,
        },
      ],

      '@typescript-eslint/consistent-type-imports': [
        'error',
        {
          prefer: 'type-imports',
          fixStyle: 'separate-type-imports',
          disallowTypeAnnotations: true,
        },
      ],

      '@typescript-eslint/no-import-type-side-effects': 'error',

      '@typescript-eslint/consistent-type-assertions': [
        'error',
        {
          assertionStyle: 'as',
        },
      ],

      'path/no-relative-imports': [
        'error',
        {
          maxDepth: 0,
        },
      ],

      'no-console': 'warn',
      'no-promise-executor-return': 'error',
      'require-await': 'error',

      'no-restricted-syntax': [
        'error',
        {
          selector: 'CallExpression[callee.name="setActiveTab"]',
          message:
            'setActiveTab() can only be called from use-navigation-api.tsx. Use navigationApi.switchToTab() instead.',
        },
      ],

      'no-restricted-properties': [
        'error',
        {
          object: 'crypto',
          property: 'randomUUID',
          message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
        },
        {
          object: 'navigator',
          property: 'clipboard',
          message:
            'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
        },
      ],

      // Typescript handles this for us: https://eslint.org/docs/latest/rules/no-redeclare#handled_by_typescript
      'no-redeclare': 'off',

      'no-restricted-imports': [
        'error',
        {
          paths: [
            {
              name: 'lodash-es',
              importNames: ['isEqual'],
              message: 'Please use objectEquals from @observ33r/object-equals instead.',
            },
            {
              name: 'lodash-es',
              message: 'Please use es-toolkit instead.',
            },
            {
              name: 'es-toolkit',
              importNames: ['isEqual'],
              message: 'Please use objectEquals from @observ33r/object-equals instead.',
            },
          ],
        },
      ],
    },

    settings: {
      react: {
        version: 'detect',
      },
    },
  },

  {
    files: ['**/use-navigation-api.tsx'],
    rules: {
      'no-restricted-syntax': 'off',
    },
  },

  {
    files: ['**/*.stories.tsx'],
    rules: {
      'i18next/no-literal-string': 'off',
    },
  },

  {
    ignores: [
      '**/dist/',
      '**/static/',
      '**/.husky/',
      '**/node_modules/',
      '**/patches/',
      '**/stats.html',
      '**/index.html',
      '**/.yarn/',
      '**/*.scss',
      'src/services/api/schema.ts',
      '.prettierrc.js',
      '.storybook',
    ],
  },
];
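As an illustration of the import restrictions above (an editor's sketch: only the module and export names quoted in the rule messages come from the config; the surrounding usage is invented):

// Rejected by no-restricted-imports:
//   import { isEqual } from 'lodash-es';   // "Please use es-toolkit instead."
//   import { isEqual } from 'es-toolkit';  // "Please use objectEquals ... instead."
// Accepted replacements per the rule messages:
import { objectEquals } from '@observ33r/object-equals';
import { debounce } from 'es-toolkit';

const save = debounce(() => console.log('saved'), 300);
const prev = { x: 1 };
const next = { x: 1 };
if (!objectEquals(prev, next)) {
  save(); // not reached here: the two objects are deeply equal
}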
@@ -12,10 +12,9 @@ const config: KnipConfig = {
    'src/features/parameters/types/parameterSchemas.ts',
    // TODO(psyche): maybe we can clean up these utils after canvas v2 release
    'src/features/controlLayers/konva/util.ts',
    // TODO(psyche): restore HRF functionality?
    'src/features/hrf/**',
    // This feature is (temporarily?) disabled
    'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
    // Will be using this
    'src/common/hooks/useAsyncState.ts',
    'src/app/store/use-debounced-app-selector.ts',
  ],
  ignoreBinaries: ['only-allow'],
  paths: {
@@ -38,70 +38,60 @@
    "test:ui": "vitest --coverage --ui",
    "test:no-watch": "vitest --no-watch"
  },
  "madge": {
    "excludeRegExp": [
      "^index.ts$"
    ],
    "detectiveOptions": {
      "ts": {
        "skipTypeImports": true
      },
      "tsx": {
        "skipTypeImports": true
      }
    }
  },
  "dependencies": {
    "@atlaskit/pragmatic-drag-and-drop": "^1.5.3",
    "@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.0",
    "@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.0.3",
    "@dagrejs/dagre": "^1.1.4",
    "@atlaskit/pragmatic-drag-and-drop": "^1.7.4",
    "@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.1",
    "@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.1.0",
    "@dagrejs/dagre": "^1.1.5",
    "@dagrejs/graphlib": "^2.2.4",
    "@fontsource-variable/inter": "^5.2.5",
    "@fontsource-variable/inter": "^5.2.6",
    "@invoke-ai/ui-library": "^0.0.46",
    "@nanostores/react": "^1.0.0",
    "@reduxjs/toolkit": "2.7.0",
    "@observ33r/object-equals": "^1.1.5",
    "@reduxjs/toolkit": "2.8.2",
    "@roarr/browser-log-writer": "^1.3.0",
    "@xyflow/react": "^12.6.0",
    "@xyflow/react": "^12.8.2",
    "ag-psd": "^28.2.2",
    "async-mutex": "^0.5.0",
    "chakra-react-select": "^4.9.2",
    "cmdk": "^1.1.1",
    "compare-versions": "^6.1.1",
    "dockview": "^4.4.1",
    "es-toolkit": "^1.39.7",
    "filesize": "^10.1.6",
    "fracturedjsonjs": "^4.0.2",
    "fracturedjsonjs": "^4.1.0",
    "framer-motion": "^11.10.0",
    "i18next": "^25.0.1",
    "i18next": "^25.3.2",
    "i18next-http-backend": "^3.0.2",
    "idb-keyval": "^6.2.1",
    "idb-keyval": "6.2.2",
    "jsondiffpatch": "^0.7.3",
    "konva": "^9.3.20",
    "linkify-react": "^4.2.0",
    "linkifyjs": "^4.2.0",
    "lodash-es": "^4.17.21",
    "konva": "^9.3.22",
    "linkify-react": "^4.3.1",
    "linkifyjs": "^4.3.1",
    "lru-cache": "^11.1.0",
    "mtwist": "^1.0.2",
    "nanoid": "^5.1.5",
    "nanostores": "^1.0.1",
    "new-github-issue-url": "^1.1.0",
    "overlayscrollbars": "^2.11.1",
    "overlayscrollbars": "^2.11.4",
    "overlayscrollbars-react": "^0.5.6",
    "perfect-freehand": "^1.2.2",
    "query-string": "^9.1.1",
    "query-string": "^9.2.1",
    "raf-throttle": "^2.0.6",
    "react": "^18.3.1",
    "react-colorful": "^5.6.1",
    "react-dom": "^18.3.1",
    "react-dropzone": "^14.3.8",
    "react-error-boundary": "^5.0.0",
    "react-hook-form": "^7.56.1",
    "react-hook-form": "^7.60.0",
    "react-hotkeys-hook": "4.5.0",
    "react-i18next": "^15.5.1",
    "react-i18next": "^15.5.3",
    "react-icons": "^5.5.0",
    "react-redux": "9.2.0",
    "react-resizable-panels": "^2.1.8",
    "react-resizable-panels": "^3.0.3",
    "react-textarea-autosize": "^8.5.9",
    "react-use": "^17.6.0",
    "react-virtuoso": "^4.12.6",
    "react-virtuoso": "^4.13.0",
    "redux-dynamic-middlewares": "^2.2.0",
    "redux-remember": "^5.2.0",
    "redux-undo": "^1.1.0",
@@ -109,52 +99,55 @@
    "roarr": "^7.21.1",
    "serialize-error": "^12.0.0",
    "socket.io-client": "^4.8.1",
    "stable-hash": "^0.0.5",
    "use-debounce": "^10.0.4",
    "stable-hash": "^0.0.6",
    "use-debounce": "^10.0.5",
    "use-device-pixel-ratio": "^1.1.2",
    "uuid": "^11.1.0",
    "zod": "^3.24.3",
    "zod-validation-error": "^3.4.0"
    "zod": "^4.0.5",
    "zod-validation-error": "^3.5.2"
  },
  "peerDependencies": {
    "react": "^18.2.0",
    "react-dom": "^18.2.0"
  },
  "devDependencies": {
    "@invoke-ai/eslint-config-react": "^0.0.14",
    "@invoke-ai/prettier-config-react": "^0.0.7",
    "@storybook/addon-essentials": "^8.6.12",
    "@storybook/addon-interactions": "^8.6.12",
    "@storybook/addon-links": "^8.6.12",
    "@storybook/addon-storysource": "^8.6.12",
    "@storybook/manager-api": "^8.6.12",
    "@storybook/react": "^8.6.12",
    "@storybook/react-vite": "^8.6.12",
    "@storybook/theming": "^8.6.12",
    "@types/lodash-es": "^4.17.12",
    "@eslint/js": "^9.31.0",
    "@storybook/addon-docs": "^9.0.17",
    "@storybook/addon-links": "^9.0.17",
    "@storybook/react-vite": "^9.0.17",
    "@types/node": "^22.15.1",
    "@types/react": "^18.3.11",
    "@types/react-dom": "^18.3.0",
    "@types/uuid": "^10.0.0",
    "@typescript-eslint/eslint-plugin": "^8.37.0",
    "@typescript-eslint/parser": "^8.37.0",
    "@vitejs/plugin-react-swc": "^3.9.0",
    "@vitest/coverage-v8": "^3.1.2",
    "@vitest/ui": "^3.1.2",
    "concurrently": "^9.1.2",
    "csstype": "^3.1.3",
    "dpdm": "^3.14.0",
    "eslint": "^8.57.1",
    "eslint-plugin-i18next": "^6.1.1",
    "eslint-plugin-path": "^1.3.0",
    "knip": "^5.50.5",
    "eslint": "^9.31.0",
    "eslint-plugin-i18next": "^6.1.2",
    "eslint-plugin-import": "^2.29.1",
    "eslint-plugin-path": "^2.0.3",
    "eslint-plugin-react": "^7.33.2",
    "eslint-plugin-react-hooks": "^5.2.0",
    "eslint-plugin-react-refresh": "^0.4.5",
    "eslint-plugin-simple-import-sort": "^12.0.0",
    "eslint-plugin-storybook": "^9.0.17",
    "eslint-plugin-unused-imports": "^4.1.4",
    "globals": "^16.3.0",
    "knip": "^5.61.3",
    "openapi-types": "^12.1.3",
    "openapi-typescript": "^7.6.1",
    "prettier": "^3.5.3",
    "rollup-plugin-visualizer": "^5.14.0",
    "storybook": "^8.6.12",
    "rollup-plugin-visualizer": "^6.0.3",
    "storybook": "^9.0.17",
    "tsafe": "^1.8.5",
    "type-fest": "^4.40.0",
    "typescript": "^5.8.3",
    "vite": "^6.3.3",
    "vite": "^7.0.5",
    "vite-plugin-css-injected-by-js": "^3.5.2",
    "vite-plugin-dts": "^4.5.3",
    "vite-plugin-eslint": "^1.8.1",
@@ -162,7 +155,7 @@
    "vitest": "^3.1.2"
  },
  "engines": {
    "pnpm": "8"
    "pnpm": "10"
  },
  "packageManager": "pnpm@8.15.9+sha512.499434c9d8fdd1a2794ebf4552b3b25c0a633abcee5bb15e7b5de90f32f47b513aca98cd5cfd001c31f0db454bc3804edccd578501e4ca293a6816166bbd9f81"
  "packageManager": "pnpm@10.12.4"
}
invokeai/frontend/web/pnpm-lock.yaml (generated, 12664 lines; file diff suppressed because it is too large)
invokeai/frontend/web/pnpm-workspace.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
onlyBuiltDependencies:
  - '@swc/core'
  - esbuild
@@ -711,7 +711,8 @@
    "gaussianBlur": "Gaußsche Unschärfe",
    "sendToUpscale": "An Hochskalieren senden",
    "useCpuNoise": "CPU-Rauschen verwenden",
    "sendToCanvas": "An Leinwand senden"
    "sendToCanvas": "An Leinwand senden",
    "disabledNoRasterContent": "Deaktiviert (kein Rasterinhalt)"
  },
  "settings": {
    "displayInProgress": "Zwischenbilder anzeigen",
@@ -789,7 +790,10 @@
    "pasteSuccess": "Eingefügt in {{destination}}",
    "pasteFailed": "Einfügen fehlgeschlagen",
    "unableToCopy": "Kopieren nicht möglich",
    "unableToCopyDesc_theseSteps": "diese Schritte"
    "unableToCopyDesc_theseSteps": "diese Schritte",
    "noRasterLayers": "Keine Rasterebenen gefunden",
    "noActiveRasterLayers": "Keine aktiven Rasterebenen",
    "noVisibleRasterLayers": "Keine sichtbaren Rasterebenen"
  },
  "accessibility": {
    "uploadImage": "Bild hochladen",
@@ -847,7 +851,10 @@
    "assetsWithCount_one": "{{count}} in der Sammlung",
    "assetsWithCount_other": "{{count}} in der Sammlung",
    "deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
    "updateBoardError": "Fehler beim Aktualisieren des Ordners"
    "updateBoardError": "Fehler beim Aktualisieren des Ordners",
    "uncategorizedImages": "Nicht kategorisierte Bilder",
    "deleteAllUncategorizedImages": "Alle nicht kategorisierten Bilder löschen",
    "deletedImagesCannotBeRestored": "Gelöschte Bilder können nicht wiederhergestellt werden."
  },
  "queue": {
    "status": "Status",
@@ -1194,6 +1201,9 @@
        "Die Kantengröße des Kohärenzdurchlaufs."
      ],
      "heading": "Kantengröße"
    },
    "rasterLayer": {
      "heading": "Rasterebene"
    }
  },
  "invocationCache": {
@@ -1431,7 +1441,10 @@
    "autoLayout": "Auto Layout",
    "copyShareLink": "Teilen-Link kopieren",
    "download": "Herunterladen",
    "convertGraph": "Graph konvertieren"
    "convertGraph": "Graph konvertieren",
    "filterByTags": "Nach Tags filtern",
    "yourWorkflows": "Ihre Arbeitsabläufe",
    "recentlyOpened": "Kürzlich geöffnet"
  },
  "sdxl": {
    "concatPromptStyle": "Verknüpfen von Prompt & Stil",
@@ -1444,7 +1457,15 @@
  "prompt": {
    "noMatchingTriggers": "Keine passenden Trigger",
    "addPromptTrigger": "Prompt-Trigger hinzufügen",
    "compatibleEmbeddings": "Kompatible Einbettungen"
    "compatibleEmbeddings": "Kompatible Einbettungen",
    "replace": "Ersetzen",
    "insert": "Einfügen",
    "discard": "Verwerfen",
    "generateFromImage": "Prompt aus Bild generieren",
    "expandCurrentPrompt": "Aktuelle Prompt erweitern",
    "uploadImageForPromptGeneration": "Bild zur Prompt-Generierung hochladen",
    "expandingPrompt": "Prompt wird erweitert...",
    "resultTitle": "Prompt-Erweiterung abgeschlossen"
  },
  "ui": {
    "tabs": {
@@ -1573,30 +1594,30 @@
    "newGlobalReferenceImage": "Neues globales Referenzbild",
    "newRegionalReferenceImage": "Neues regionales Referenzbild",
    "newControlLayer": "Neue Kontroll-Ebene",
    "newRasterLayer": "Neue Raster-Ebene"
    "newRasterLayer": "Neue Rasterebene"
  },
  "rectangle": "Rechteck",
  "saveCanvasToGallery": "Leinwand in Galerie speichern",
  "newRasterLayerError": "Problem beim Erstellen einer Raster-Ebene",
  "newRasterLayerError": "Problem beim Erstellen einer Rasterebene",
  "saveLayerToAssets": "Ebene in Galerie speichern",
  "deleteReferenceImage": "Referenzbild löschen",
  "referenceImage": "Referenzbild",
  "opacity": "Opazität",
  "removeBookmark": "Lesezeichen entfernen",
  "rasterLayer": "Raster-Ebene",
  "rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
  "rasterLayer": "Rasterebene",
  "rasterLayers_withCount_visible": "Rasterebenen ({{count}})",
  "controlLayers_withCount_visible": "Kontroll-Ebenen ({{count}})",
  "deleteSelected": "Ausgewählte löschen",
  "newRegionalReferenceImageError": "Problem beim Erstellen eines regionalen Referenzbilds",
  "newControlLayerOk": "Kontroll-Ebene erstellt",
  "newControlLayerError": "Problem beim Erstellen einer Kontroll-Ebene",
  "newRasterLayerOk": "Raster-Layer erstellt",
  "newRasterLayerOk": "Rasterebene erstellt",
  "moveToFront": "Nach vorne bringen",
  "copyToClipboard": "In die Zwischenablage kopieren",
  "controlLayers_withCount_hidden": "Kontroll-Ebenen ({{count}} ausgeblendet)",
  "clearCaches": "Cache leeren",
  "controlLayer": "Kontroll-Ebene",
  "rasterLayers_withCount_hidden": "Raster-Ebenen ({{count}} ausgeblendet)",
  "rasterLayers_withCount_hidden": "Rasterebenen ({{count}} ausgeblendet)",
  "transparency": "Transparenz",
  "canvas": "Leinwand",
  "global": "Global",
@@ -1682,7 +1703,14 @@
    "filterType": "Filtertyp",
    "filter": "Filter"
  },
  "bookmark": "Lesezeichen für Schnell-Umschalten"
  "bookmark": "Lesezeichen für Schnell-Umschalten",
  "asRasterLayer": "Als $t(controlLayers.rasterLayer)",
  "asRasterLayerResize": "Als $t(controlLayers.rasterLayer) (Größe anpassen)",
  "rasterLayer_withCount_one": "$t(controlLayers.rasterLayer)",
  "rasterLayer_withCount_other": "Rasterebenen",
  "newRasterLayer": "Neue $t(controlLayers.rasterLayer)",
  "showNonRasterLayers": "Nicht-Rasterebenen anzeigen (Umschalt+H)",
  "hideNonRasterLayers": "Nicht-Rasterebenen ausblenden (Umschalt+H)"
},
"upsell": {
  "shareAccess": "Zugang teilen",
@@ -225,7 +225,16 @@
  "prompt": {
    "addPromptTrigger": "Add Prompt Trigger",
    "compatibleEmbeddings": "Compatible Embeddings",
    "noMatchingTriggers": "No matching triggers"
    "noMatchingTriggers": "No matching triggers",
    "generateFromImage": "Generate prompt from image",
    "expandCurrentPrompt": "Expand Current Prompt",
    "uploadImageForPromptGeneration": "Upload Image for Prompt Generation",
    "expandingPrompt": "Expanding prompt...",
    "resultTitle": "Prompt Expansion Complete",
    "resultSubtitle": "Choose how to handle the expanded prompt:",
    "replace": "Replace",
    "insert": "Insert",
    "discard": "Discard"
  },
  "queue": {
    "queue": "Queue",
@@ -244,6 +253,7 @@
    "cancel": "Cancel",
    "cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
    "cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
    "cancelAllExceptCurrent": "Cancel All Except Current",
    "cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
    "cancelTooltip": "Cancel Current Item",
    "cancelSucceeded": "Item Canceled",
@@ -264,7 +274,7 @@
    "retryItem": "Retry Item",
    "cancelBatchSucceeded": "Batch Canceled",
    "cancelBatchFailed": "Problem Canceling Batch",
    "clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
    "clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled and the Canvas Staging Area will be reset.",
    "clearQueueAlertDialog2": "Are you sure you want to clear the queue?",
    "current": "Current",
    "next": "Next",
@@ -335,14 +345,14 @@
    "images": "Images",
    "assets": "Assets",
    "alwaysShowImageSizeBadge": "Always Show Image Size Badge",
    "assetsTab": "Files you’ve uploaded for use in your projects.",
    "assetsTab": "Files you've uploaded for use in your projects.",
    "autoAssignBoardOnClick": "Auto-Assign Board on Click",
    "autoSwitchNewImages": "Auto-Switch to New Images",
    "boardsSettings": "Boards Settings",
    "copy": "Copy",
    "currentlyInUse": "This image is currently in use in the following features:",
    "drop": "Drop",
    "dropOrUpload": "$t(gallery.drop) or Upload",
    "dropOrUpload": "Drop or Upload",
    "dropToUpload": "$t(gallery.drop) to Upload",
    "deleteImage_one": "Delete Image",
    "deleteImage_other": "Delete {{count}} Images",
@@ -357,7 +367,7 @@
    "gallerySettings": "Gallery Settings",
    "go": "Go",
    "image": "image",
    "imagesTab": "Images you’ve created and saved within Invoke.",
    "imagesTab": "Images you've created and saved within Invoke.",
    "imagesSettings": "Gallery Images Settings",
    "jump": "Jump",
    "loading": "Loading",
@@ -396,7 +406,8 @@
    "compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit.",
    "openViewer": "Open Viewer",
    "closeViewer": "Close Viewer",
    "move": "Move"
    "move": "Move",
    "useForPromptGeneration": "Use for Prompt Generation"
  },
  "hotkeys": {
    "hotkeys": "Hotkeys",
@@ -460,6 +471,11 @@
    "togglePanels": {
      "title": "Toggle Panels",
      "desc": "Show or hide both left and right panels at once."
    },
    "selectGenerateTab": {
      "title": "Select the Generate Tab",
      "desc": "Selects the Generate tab.",
      "key": "1"
    }
  },
  "canvas": {
@@ -564,6 +580,10 @@
      "title": "Transform",
      "desc": "Transform the selected layer."
    },
    "invertMask": {
      "title": "Invert Mask",
      "desc": "Invert the selected inpaint mask, creating a new mask with opposite transparency."
    },
    "applyFilter": {
      "title": "Apply Filter",
      "desc": "Apply the pending filter to the selected layer."
@@ -579,6 +599,30 @@
    "cancelTransform": {
      "title": "Cancel Transform",
      "desc": "Cancel the pending transform."
    },
    "settings": {
      "behavior": "Behavior",
      "display": "Display",
      "grid": "Grid",
      "debug": "Debug"
    },
    "toggleNonRasterLayers": {
      "title": "Toggle Non-Raster Layers",
      "desc": "Show or hide all non-raster layer categories (Control Layers, Inpaint Masks, Regional Guidance)."
    },
    "fitBboxToMasks": {
      "title": "Fit Bbox To Masks",
      "desc": "Automatically adjust the generation bounding box to fit visible inpaint masks"
    },
    "applySegmentAnything": {
      "title": "Apply Segment Anything",
      "desc": "Apply the current Segment Anything mask.",
      "key": "enter"
    },
    "cancelSegmentAnything": {
      "title": "Cancel Segment Anything",
      "desc": "Cancel the current Segment Anything operation.",
      "key": "esc"
    }
  },
  "workflows": {
@@ -708,6 +752,10 @@
    "deleteSelection": {
      "title": "Delete",
      "desc": "Delete all selected images. By default, you will be prompted to confirm deletion. If the images are currently in use in the app, you will be warned."
    },
    "starImage": {
      "title": "Star/Unstar Image",
      "desc": "Star or unstar the selected image."
    }
  }
},
@@ -742,7 +790,7 @@
  "vae": "VAE",
  "width": "Width",
  "workflow": "Workflow",
  "canvasV2Metadata": "Canvas"
  "canvasV2Metadata": "Canvas Layers"
},
"modelManager": {
  "active": "active",
@@ -763,7 +811,7 @@
  "convertToDiffusers": "Convert To Diffusers",
  "convertToDiffusersHelpText1": "This model will be converted to the 🧨 Diffusers format.",
  "convertToDiffusersHelpText2": "This process will replace your Model Manager entry with the Diffusers version of the same model.",
  "convertToDiffusersHelpText3": "Your checkpoint file on disk WILL be deleted if it is in InvokeAI root folder. If it is in a custom location, then it WILL NOT be deleted.",
  "convertToDiffusersHelpText3": "Your checkpoint file on disk WILL be deleted if it is in the InvokeAI root folder. If it is in a custom location, then it WILL NOT be deleted.",
  "convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
  "convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
  "convertToDiffusersHelpText6": "Do you wish to convert this model?",
@@ -806,7 +854,11 @@
  "urlUnauthorizedErrorMessage": "You may need to configure an API token to access this model.",
  "urlUnauthorizedErrorMessage2": "Learn how here.",
  "imageEncoderModelId": "Image Encoder Model ID",
  "includesNModels": "Includes {{n}} models and their dependencies",
  "installedModelsCount": "{{installed}} of {{total}} models installed.",
  "includesNModels": "Includes {{n}} models and their dependencies.",
  "allNModelsInstalled": "All {{count}} models installed",
  "nToInstall": "{{count}} to install",
  "nAlreadyInstalled": "{{count}} already installed",
  "installQueue": "Install Queue",
  "inplaceInstall": "In-place install",
  "inplaceInstallDesc": "Install models without copying the files. When using the model, it will be loaded from this location. If disabled, the model file(s) will be copied into the Invoke-managed models directory during installation.",
@@ -869,6 +921,25 @@
  "starterBundleHelpText": "Easily install all models needed to get started with a base model, including a main model, controlnets, IP adapters, and more. Selecting a bundle will skip any models that you already have installed.",
  "starterModels": "Starter Models",
  "starterModelsInModelManager": "Starter Models can be found in Model Manager",
  "bundleAlreadyInstalled": "Bundle already installed",
  "bundleAlreadyInstalledDesc": "All models in the {{bundleName}} bundle are already installed.",
  "launchpadTab": "Launchpad",
  "launchpad": {
    "welcome": "Welcome to Model Management",
    "description": "Invoke requires models to be installed to utilize most features of the platform. Choose from manual installation options or explore curated starter models.",
    "manualInstall": "Manual Installation",
    "urlDescription": "Install models from a URL or local file path. Perfect for specific models you want to add.",
    "huggingFaceDescription": "Browse and install models directly from HuggingFace repositories.",
    "scanFolderDescription": "Scan a local folder to automatically detect and install models.",
    "recommendedModels": "Recommended Models",
    "exploreStarter": "Or browse all available starter models",
    "quickStart": "Quick Start Bundles",
    "bundleDescription": "Each bundle includes essential models for each model family and curated base models to get started.",
    "browseAll": "Or browse all available models:",
    "stableDiffusion15": "Stable Diffusion 1.5",
    "sdxl": "SDXL",
    "fluxDev": "FLUX.1 dev"
  },
  "controlLora": "Control LoRA",
  "llavaOnevision": "LLaVA OneVision",
  "syncModels": "Sync Models",
@@ -905,7 +976,8 @@
  "selectModel": "Select a Model",
  "noLoRAsInstalled": "No LoRAs installed",
  "noRefinerModelsInstalled": "No SDXL Refiner models installed",
  "defaultVAE": "Default VAE"
  "defaultVAE": "Default VAE",
  "noCompatibleLoRAs": "No Compatible LoRAs"
},
"nodes": {
  "arithmeticSequence": "Arithmetic Sequence",
@@ -1081,7 +1153,23 @@
  "addItem": "Add Item",
  "generateValues": "Generate Values",
  "floatRangeGenerator": "Float Range Generator",
  "integerRangeGenerator": "Integer Range Generator"
  "integerRangeGenerator": "Integer Range Generator",
  "layout": {
    "autoLayout": "Auto Layout",
    "layeringStrategy": "Layering Strategy",
    "networkSimplex": "Network Simplex",
    "longestPath": "Longest Path",
    "nodeSpacing": "Node Spacing",
    "layerSpacing": "Layer Spacing",
    "layoutDirection": "Layout Direction",
    "layoutDirectionRight": "Right",
    "layoutDirectionDown": "Down",
    "alignment": "Node Alignment",
    "alignmentUL": "Top Left",
    "alignmentDL": "Bottom Left",
    "alignmentUR": "Top Right",
    "alignmentDR": "Bottom Right"
  }
},
"parameters": {
  "aspect": "Aspect",
@@ -1147,6 +1235,7 @@
  "modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}",
  "modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}",
  "fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time",
  "fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext",
  "canvasIsFiltering": "Canvas is busy (filtering)",
  "canvasIsTransforming": "Canvas is busy (transforming)",
  "canvasIsRasterizing": "Canvas is busy (rasterizing)",
@@ -1154,7 +1243,9 @@
  "canvasIsSelectingObject": "Canvas is busy (selecting object)",
  "noPrompts": "No prompts generated",
  "noNodesInGraph": "No nodes in graph",
  "systemDisconnected": "System disconnected"
  "systemDisconnected": "System disconnected",
  "promptExpansionPending": "Prompt expansion in progress",
  "promptExpansionResultPending": "Please accept or discard your prompt expansion result"
},
"maskBlur": "Mask Blur",
"negativePromptPlaceholder": "Negative Prompt",
@@ -1312,6 +1403,21 @@
  "problemCopyingLayer": "Unable to Copy Layer",
  "problemSavingLayer": "Unable to Save Layer",
  "problemDownloadingImage": "Unable to Download Image",
  "noRasterLayers": "No Raster Layers Found",
  "noRasterLayersDesc": "Create at least one raster layer to export to PSD",
  "noActiveRasterLayers": "No Active Raster Layers",
  "noActiveRasterLayersDesc": "Enable at least one raster layer to export to PSD",
  "noVisibleRasterLayers": "No Visible Raster Layers",
  "noVisibleRasterLayersDesc": "Enable at least one raster layer to export to PSD",
  "invalidCanvasDimensions": "Invalid Canvas Dimensions",
  "canvasTooLarge": "Canvas Too Large",
  "canvasTooLargeDesc": "Canvas dimensions exceed the maximum allowed size for PSD export. Reduce the total width and height of the canvas and try again.",
  "failedToProcessLayers": "Failed to Process Layers",
  "psdExportSuccess": "PSD Export Complete",
  "psdExportSuccessDesc": "Successfully exported {{count}} layers to PSD file",
  "problemExportingPSD": "Problem Exporting PSD",
  "canvasManagerNotAvailable": "Canvas Manager Not Available",
  "noValidLayerAdapters": "No Valid Layer Adapters Found",
  "pasteSuccess": "Pasted to {{destination}}",
  "pasteFailed": "Paste Failed",
  "prunedQueue": "Pruned Queue",
@@ -1337,9 +1443,23 @@
  "fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
  "imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
  "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models for Inpainting and Outpainting tasks.",
  "fluxKontextIncompatibleGenerationMode": "FLUX Kontext does not support generation from images placed on the canvas. Re-try using the Reference Image section and disable any Raster Layers.",
  "problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
  "problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
  "workflowUnpublished": "Workflow Unpublished"
  "workflowUnpublished": "Workflow Unpublished",
  "sentToCanvas": "Sent to Canvas",
  "sentToUpscale": "Sent to Upscale",
  "promptGenerationStarted": "Prompt generation started",
  "uploadAndPromptGenerationFailed": "Failed to upload image and generate prompt",
  "promptExpansionFailed": "We ran into an issue. Please try prompt expansion again.",
  "maskInverted": "Mask Inverted",
  "maskInvertFailed": "Failed to Invert Mask",
  "noVisibleMasks": "No Visible Masks",
  "noVisibleMasksDesc": "Create or enable at least one inpaint mask to invert",
  "noInpaintMaskSelected": "No Inpaint Mask Selected",
  "noInpaintMaskSelectedDesc": "Select an inpaint mask to invert",
  "invalidBbox": "Invalid Bounding Box",
  "invalidBboxDesc": "The bounding box has no valid dimensions"
},
"popovers": {
  "clipSkip": {
@@ -1707,6 +1827,20 @@
      "Structure controls how closely the output image will keep to the layout of the original. Low structure allows major changes, while high structure strictly maintains the original composition and layout."
    ]
  },
  "tileSize": {
    "heading": "Tile Size",
    "paragraphs": [
      "Controls the size of tiles used during the upscaling process. Larger tiles use more memory but may produce better results.",
      "SD1.5 models default to 768, while SDXL models default to 1024. Reduce tile size if you encounter memory issues."
    ]
  },
  "tileOverlap": {
    "heading": "Tile Overlap",
    "paragraphs": [
      "Controls the overlap between adjacent tiles during upscaling. Higher overlap values help reduce visible seams between tiles but use more memory.",
      "The default value of 128 works well for most cases, but you can adjust based on your specific needs and memory constraints."
    ]
  },
  "fluxDevLicense": {
    "heading": "Non-Commercial License",
    "paragraphs": [
@@ -1858,10 +1992,12 @@
  "canvas": "Canvas",
  "bookmark": "Bookmark for Quick Switch",
  "fitBboxToLayers": "Fit Bbox To Layers",
  "fitBboxToMasks": "Fit Bbox To Masks",
  "removeBookmark": "Remove Bookmark",
  "saveCanvasToGallery": "Save Canvas to Gallery",
  "saveBboxToGallery": "Save Bbox to Gallery",
  "saveLayerToAssets": "Save Layer to Assets",
  "exportCanvasToPSD": "Export Canvas to PSD",
  "cropLayerToBbox": "Crop Layer to Bbox",
  "savedToGalleryOk": "Saved to Gallery",
  "savedToGalleryError": "Error saving to gallery",
@@ -1887,6 +2023,7 @@
  "mergingLayers": "Merging layers",
  "clearHistory": "Clear History",
  "bboxOverlay": "Show Bbox Overlay",
  "ruleOfThirds": "Show Rule of Thirds",
  "newSession": "New Session",
  "clearCaches": "Clear Caches",
  "recalculateRects": "Recalculate Rects",
@@ -1920,6 +2057,7 @@
  "rasterLayer": "Raster Layer",
  "controlLayer": "Control Layer",
  "inpaintMask": "Inpaint Mask",
  "invertMask": "Invert Mask",
  "regionalGuidance": "Regional Guidance",
  "referenceImageRegional": "Reference Image (Regional)",
  "referenceImageGlobal": "Reference Image (Global)",
@@ -1992,6 +2130,8 @@
  "disableTransparencyEffect": "Disable Transparency Effect",
  "hidingType": "Hiding {{type}}",
  "showingType": "Showing {{type}}",
  "showNonRasterLayers": "Show Non-Raster Layers (Shift+H)",
  "hideNonRasterLayers": "Hide Non-Raster Layers (Shift+H)",
  "dynamicGrid": "Dynamic Grid",
  "logDebugInfo": "Log Debug Info",
  "locked": "Locked",
@@ -2014,8 +2154,10 @@
  "resetCanvasLayers": "Reset Canvas Layers",
  "resetGenerationSettings": "Reset Generation Settings",
  "replaceCurrent": "Replace Current",
  "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
  "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
  "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
  "referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Upload an image</UploadButton>, drag an image from the gallery onto this Reference Image or <PullBboxButton>pull the bounding box into this Reference Image</PullBboxButton> to get started.",
  "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the gallery onto this Reference Image to get started.",
  "uploadOrDragAnImage": "Drag an image from the gallery or <UploadButton>upload an image</UploadButton>.",
  "imageNoise": "Image Noise",
  "denoiseLimit": "Denoise Limit",
  "warnings": {
@@ -2256,6 +2398,10 @@
    "label": "Preserve Masked Region",
    "alert": "Preserving Masked Region"
  },
  "saveAllImagesToGallery": {
    "label": "Send New Generations to Gallery",
    "alert": "Sending new generations to Gallery, bypassing Canvas"
  },
  "isolatedStagingPreview": "Isolated Staging Preview",
  "isolatedPreview": "Isolated Preview",
  "isolatedLayerPreview": "Isolated Layer Preview",
@@ -2284,6 +2430,7 @@
  "newGlobalReferenceImage": "New Global Reference Image",
  "newRegionalReferenceImage": "New Regional Reference Image",
  "newControlLayer": "New Control Layer",
  "newResizedControlLayer": "New Resized Control Layer",
  "newRasterLayer": "New Raster Layer",
  "newInpaintMask": "New Inpaint Mask",
  "newRegionalGuidance": "New Regional Guidance",
@@ -2301,6 +2448,11 @@
  "saveToGallery": "Save To Gallery",
  "showResultsOn": "Showing Results",
  "showResultsOff": "Hiding Results"
  },
  "autoSwitch": {
    "off": "Off",
    "switchOnStart": "On Start",
    "switchOnFinish": "On Finish"
  }
},
"upscaling": {
@@ -2312,6 +2464,9 @@
  "upscaleModel": "Upscale Model",
  "postProcessingModel": "Post-Processing Model",
  "scale": "Scale",
  "tileControl": "Tile Control",
  "tileSize": "Tile Size",
  "tileOverlap": "Tile Overlap",
  "postProcessingMissingModelWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install a post-processing (image to image) model.",
  "missingModelsWarning": "Visit the <LinkComponent>Model Manager</LinkComponent> to install the required models:",
  "mainModelDesc": "Main model (SD1.5 or SDXL architecture)",
@@ -2367,7 +2522,8 @@
  "uploadImage": "Upload Image",
  "useForTemplate": "Use For Prompt Template",
  "viewList": "View Template List",
  "viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box."
  "viewModeTooltip": "This is how your prompt will look with your currently selected template. To edit your prompt, click anywhere in the text box.",
  "togglePromptPreviews": "Toggle Prompt Previews"
},
"upsell": {
  "inviteTeammates": "Invite Teammates",
@@ -2387,6 +2543,55 @@
  "upscaling": "Upscaling",
  "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
  "gallery": "Gallery"
},
"launchpad": {
  "workflowsTitle": "Go deep with Workflows.",
  "upscalingTitle": "Upscale and add detail.",
  "canvasTitle": "Edit and refine on Canvas.",
  "generateTitle": "Generate images from text prompts.",
  "modelGuideText": "Want to learn what prompts work best for each model?",
  "modelGuideLink": "Check out our Model Guide.",
  "workflows": {
    "description": "Workflows are reusable templates that automate image generation tasks, allowing you to quickly perform complex operations and get consistent results.",
    "learnMoreLink": "Learn more about creating workflows",
    "browseTemplates": {
      "title": "Browse Workflow Templates",
      "description": "Choose from pre-built workflows for common tasks"
    },
    "createNew": {
      "title": "Create a new Workflow",
      "description": "Start a new workflow from scratch"
    },
    "loadFromFile": {
      "title": "Load workflow from file",
      "description": "Upload a workflow to start with an existing setup"
    }
  },
  "upscaling": {
    "uploadImage": {
      "title": "Upload Image to Upscale",
      "description": "Click or drag an image to upscale (JPG, PNG, WebP up to 100MB)"
    },
    "replaceImage": {
      "title": "Replace Current Image",
      "description": "Click or drag a new image to replace the current one"
    },
    "imageReady": {
      "title": "Image Ready",
      "description": "Press Invoke to begin upscaling"
    },
    "readyToUpscale": {
      "title": "Ready to upscale!",
      "description": "Configure your settings below, then click the Invoke button to begin upscaling your image."
    },
    "upscaleModel": "Upscale Model",
    "model": "Model",
    "scale": "Scale",
    "helpText": {
      "promptAdvice": "When upscaling, use a prompt that describes the medium and style. Avoid describing specific content details in the image.",
      "styleAdvice": "Upscaling works best with the general style of your image."
    }
  }
}
},
"system": {
@@ -2426,8 +2631,10 @@
"whatsNew": {
  "whatsNewInInvoke": "What's New in Invoke",
  "items": [
    "Inpainting: Per-mask noise levels and denoise limits.",
    "Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
    "New setting to send all Canvas generations directly to the Gallery.",
    "New Invert Mask (Shift+V) and Fit BBox to Mask (Shift+B) capabilities.",
    "Expanded support for Model Thumbnails and configurations.",
    "Various other quality of life updates and fixes"
  ],
  "readReleaseNotes": "Read Release Notes",
  "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -2436,62 +2643,16 @@
"supportVideos": {
  "supportVideos": "Support Videos",
  "gettingStarted": "Getting Started",
  "controlCanvas": "Control Canvas",
  "watch": "Watch",
  "studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
  "studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
  "studioSessionsDesc": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
  "videos": {
    "creatingYourFirstImage": {
      "title": "Creating Your First Image",
      "description": "Introduction to creating an image from scratch using Invoke's tools."
    "gettingStarted": {
      "title": "Getting Started with Invoke",
      "description": "Complete video series covering everything you need to know to get started with Invoke, from creating your first image to advanced techniques."
    },
    "usingControlLayersAndReferenceGuides": {
      "title": "Using Control Layers and Reference Guides",
      "description": "Learn how to guide your image creation with control layers and reference images."
    },
    "understandingImageToImageAndDenoising": {
      "title": "Understanding Image-to-Image and Denoising",
      "description": "Overview of image-to-image transformations and denoising in Invoke."
    },
    "exploringAIModelsAndConceptAdapters": {
      "title": "Exploring AI Models and Concept Adapters",
      "description": "Dive into AI models and how to use concept adapters for creative control."
    },
    "creatingAndComposingOnInvokesControlCanvas": {
      "title": "Creating and Composing on Invoke's Control Canvas",
      "description": "Learn to compose images using Invoke's control canvas."
    },
    "upscaling": {
      "title": "Upscaling",
      "description": "How to upscale images with Invoke's tools to enhance resolution."
    },
    "howDoIGenerateAndSaveToTheGallery": {
      "title": "How Do I Generate and Save to the Gallery?",
      "description": "Steps to generate and save images to the gallery."
    },
    "howDoIEditOnTheCanvas": {
      "title": "How Do I Edit on the Canvas?",
      "description": "Guide to editing images directly on the canvas."
    },
    "howDoIDoImageToImageTransformation": {
      "title": "How Do I Do Image-to-Image Transformation?",
      "description": "Tutorial on performing image-to-image transformations in Invoke."
    },
    "howDoIUseControlNetsAndControlLayers": {
      "title": "How Do I Use Control Nets and Control Layers?",
      "description": "Learn to apply control layers and controlnets to your images."
    },
    "howDoIUseGlobalIPAdaptersAndReferenceImages": {
      "title": "How Do I Use Global IP Adapters and Reference Images?",
      "description": "Introduction to adding reference images and global IP adapters."
    },
    "howDoIUseInpaintMasks": {
      "title": "How Do I Use Inpaint Masks?",
      "description": "How to apply inpaint masks for image correction and variation."
    },
    "howDoIOutpaint": {
      "title": "How Do I Outpaint?",
      "description": "Guide to outpainting beyond the original image borders."
    "studioSessions": {
      "title": "Studio Sessions",
      "description": "Deep dive sessions exploring advanced Invoke features, creative workflows, and community discussions."
    }
  }
}
@@ -2375,65 +2375,8 @@
  },
  "supportVideos": {
    "watch": "Regarder",
    "videos": {
      "upscaling": {
        "description": "Comment améliorer la résolution des images avec les outils d'Invoke pour les agrandir.",
        "title": "Upscaling"
      },
      "howDoIGenerateAndSaveToTheGallery": {
        "description": "Étapes pour générer et enregistrer des images dans la galerie.",
        "title": "Comment générer et enregistrer dans la galerie ?"
      },
      "usingControlLayersAndReferenceGuides": {
        "title": "Utilisation des couches de contrôle et des guides de référence",
        "description": "Apprenez à guider la création de vos images avec des couches de contrôle et des images de référence."
      },
      "exploringAIModelsAndConceptAdapters": {
        "description": "Plongez dans les modèles d'IA et découvrez comment utiliser les adaptateurs de concepts pour un contrôle créatif.",
        "title": "Exploration des modèles d'IA et des adaptateurs de concepts"
      },
      "howDoIUseControlNetsAndControlLayers": {
        "title": "Comment utiliser les réseaux de contrôle et les couches de contrôle ?",
        "description": "Apprenez à appliquer des couches de contrôle et des ControlNets à vos images."
      },
      "creatingAndComposingOnInvokesControlCanvas": {
        "description": "Apprenez à composer des images en utilisant le canvas de contrôle d'Invoke.",
        "title": "Créer et composer sur le canvas de contrôle d'Invoke"
      },
      "howDoIEditOnTheCanvas": {
        "title": "Comment puis-je modifier sur la toile ?",
        "description": "Guide pour éditer des images directement sur la toile."
      },
      "howDoIDoImageToImageTransformation": {
        "title": "Comment effectuer une transformation d'image à image ?",
        "description": "Tutoriel sur la réalisation de transformations d'image à image dans Invoke."
      },
      "howDoIUseGlobalIPAdaptersAndReferenceImages": {
        "title": "Comment utiliser les IP Adapters globaux et les images de référence ?",
        "description": "Introduction à l'ajout d'images de référence et IP Adapters globaux."
      },
      "howDoIUseInpaintMasks": {
        "title": "Comment utiliser les masques d'inpainting ?",
        "description": "Comment appliquer des masques de retouche pour la correction et la variation d'image."
      },
      "creatingYourFirstImage": {
        "title": "Créer votre première image",
        "description": "Introduction à la création d'une image à partir de zéro en utilisant les outils d'Invoke."
      },
      "understandingImageToImageAndDenoising": {
        "title": "Comprendre l'Image-à-Image et le Débruitage",
        "description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
      },
      "howDoIOutpaint": {
        "title": "Comment effectuer un outpainting ?",
        "description": "Guide pour l'extension au-delà des bordures de l'image originale."
      }
    },
    "gettingStarted": "Commencer",
    "studioSessionsDesc1": "Consultez le <StudioSessionsPlaylistLink /> pour des approfondissements sur Invoke.",
    "studioSessionsDesc2": "Rejoignez notre <DiscordLink /> pour participer aux sessions en direct et poser vos questions. Les sessions sont ajoutées dans la playlist la semaine suivante.",
    "supportVideos": "Vidéos d'assistance",
    "controlCanvas": "Contrôler la toile"
    "supportVideos": "Vidéos d'assistance"
  },
  "modelCache": {
    "clear": "Effacer le cache du modèle",
@@ -152,7 +152,7 @@
  "image": "immagine",
  "drop": "Rilascia",
  "unstarImage": "Rimuovi contrassegno immagine",
  "dropOrUpload": "$t(gallery.drop) o carica",
  "dropOrUpload": "Rilascia o carica",
  "starImage": "Contrassegna l'immagine",
  "dropToUpload": "$t(gallery.drop) per aggiornare",
  "bulkDownloadRequested": "Preparazione del download",
@@ -197,7 +197,8 @@
  "boardsSettings": "Impostazioni Bacheche",
  "imagesSettings": "Impostazioni Immagini Galleria",
  "assets": "Risorse",
  "images": "Immagini"
  "images": "Immagini",
  "useForPromptGeneration": "Usa per generare il prompt"
},
"hotkeys": {
  "searchHotkeys": "Cerca tasti di scelta rapida",
@@ -253,12 +254,16 @@
    "desc": "Attiva/disattiva il pannello destro."
  },
  "resetPanelLayout": {
    "title": "Ripristina il layout del pannello",
    "desc": "Ripristina le dimensioni e il layout predefiniti dei pannelli sinistro e destro."
    "title": "Ripristina lo schema del pannello",
    "desc": "Ripristina le dimensioni e lo schema predefiniti dei pannelli sinistro e destro."
  },
  "togglePanels": {
    "title": "Attiva/disattiva i pannelli",
    "desc": "Mostra o nascondi contemporaneamente i pannelli sinistro e destro."
  },
  "selectGenerateTab": {
    "title": "Seleziona la scheda Genera",
    "desc": "Seleziona la scheda Genera."
  }
},
"hotkeys": "Tasti di scelta rapida",
@@ -379,6 +384,32 @@
  "applyTransform": {
    "title": "Applica trasformazione",
    "desc": "Applica la trasformazione in sospeso al livello selezionato."
  },
  "toggleNonRasterLayers": {
    "desc": "Mostra o nascondi tutte le categorie di livelli non raster (Livelli di controllo, Maschere di Inpaint, Guida regionale).",
    "title": "Attiva/disattiva livelli non raster"
  },
  "settings": {
    "behavior": "Comportamento",
    "display": "Mostra",
    "grid": "Griglia"
  },
  "invertMask": {
    "title": "Inverti maschera",
    "desc": "Inverte la maschera di inpaint selezionata, creando una nuova maschera con trasparenza opposta."
  },
  "fitBboxToMasks": {
    "title": "Adatta il riquadro di delimitazione alle maschere",
    "desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo alle maschere di inpaint visibili"
  },
  "applySegmentAnything": {
    "title": "Applica Segment Anything",
    "desc": "Applica la maschera Segment Anything corrente.",
    "key": "invio"
  },
  "cancelSegmentAnything": {
    "title": "Annulla Segment Anything",
    "desc": "Annulla l'operazione Segment Anything corrente."
  }
},
"workflows": {
@@ -508,6 +539,10 @@
  "galleryNavUpAlt": {
    "desc": "Uguale a Naviga verso l'alto, ma seleziona l'immagine da confrontare, aprendo la modalità di confronto se non è già aperta.",
    "title": "Naviga verso l'alto (Confronta immagine)"
  },
  "starImage": {
    "desc": "Aggiungi/Rimuovi contrassegno all'immagine selezionata.",
    "title": "Aggiungi / Rimuovi contrassegno immagine"
  }
}
},
@@ -623,7 +658,7 @@
  "installingXModels_one": "Installazione di {{count}} modello",
  "installingXModels_many": "Installazione di {{count}} modelli",
  "installingXModels_other": "Installazione di {{count}} modelli",
  "includesNModels": "Include {{n}} modelli e le loro dipendenze",
  "includesNModels": "Include {{n}} modelli e le loro dipendenze.",
  "starterBundleHelpText": "Installa facilmente tutti i modelli necessari per iniziare con un modello base, tra cui un modello principale, controlnet, adattatori IP e altro. Selezionando un pacchetto salterai tutti i modelli che hai già installato.",
  "noDefaultSettings": "Nessuna impostazione predefinita configurata per questo modello. Visita Gestione Modelli per aggiungere impostazioni predefinite.",
  "defaultSettingsOutOfSync": "Alcune impostazioni non corrispondono a quelle predefinite del modello:",
@@ -656,7 +691,27 @@
  "manageModels": "Gestione modelli",
  "hfTokenReset": "Ripristino del gettone HF",
  "relatedModels": "Modelli correlati",
  "showOnlyRelatedModels": "Correlati"
  "showOnlyRelatedModels": "Correlati",
  "installedModelsCount": "{{installed}} di {{total}} modelli installati.",
  "allNModelsInstalled": "Tutti i {{count}} modelli installati",
  "nToInstall": "{{count}} da installare",
  "nAlreadyInstalled": "{{count}} già installati",
  "bundleAlreadyInstalled": "Pacchetto già installato",
  "bundleAlreadyInstalledDesc": "Tutti i modelli nel pacchetto {{bundleName}} sono già installati.",
  "launchpad": {
    "description": "Per utilizzare la maggior parte delle funzionalità della piattaforma, Invoke richiede l'installazione di modelli. Scegli tra le opzioni di installazione manuale o esplora i modelli di avvio selezionati.",
    "manualInstall": "Installazione manuale",
    "urlDescription": "Installa i modelli da un URL o da un percorso file locale. Perfetto per modelli specifici che desideri aggiungere.",
    "huggingFaceDescription": "Esplora e installa i modelli direttamente dai repository di HuggingFace.",
    "scanFolderDescription": "Esegui la scansione di una cartella locale per rilevare e installare automaticamente i modelli.",
    "recommendedModels": "Modelli consigliati",
    "exploreStarter": "Oppure sfoglia tutti i modelli iniziali disponibili",
    "welcome": "Benvenuti in Gestione Modelli",
    "quickStart": "Pacchetti di avvio rapido",
    "bundleDescription": "Ogni pacchetto include modelli essenziali per ogni famiglia di modelli e modelli base selezionati per iniziare.",
    "browseAll": "Oppure scopri tutti i modelli disponibili:"
  },
  "launchpadTab": "Rampa di lancio"
},
"parameters": {
  "images": "Immagini",
@@ -742,7 +797,10 @@
  "modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
  "modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
  "modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
  "modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade.",
|
||||
"fluxKontextMultipleReferenceImages": "È possibile utilizzare solo 1 immagine di riferimento alla volta con Flux Kontext",
|
||||
"promptExpansionResultPending": "Accetta o ignora il risultato dell'espansione del prompt",
|
||||
"promptExpansionPending": "Espansione del prompt in corso"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
@@ -884,7 +942,34 @@
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting.",
|
||||
"noRasterLayers": "Nessun livello raster trovato",
|
||||
"noRasterLayersDesc": "Crea almeno un livello raster da esportare in PSD",
|
||||
"noActiveRasterLayers": "Nessun livello raster attivo",
|
||||
"noActiveRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"noVisibleRasterLayers": "Nessun livello raster visibile",
|
||||
"noVisibleRasterLayersDesc": "Abilitare almeno un livello raster da esportare in PSD",
|
||||
"invalidCanvasDimensions": "Dimensioni della tela non valide",
|
||||
"canvasTooLarge": "Tela troppo grande",
|
||||
"canvasTooLargeDesc": "Le dimensioni della tela superano le dimensioni massime consentite per l'esportazione in formato PSD. Riduci la larghezza e l'altezza totali della tela e riprova.",
|
||||
"failedToProcessLayers": "Impossibile elaborare i livelli",
|
||||
"psdExportSuccess": "Esportazione PSD completata",
|
||||
"psdExportSuccessDesc": "Esportazione riuscita di {{count}} livelli nel file PSD",
|
||||
"problemExportingPSD": "Problema durante l'esportazione PSD",
|
||||
"noValidLayerAdapters": "Nessun adattatore di livello valido trovato",
|
||||
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext non supporta la generazione di immagini posizionate sulla tela. Riprova utilizzando la sezione Immagine di riferimento e disattiva tutti i livelli raster.",
|
||||
"canvasManagerNotAvailable": "Gestione tela non disponibile",
|
||||
"promptExpansionFailed": "Abbiamo riscontrato un problema. Riprova a eseguire l'espansione del prompt.",
|
||||
"uploadAndPromptGenerationFailed": "Impossibile caricare l'immagine e generare il prompt",
|
||||
"promptGenerationStarted": "Generazione del prompt avviata",
|
||||
"invalidBboxDesc": "Il riquadro di delimitazione non ha dimensioni valide",
|
||||
"invalidBbox": "Riquadro di delimitazione non valido",
|
||||
"noInpaintMaskSelectedDesc": "Seleziona una maschera di inpaint da invertire",
|
||||
"noInpaintMaskSelected": "Nessuna maschera di inpaint selezionata",
|
||||
"noVisibleMasksDesc": "Crea o abilita almeno una maschera inpaint da invertire",
|
||||
"noVisibleMasks": "Nessuna maschera visibile",
|
||||
"maskInvertFailed": "Impossibile invertire la maschera",
|
||||
"maskInverted": "Maschera invertita"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1079,7 +1164,22 @@
|
||||
"missingField_withName": "Campo \"{{name}}\" mancante",
|
||||
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
|
||||
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante",
|
||||
"layout": {
|
||||
"alignmentDR": "In basso a destra",
|
||||
"autoLayout": "Schema automatico",
|
||||
"nodeSpacing": "Spaziatura nodi",
|
||||
"layerSpacing": "Spaziatura livelli",
|
||||
"layeringStrategy": "Strategia livelli",
|
||||
"longestPath": "Percorso più lungo",
|
||||
"layoutDirection": "Direzione schema",
|
||||
"layoutDirectionRight": "Orizzontale",
|
||||
"layoutDirectionDown": "Verticale",
|
||||
"alignment": "Allineamento nodi",
|
||||
"alignmentUL": "In alto a sinistra",
|
||||
"alignmentDL": "In basso a sinistra",
|
||||
"alignmentUR": "In alto a destra"
|
||||
}
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -1156,7 +1256,7 @@
|
||||
"batchQueuedDesc_other": "Aggiunte {{count}} sessioni a {{direction}} della coda",
|
||||
"graphQueued": "Grafico in coda",
|
||||
"batch": "Lotto",
|
||||
"clearQueueAlertDialog": "Lo svuotamento della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati.",
|
||||
"clearQueueAlertDialog": "La cancellazione della coda annulla immediatamente tutti gli elementi in elaborazione e cancella completamente la coda. I filtri in sospeso verranno annullati e l'area di lavoro della Tela verrà reimpostata.",
|
||||
"pending": "In attesa",
|
||||
"completedIn": "Completato in",
|
||||
"resumeFailed": "Problema nel riavvio dell'elaborazione",
|
||||
@@ -1212,7 +1312,8 @@
|
||||
"retrySucceeded": "Elemento rieseguito",
|
||||
"retryItem": "Riesegui elemento",
|
||||
"retryFailed": "Problema riesecuzione elemento",
|
||||
"credits": "Crediti"
|
||||
"credits": "Crediti",
|
||||
"cancelAllExceptCurrent": "Annulla tutto tranne quello corrente"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1225,7 +1326,8 @@
|
||||
"addLora": "Aggiungi LoRA",
|
||||
"defaultVAE": "VAE predefinito",
|
||||
"concepts": "Concetti",
|
||||
"lora": "LoRA"
|
||||
"lora": "LoRA",
|
||||
"noCompatibleLoRAs": "Nessun LoRA compatibile"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Disabilita",
|
||||
@@ -1626,7 +1728,7 @@
|
||||
"structure": {
|
||||
"heading": "Struttura",
|
||||
"paragraphs": [
|
||||
"La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Una struttura bassa permette cambiamenti significativi, mentre una struttura alta conserva la composizione e il layout originali."
|
||||
"La struttura determina quanto l'immagine finale rispecchierà il layout dell'originale. Un valore struttura basso permette cambiamenti significativi, mentre un valore struttura alto conserva la composizione e lo schema originali."
|
||||
]
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
@@ -1683,6 +1785,20 @@
|
||||
"paragraphs": [
|
||||
"Controlla quale area viene modificata, in base all'intensità di riduzione del rumore."
|
||||
]
|
||||
},
|
||||
"tileSize": {
|
||||
"heading": "Dimensione riquadro",
|
||||
"paragraphs": [
|
||||
"Controlla la dimensione dei riquadri utilizzati durante il processo di ampliamento. Riquadri più grandi consumano più memoria, ma possono produrre risultati migliori.",
|
||||
"I modelli SD1.5 hanno un valore predefinito di 768, mentre i modelli SDXL hanno un valore predefinito di 1024. Ridurre le dimensioni dei riquadri in caso di problemi di memoria."
|
||||
]
|
||||
},
|
||||
"tileOverlap": {
|
||||
"heading": "Sovrapposizione riquadri",
|
||||
"paragraphs": [
|
||||
"Controlla la sovrapposizione tra riquadri adiacenti durante l'ampliamento. Valori di sovrapposizione più elevati aiutano a ridurre le giunzioni visibili tra i riquadri, ma consuma più memoria.",
|
||||
"Il valore predefinito di 128 è adatto alla maggior parte dei casi, ma è possibile modificarlo in base alle proprie esigenze specifiche e ai limiti di memoria."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1730,7 +1846,7 @@
|
||||
"parameterSet": "Parametro {{parameter}} impostato",
|
||||
"parsingFailed": "Analisi non riuscita",
|
||||
"recallParameter": "Richiama {{label}}",
|
||||
"canvasV2Metadata": "Tela",
|
||||
"canvasV2Metadata": "Livelli Tela",
|
||||
"guidance": "Guida",
|
||||
"seamlessXAxis": "Asse X senza giunte",
|
||||
"seamlessYAxis": "Asse Y senza giunte",
|
||||
@@ -1778,7 +1894,7 @@
|
||||
"opened": "Aperto",
|
||||
"convertGraph": "Converti grafico",
|
||||
"loadWorkflow": "$t(common.load) Flusso di lavoro",
|
||||
"autoLayout": "Disposizione automatica",
|
||||
"autoLayout": "Schema automatico",
|
||||
"loadFromGraph": "Carica il flusso di lavoro dal grafico",
|
||||
"userWorkflows": "Flussi di lavoro utente",
|
||||
"projectWorkflows": "Flussi di lavoro del progetto",
|
||||
@@ -1901,7 +2017,16 @@
|
||||
"prompt": {
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente",
|
||||
"discard": "Scarta",
|
||||
"insert": "Inserisci",
|
||||
"replace": "Sostituisci",
|
||||
"resultSubtitle": "Scegli come gestire il prompt espanso:",
|
||||
"resultTitle": "Espansione del prompt completata",
|
||||
"expandingPrompt": "Espansione del prompt...",
|
||||
"uploadImageForPromptGeneration": "Carica l'immagine per la generazione del prompt",
|
||||
"expandCurrentPrompt": "Espandi il prompt corrente",
|
||||
"generateFromImage": "Genera prompt dall'immagine"
|
||||
},
|
||||
"controlLayers": {
|
||||
"addLayer": "Aggiungi Livello",
|
||||
@@ -2212,7 +2337,11 @@
|
||||
"label": "Preserva la regione mascherata"
|
||||
},
|
||||
"isolatedLayerPreview": "Anteprima livello isolato",
|
||||
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione."
|
||||
"isolatedLayerPreviewDesc": "Se visualizzare solo questo livello quando si eseguono operazioni come il filtraggio o la trasformazione.",
|
||||
"saveAllImagesToGallery": {
|
||||
"alert": "Invia le nuove generazioni alla Galleria, bypassando la Tela",
|
||||
"label": "Invia le nuove generazioni alla Galleria"
|
||||
}
|
||||
},
|
||||
"transform": {
|
||||
"reset": "Reimposta",
|
||||
@@ -2262,7 +2391,8 @@
|
||||
"newRegionalGuidance": "Nuova Guida Regionale",
|
||||
"copyToClipboard": "Copia negli appunti",
|
||||
"copyCanvasToClipboard": "Copia la tela negli appunti",
|
||||
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
|
||||
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti",
|
||||
"newResizedControlLayer": "Nuovo livello di controllo ridimensionato"
|
||||
},
|
||||
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
|
||||
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
|
||||
@@ -2299,10 +2429,10 @@
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"useImage": "Usa immagine",
|
||||
"resetGenerationSettings": "Ripristina impostazioni di generazione",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla galleria su questa Immagine di riferimento.",
|
||||
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
|
||||
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
|
||||
"asControlLayer": "Come $t(controlLayers.controlLayer)",
|
||||
@@ -2352,7 +2482,20 @@
|
||||
"denoiseLimit": "Limite di riduzione del rumore",
|
||||
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
|
||||
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Rumore dell'immagine"
|
||||
"imageNoise": "Rumore dell'immagine",
|
||||
"exportCanvasToPSD": "Esporta la tela in PSD",
|
||||
"ruleOfThirds": "Mostra la regola dei terzi",
|
||||
"showNonRasterLayers": "Mostra livelli non raster (Shift+H)",
|
||||
"hideNonRasterLayers": "Nascondi livelli non raster (Shift+H)",
|
||||
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla galleria su questa immagine di riferimento o <PullBboxButton>trascina il riquadro di delimitazione in questa immagine di riferimento</PullBboxButton> per iniziare.",
|
||||
"uploadOrDragAnImage": "Trascina un'immagine dalla galleria o <UploadButton>carica un'immagine</UploadButton>.",
|
||||
"autoSwitch": {
|
||||
"switchOnStart": "All'inizio",
|
||||
"switchOnFinish": "Alla fine",
|
||||
"off": "Spento"
|
||||
},
|
||||
"invertMask": "Inverti maschera",
|
||||
"fitBboxToMasks": "Adatta il riquadro di delimitazione alle maschere"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2366,6 +2509,55 @@
|
||||
"upscaling": "Amplia",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)",
|
||||
"gallery": "Galleria"
|
||||
},
|
||||
"launchpad": {
|
||||
"workflowsTitle": "Approfondisci i flussi di lavoro.",
|
||||
"upscalingTitle": "Amplia e aggiungi dettagli.",
|
||||
"canvasTitle": "Modifica e perfeziona sulla tela.",
|
||||
"generateTitle": "Genera immagini da prompt testuali.",
|
||||
"modelGuideText": "Vuoi scoprire quali prompt funzionano meglio per ciascun modello?",
|
||||
"modelGuideLink": "Consulta la nostra guida ai modelli.",
|
||||
"workflows": {
|
||||
"description": "I flussi di lavoro sono modelli riutilizzabili che automatizzano le attività di generazione delle immagini, consentendo di eseguire rapidamente operazioni complesse e di ottenere risultati coerenti.",
|
||||
"learnMoreLink": "Scopri di più sulla creazione di flussi di lavoro",
|
||||
"browseTemplates": {
|
||||
"title": "Sfoglia i modelli di flusso di lavoro",
|
||||
"description": "Scegli tra flussi di lavoro predefiniti per le attività comuni"
|
||||
},
|
||||
"createNew": {
|
||||
"title": "Crea un nuovo flusso di lavoro",
|
||||
"description": "Avvia un nuovo flusso di lavoro da zero"
|
||||
},
|
||||
"loadFromFile": {
|
||||
"title": "Carica flusso di lavoro da file",
|
||||
"description": "Carica un flusso di lavoro per iniziare con una configurazione esistente"
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
"uploadImage": {
|
||||
"title": "Carica l'immagine da ampliare",
|
||||
"description": "Fai clic o trascina un'immagine per ingrandirla (JPG, PNG, WebP fino a 100 MB)"
|
||||
},
|
||||
"replaceImage": {
|
||||
"title": "Sostituisci l'immagine corrente",
|
||||
"description": "Fai clic o trascina una nuova immagine per sostituire quella corrente"
|
||||
},
|
||||
"imageReady": {
|
||||
"title": "Immagine pronta",
|
||||
"description": "Premere Invoke per iniziare l'ampliamento"
|
||||
},
|
||||
"readyToUpscale": {
|
||||
"title": "Pronto per ampliare!",
|
||||
"description": "Configura le impostazioni qui sotto, quindi fai clic sul pulsante Invoke per iniziare ad ampliare l'immagine."
|
||||
},
|
||||
"upscaleModel": "Modello per l'ampliamento",
|
||||
"model": "Modello",
|
||||
"scale": "Scala",
|
||||
"helpText": {
|
||||
"promptAdvice": "Durante l'ampliamento, utilizza un prompt che descriva il mezzo e lo stile. Evita di descrivere dettagli specifici del contenuto dell'immagine.",
|
||||
"styleAdvice": "L'ampliamento funziona meglio con lo stile generale dell'immagine."
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"upscaling": {
|
||||
@@ -2386,7 +2578,10 @@
|
||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
||||
"upscale": "Amplia",
|
||||
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento.",
|
||||
"tileControl": "Controllo del riquadro",
|
||||
"tileSize": "Dimensione del riquadro",
|
||||
"tileOverlap": "Sovrapposizione riquadro"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invita collaboratori",
|
||||
@@ -2436,7 +2631,8 @@
|
||||
"positivePromptColumn": "'prompt' o 'positive_prompt'",
|
||||
"noTemplates": "Nessun modello",
|
||||
"acceptedColumnsKeys": "Colonne/chiavi accettate:",
|
||||
"promptTemplateCleared": "Modello di prompt cancellato"
|
||||
"promptTemplateCleared": "Modello di prompt cancellato",
|
||||
"togglePromptPreviews": "Attiva/disattiva le anteprime dei prompt"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"gettingStartedSeries": "Desideri maggiori informazioni? Consulta la nostra <LinkComponent>Getting Started Series</LinkComponent> per suggerimenti su come sfruttare appieno il potenziale di Invoke Studio.",
|
||||
@@ -2452,8 +2648,10 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
"Nuova impostazione per inviare tutte le generazioni della Tela direttamente alla Galleria.",
|
||||
"Nuove funzionalità Inverti maschera (Maiusc+V) e Adatta il Riquadro di delimitazione alla maschera (Maiusc+B).",
|
||||
"Supporto esteso per miniature e configurazioni dei modelli.",
|
||||
"Vari altri aggiornamenti e correzioni per la qualità della vita"
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
@@ -2485,64 +2683,18 @@
|
||||
"supportVideos": {
|
||||
"gettingStarted": "Iniziare",
|
||||
"supportVideos": "Video di supporto",
|
||||
"videos": {
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"title": "Utilizzo di livelli di controllo e guide di riferimento",
|
||||
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
|
||||
},
|
||||
"creatingYourFirstImage": {
|
||||
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
|
||||
"title": "Creazione della tua prima immagine"
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
|
||||
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
|
||||
},
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
|
||||
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "Come si usano le maschere Inpaint?",
|
||||
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
|
||||
"title": "Come posso eseguire l'outpainting?"
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
|
||||
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
|
||||
},
|
||||
"upscaling": {
|
||||
"title": "Ampliamento",
|
||||
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
|
||||
"title": "Creare e comporre sulla tela di controllo di Invoke"
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"description": "Passaggi per generare e salvare le immagini nella galleria.",
|
||||
"title": "Come posso generare e salvare nella Galleria?"
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"title": "Come posso apportare modifiche sulla tela?",
|
||||
"description": "Guida alla modifica delle immagini direttamente sulla tela."
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
|
||||
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
|
||||
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
|
||||
}
|
||||
},
|
||||
"controlCanvas": "Tela di Controllo",
|
||||
"watch": "Guarda",
|
||||
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
|
||||
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
|
||||
"studioSessionsDesc": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e porre domande. Le sessioni vengono caricate nella playlist la settimana successiva.",
|
||||
"videos": {
|
||||
"gettingStarted": {
|
||||
"title": "Introduzione a Invoke",
|
||||
"description": "Serie video completa che copre tutto ciò che devi sapere per iniziare a usare Invoke, dalla creazione della tua prima immagine alle tecniche avanzate."
|
||||
},
|
||||
"studioSessions": {
|
||||
"title": "Sessioni in studio",
|
||||
"description": "Sessioni approfondite che esplorano le funzionalità avanzate di Invoke, i flussi di lavoro creativi e le discussioni della community."
|
||||
}
|
||||
}
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Cancella la cache del modello",
|
||||
|
||||
@@ -141,7 +141,7 @@
"loading": "ロード中",
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
"drop": "ドロップ",
"dropOrUpload": "$t(gallery.drop) またはアップロード",
"dropOrUpload": "ドロップまたはアップロード",
"deleteImage_other": "画像 {{count}} 枚を削除",
"deleteImagePermanent": "削除された画像は復元できません。",
"download": "ダウンロード",
@@ -193,7 +193,8 @@
"images": "画像",
"assetsTab": "プロジェクトで使用するためにアップロードされたファイル。",
"imagesTab": "Invoke内で作成および保存された画像。",
"assets": "アセット"
"assets": "アセット",
"useForPromptGeneration": "プロンプト生成に使用する"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
@@ -363,6 +364,16 @@
"selectRectTool": {
"title": "矩形ツール",
"desc": "矩形ツールを選択します。"
},
"settings": {
"behavior": "行動",
"display": "ディスプレイ",
"grid": "グリッド",
"debug": "デバッグ"
},
"toggleNonRasterLayers": {
"title": "非ラスターレイヤーの切り替え",
"desc": "ラスター以外のレイヤー カテゴリ (コントロール レイヤー、インペイント マスク、地域ガイダンス) を表示または非表示にします。"
}
},
"workflows": {
@@ -630,7 +641,7 @@
"restoreDefaultSettings": "クリックするとモデルのデフォルト設定が使用されます.",
"hfTokenSaved": "ハギングフェイストークンを保存しました",
"imageEncoderModelId": "画像エンコーダーモデルID",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます",
"includesNModels": "{{n}}個のモデルとこれらの依存関係を含みます。",
"learnMoreAboutSupportedModels": "私たちのサポートしているモデルについて更に学ぶ",
"modelImageUpdateFailed": "モデル画像アップデート失敗",
"scanFolder": "スキャンフォルダ",
@@ -654,7 +665,30 @@
"manageModels": "モデル管理",
"hfTokenReset": "ハギングフェイストークンリセット",
"relatedModels": "関連のあるモデル",
"showOnlyRelatedModels": "関連している"
"showOnlyRelatedModels": "関連している",
"installedModelsCount": "{{total}} モデルのうち {{installed}} 個がインストールされています。",
"allNModelsInstalled": "{{count}} 個のモデルがすべてインストールされています",
"nToInstall": "{{count}}個をインストールする",
"nAlreadyInstalled": "{{count}} 個すでにインストールされています",
"bundleAlreadyInstalled": "バンドルがすでにインストールされています",
"bundleAlreadyInstalledDesc": "{{bundleName}} バンドル内のすべてのモデルはすでにインストールされています。",
"launchpadTab": "ランチパッド",
"launchpad": {
"welcome": "モデルマネジメントへようこそ",
"description": "Invoke プラットフォームのほとんどの機能を利用するには、モデルのインストールが必要です。手動インストールオプションから選択するか、厳選されたスターターモデルをご覧ください。",
"manualInstall": "マニュアルインストール",
"urlDescription": "URLまたはローカルファイルパスからモデルをインストールします。特定のモデルを追加したい場合に最適です。",
"huggingFaceDescription": "HuggingFace リポジトリからモデルを直接参照してインストールします。",
"scanFolderDescription": "ローカルフォルダをスキャンしてモデルを自動的に検出し、インストールします。",
"recommendedModels": "推奨モデル",
"exploreStarter": "または、利用可能なすべてのスターターモデルを参照してください",
"quickStart": "クイックスタートバンドル",
"bundleDescription": "各バンドルには各モデルファミリーの必須モデルと、開始するための厳選されたベースモデルが含まれています。",
"browseAll": "または、利用可能なすべてのモデルを参照してください。",
"stableDiffusion15": "Stable Diffusion1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"parameters": {
"images": "画像",
@@ -720,7 +754,10 @@
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。",
"fluxKontextMultipleReferenceImages": "Flux Kontext では一度に 1 つの参照画像しか使用できません",
"promptExpansionPending": "プロンプト拡張が進行中",
"promptExpansionResultPending": "プロンプト拡張結果を受け入れるか破棄してください"
},
"aspect": "縦横比",
"lockAspectRatio": "縦横比を固定",
@@ -875,7 +912,26 @@
"imageNotLoadedDesc": "画像を見つけられません",
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください.",
"noRasterLayers": "ラスターレイヤーが見つかりません",
"noRasterLayersDesc": "PSDにエクスポートするには、少なくとも1つのラスターレイヤーを作成します",
"noActiveRasterLayers": "アクティブなラスターレイヤーがありません",
"noActiveRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"noVisibleRasterLayers": "表示されるラスター レイヤーがありません",
"noVisibleRasterLayersDesc": "PSD にエクスポートするには、少なくとも 1 つのラスター レイヤーを有効にします",
"invalidCanvasDimensions": "キャンバスのサイズが無効です",
"canvasTooLarge": "キャンバスが大きすぎます",
"canvasTooLargeDesc": "キャンバスのサイズがPSDエクスポートの最大許容サイズを超えています。キャンバス全体の幅と高さを小さくしてから、もう一度お試しください。",
"failedToProcessLayers": "レイヤーの処理に失敗しました",
"psdExportSuccess": "PSDエクスポート完了",
"psdExportSuccessDesc": "{{count}} 個のレイヤーを PSD ファイルに正常にエクスポートしました",
"problemExportingPSD": "PSD のエクスポート中に問題が発生しました",
"canvasManagerNotAvailable": "キャンバスマネージャーは利用できません",
"noValidLayerAdapters": "有効なレイヤーアダプタが見つかりません",
"fluxKontextIncompatibleGenerationMode": "Flux Kontext はテキストから画像への変換のみをサポートしています。画像から画像への変換、インペインティング、アウトペインティングのタスクには他のモデルを使用してください。",
"promptGenerationStarted": "プロンプト生成が開始されました",
"uploadAndPromptGenerationFailed": "画像のアップロードとプロンプトの生成に失敗しました",
"promptExpansionFailed": "プロンプト拡張に失敗しました"
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -1014,7 +1070,8 @@
"lora": "LoRA",
"defaultVAE": "デフォルトVAE",
"noLoRAsInstalled": "インストールされているLoRAはありません",
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません"
"noRefinerModelsInstalled": "インストールされているSDXLリファイナーモデルはありません",
"noCompatibleLoRAs": "互換性のあるLoRAはありません"
},
"nodes": {
"addNode": "ノードを追加",
@@ -1708,7 +1765,16 @@
"prompt": {
"addPromptTrigger": "プロンプトトリガーを追加",
"compatibleEmbeddings": "互換性のある埋め込み",
"noMatchingTriggers": "一致するトリガーがありません"
"noMatchingTriggers": "一致するトリガーがありません",
"generateFromImage": "画像からプロンプトを生成する",
"expandCurrentPrompt": "現在のプロンプトを展開",
"uploadImageForPromptGeneration": "プロンプト生成用の画像をアップロードする",
"expandingPrompt": "プロンプトを展開しています...",
"resultTitle": "プロンプト拡張完了",
"resultSubtitle": "拡張プロンプトの処理方法を選択します:",
"replace": "交換する",
"insert": "挿入する",
"discard": "破棄する"
},
"ui": {
"tabs": {
@@ -1716,7 +1782,61 @@
"canvas": "キャンバス",
"workflows": "ワークフロー",
"models": "モデル",
"gallery": "ギャラリー"
"gallery": "ギャラリー",
"generation": "生成",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"upscaling": "アップスケーリング",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
},
"launchpad": {
"upscaling": {
"model": "モデル",
"scale": "スケール",
"helpText": {
"promptAdvice": "アップスケールする際は、媒体とスタイルを説明するプロンプトを使用してください。画像内の具体的なコンテンツの詳細を説明することは避けてください。",
"styleAdvice": "アップスケーリングは、画像の全体的なスタイルに最適です。"
},
"uploadImage": {
"title": "アップスケール用の画像をアップロードする",
"description": "アップスケールするには、画像をクリックまたはドラッグします(JPG、PNG、WebP、最大100MB)"
},
"replaceImage": {
"title": "現在の画像を置き換える",
"description": "新しい画像をクリックまたはドラッグして、現在の画像を置き換えます"
},
"imageReady": {
"title": "画像準備完了",
"description": "アップスケールを開始するにはInvokeを押してください"
},
"readyToUpscale": {
"title": "アップスケールの準備ができました!",
"description": "以下の設定を構成し、「Invoke」ボタンをクリックして画像のアップスケールを開始します。"
},
"upscaleModel": "アップスケールモデル"
},
"workflowsTitle": "ワークフローを詳しく見てみましょう。",
"upscalingTitle": "アップスケールして詳細を追加します。",
"canvasTitle": "キャンバス上で編集および調整します。",
"generateTitle": "テキストプロンプトから画像を生成します。",
"modelGuideText": "各モデルに最適なプロンプトを知りたいですか?",
"modelGuideLink": "モデルガイドをご覧ください。",
"workflows": {
"description": "ワークフローは、画像生成タスクを自動化する再利用可能なテンプレートであり、複雑な操作を迅速に実行して一貫した結果を得ることができます。",
"learnMoreLink": "ワークフローの作成について詳しく見る",
"browseTemplates": {
"title": "ワークフローテンプレートを参照する",
"description": "一般的なタスク用にあらかじめ構築されたワークフローから選択する"
},
"createNew": {
"title": "新規ワークフローを作成する",
"description": "新しいワークフローをゼロから始める"
},
"loadFromFile": {
"title": "ファイルからワークフローを読み込む",
"description": "既存の設定から開始するためのワークフローをアップロードする"
}
}
}
},
"controlLayers": {
@@ -1732,7 +1852,16 @@
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像",
"canvasGroup": "キャンバス"
"canvasGroup": "キャンバス",
"saveToGalleryGroup": "ギャラリーに保存",
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "Bボックスをギャラリーに保存",
"newControlLayer": "新規コントロールレイヤー",
"newRasterLayer": "新規ラスターレイヤー",
"newInpaintMask": "新規インペイントマスク",
"copyToClipboard": "クリップボードにコピー",
"copyCanvasToClipboard": "キャンバスをクリップボードにコピー",
"copyBboxToClipboard": "Bボックスをクリップボードにコピー"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
@@ -1743,7 +1872,11 @@
"transform": "変形",
"apply": "適用",
"cancel": "キャンセル",
"reset": "リセット"
"reset": "リセット",
"fitMode": "フィットモード",
"fitModeContain": "含む",
"fitModeCover": "カバー",
"fitModeFill": "満たす"
},
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
@@ -1754,7 +1887,8 @@
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム",
"bbox": "Bbox"
"bbox": "Bbox",
"view": "ビュー"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
@@ -1774,25 +1908,386 @@
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました",
"controlMode": {
"prompt": "プロンプト"
"prompt": "プロンプト",
"controlMode": "コントロールモード",
"balanced": "バランス(推奨)",
"control": "コントロール",
"megaControl": "メガコントロール"
},
"prompt": "プロンプト",
"settings": {
"snapToGrid": {
"off": "オフ",
"on": "オン"
}
"on": "オン",
"label": "グリッドにスナップ"
},
"preserveMask": {
"label": "マスクされた領域を保持",
"alert": "マスクされた領域の保存"
},
"isolatedStagingPreview": "分離されたステージングプレビュー",
"isolatedPreview": "分離されたプレビュー",
"isolatedLayerPreview": "分離されたレイヤーのプレビュー",
"isolatedLayerPreviewDesc": "フィルタリングや変換などの操作を実行するときに、このレイヤーのみを表示するかどうか。",
"invertBrushSizeScrollDirection": "ブラシサイズのスクロール反転",
"pressureSensitivity": "圧力感度"
},
"filter": {
"filter": "フィルター",
"spandrel_filter": {
"model": "モデル"
"model": "モデル",
"label": "img2imgモデル",
"description": "選択したレイヤーでimg2imgモデルを実行します。",
"autoScale": "オートスケール",
"autoScaleDesc": "選択したモデルは、目標スケールに達するまで実行されます。",
"scale": "ターゲットスケール"
},
"apply": "適用",
"reset": "リセット",
"cancel": "キャンセル"
"cancel": "キャンセル",
"filters": "フィルター",
"filterType": "フィルタータイプ",
"autoProcess": "オートプロセス",
"process": "プロセス",
"advanced": "アドバンスド",
"processingLayerWith": "{{type}} フィルターを使用した処理レイヤー。",
"forMoreControl": "さらに細かく制御するには、以下の「詳細設定」をクリックしてください。",
"canny_edge_detection": {
"label": "キャニーエッジ検出",
"description": "Canny エッジ検出アルゴリズムを使用して、選択したレイヤーからエッジ マップを生成します。",
"low_threshold": "低閾値",
"high_threshold": "高閾値"
},
"color_map": {
"label": "カラーマップ",
"description": "選択したレイヤーからカラーマップを作成します。",
"tile_size": "タイルサイズ"
},
"content_shuffle": {
"label": "コンテンツシャッフル",
"description": "選択したレイヤーのコンテンツを、「液化」効果と同様にシャッフルします。",
"scale_factor": "スケール係数"
},
"depth_anything_depth_estimation": {
"label": "デプスエニシング",
"description": "デプスエニシングモデルを使用して、選択したレイヤーから深度マップを生成します。",
"model_size": "モデルサイズ",
"model_size_small": "スモール",
"model_size_small_v2": "スモールv2",
"model_size_base": "ベース",
"model_size_large": "ラージ"
},
"dw_openpose_detection": {
"label": "DW オープンポーズ検出",
"description": "DW Openpose モデルを使用して、選択したレイヤー内の人間のポーズを検出します。",
"draw_hands": "手を描く",
"draw_face": "顔を描く",
"draw_body": "体を描く"
},
"hed_edge_detection": {
"label": "HEDエッジ検出",
"description": "HED エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"scribble": "落書き"
},
"lineart_anime_edge_detection": {
"label": "線画アニメのエッジ検出",
"description": "線画アニメエッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。"
},
"lineart_edge_detection": {
"label": "線画エッジ検出",
"description": "線画エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"coarse": "粗い"
},
"mediapipe_face_detection": {
"label": "メディアパイプ顔検出",
"description": "メディアパイプ顔検出モデルを使用して、選択したレイヤー内の顔を検出します。",
"max_faces": "マックスフェイス",
"min_confidence": "最小信頼度"
},
"mlsd_detection": {
"label": "線分検出",
"description": "MLSD 線分検出モデルを使用して、選択したレイヤーから線分マップを生成します。",
"score_threshold": "スコア閾値",
"distance_threshold": "距離閾値"
},
"normal_map": {
"label": "ノーマルマップ",
"description": "選択したレイヤーからノーマルマップを生成します。"
},
"pidi_edge_detection": {
"label": "PiDiNetエッジ検出",
"description": "PiDiNet エッジ検出モデルを使用して、選択したレイヤーからエッジ マップを生成します。",
"scribble": "落書き",
"quantize_edges": "エッジを量子化する"
},
"img_blur": {
"label": "画像をぼかす",
"description": "選択したレイヤーをぼかします。",
"blur_type": "ぼかしの種類",
"blur_radius": "半径",
"gaussian_type": "ガウス分布",
"box_type": "ボックス"
},
"img_noise": {
"label": "ノイズ画像",
"description": "選択したレイヤーにノイズを追加します。",
"noise_type": "ノイズの種類",
"noise_amount": "総計",
"gaussian_type": "ガウス分布",
"salt_and_pepper_type": "塩コショウ",
"noise_color": "カラーノイズ",
"size": "ノイズサイズ"
},
"adjust_image": {
"label": "画像を調整する",
"description": "画像の選択したチャンネルを調整します。",
"channel": "チャンネル",
"value_setting": "バリュー",
"scale_values": "スケールバリュー",
"red": "赤(RGBA)",
"green": "緑(RGBA)",
"blue": "青(RGBA)",
"alpha": "アルファ(RGBA)",
"cyan": "シアン(CMYK)",
"magenta": "マゼンタ(CMYK)",
"yellow": "黄色(CMYK)",
"black": "黒(CMYK)",
"hue": "色相(HSV)",
"saturation": "彩度(HSV)",
"value": "値(HSV)",
"luminosity": "明度(LAB)",
"a": "A(ラボ)",
"b": "B(ラボ)",
"y": "Y(YCbCr)",
"cb": "Cb(YCbCr)",
"cr": "Cr(YCbCr)"
}
},
"weight": "重み"
"weight": "重み",
"bookmark": "クイックスイッチのブックマーク",
"exportCanvasToPSD": "キャンバスをPSDにエクスポート",
"savedToGalleryError": "ギャラリーへの保存中にエラーが発生しました",
"regionCopiedToClipboard": "{{region}} をクリップボードにコピーしました",
"copyRegionError": "{{region}} のコピー中にエラーが発生しました",
"newGlobalReferenceImageOk": "作成されたグローバル参照画像",
"newGlobalReferenceImageError": "グローバル参照イメージの作成中に問題が発生しました",
"newRegionalReferenceImageOk": "地域参照画像の作成",
"newRegionalReferenceImageError": "地域参照画像の作成中に問題が発生しました",
"newControlLayerOk": "制御レイヤーの作成",
"newControlLayerError": "制御層の作成中に問題が発生しました",
"newRasterLayerOk": "ラスターレイヤーを作成しました",
"newRasterLayerError": "ラスターレイヤーの作成中に問題が発生しました",
"pullBboxIntoLayerOk": "Bbox をレイヤーにプル",
"pullBboxIntoLayerError": "BBox をレイヤーにプルする際に問題が発生しました",
"pullBboxIntoReferenceImageOk": "Bbox が ReferenceImage にプルされました",
"pullBboxIntoReferenceImageError": "BBox を ReferenceImage にプルする際に問題が発生しました",
"regionIsEmpty": "選択した領域は空です",
"mergeVisible": "マージを可視化",
"mergeVisibleOk": "マージされたレイヤー",
"mergeVisibleError": "レイヤーの結合エラー",
"mergingLayers": "レイヤーのマージ",
"clearHistory": "履歴をクリア",
"bboxOverlay": "Bboxオーバーレイを表示",
"ruleOfThirds": "三分割法を表示",
"newSession": "新しいセッション",
"clearCaches": "キャッシュをクリア",
"recalculateRects": "長方形を再計算する",
"clipToBbox": "ストロークをBboxにクリップ",
"outputOnlyMaskedRegions": "生成された領域のみを出力する",
"width": "幅",
"autoNegative": "オートネガティブ",
"enableAutoNegative": "オートネガティブを有効にする",
"disableAutoNegative": "オートネガティブを無効にする",
"deletePrompt": "プロンプトを削除",
"deleteReferenceImage": "参照画像を削除",
"showHUD": "HUDを表示",
"maskFill": "マスク塗りつぶし",
"addPositivePrompt": "$t(controlLayers.prompt) を追加します",
"addNegativePrompt": "$t(controlLayers.negativePrompt)を追加します",
"addReferenceImage": "$t(controlLayers.referenceImage)を追加します",
"addImageNoise": "$t(controlLayers.imageNoise)を追加します",
"addRasterLayer": "$t(controlLayers.rasterLayer)を追加します",
"addControlLayer": "$t(controlLayers.controlLayer)を追加します",
"addInpaintMask": "$t(controlLayers.inpaintMask)を追加します",
"addRegionalGuidance": "$t(controlLayers.regionalGuidance)を追加します",
"addGlobalReferenceImage": "$t(controlLayers.globalReferenceImage)を追加します",
"addDenoiseLimit": "$t(controlLayers.denoiseLimit)を追加します",
"controlLayer": "コントロールレイヤー",
"inpaintMask": "インペイントマスク",
"referenceImageRegional": "参考画像(地域別)",
"referenceImageGlobal": "参考画像(グローバル)",
"asRasterLayer": "$t(controlLayers.rasterLayer) として",
"asRasterLayerResize": "$t(controlLayers.rasterLayer) として (リサイズ)",
"asControlLayer": "$t(controlLayers.controlLayer) として",
"asControlLayerResize": "$t(controlLayers.controlLayer) として (リサイズ)",
"referenceImage": "参照画像",
"sendingToCanvas": "キャンバスに生成をのせる",
"sendingToGallery": "生成をギャラリーに送る",
"sendToGallery": "ギャラリーに送る",
"sendToGalleryDesc": "Invokeを押すとユニークな画像が生成され、ギャラリーに保存されます。",
"sendToCanvas": "キャンバスに送る",
"newLayerFromImage": "画像から新規レイヤー",
"newCanvasFromImage": "画像から新規キャンバス",
"newImg2ImgCanvasFromImage": "画像からの新規 Img2Img",
"copyToClipboard": "クリップボードにコピー",
"sendToCanvasDesc": "Invokeを押すと、進行中の作品がキャンバス上にステージされます。",
"viewProgressInViewer": "<Btn>画像ビューア</Btn>で進行状況と出力を表示します。",
"viewProgressOnCanvas": "<Btn>キャンバス</Btn> で進行状況とステージ出力を表示します。",
"rasterLayer_withCount_other": "ラスターレイヤー",
"controlLayer_withCount_other": "コントロールレイヤー",
"regionalGuidance_withCount_hidden": "地域ガイダンス({{count}} 件非表示)",
"controlLayers_withCount_hidden": "コントロールレイヤー({{count}} 個非表示)",
"rasterLayers_withCount_hidden": "ラスター レイヤー ({{count}} 個非表示)",
"globalReferenceImages_withCount_hidden": "グローバル参照画像({{count}} 枚非表示)",
"regionalGuidance_withCount_visible": "地域ガイダンス ({{count}})",
"controlLayers_withCount_visible": "コントロールレイヤー ({{count}})",
"rasterLayers_withCount_visible": "ラスターレイヤー({{count}})",
"globalReferenceImages_withCount_visible": "グローバル参照画像 ({{count}})",
"layer_other": "レイヤー",
"layer_withCount_other": "レイヤー ({{count}})",
"convertRasterLayerTo": "$t(controlLayers.rasterLayer) を変換する",
"convertControlLayerTo": "$t(controlLayers.controlLayer) を変換する",
"convertRegionalGuidanceTo": "$t(controlLayers.regionalGuidance) を変換する",
"copyRasterLayerTo": "$t(controlLayers.rasterLayer)をコピーする",
"copyControlLayerTo": "$t(controlLayers.controlLayer) をコピーする",
"copyRegionalGuidanceTo": "$t(controlLayers.regionalGuidance)をコピーする",
"newRasterLayer": "新しい $t(controlLayers.rasterLayer)",
"newControlLayer": "新しい $t(controlLayers.controlLayer)",
"newInpaintMask": "新しい $t(controlLayers.inpaintMask)",
"newRegionalGuidance": "新しい $t(controlLayers.regionalGuidance)",
"pasteTo": "貼り付け先",
"pasteToAssets": "アセット",
"pasteToAssetsDesc": "アセットに貼り付け",
"pasteToBbox": "Bボックス",
"pasteToBboxDesc": "新しいレイヤー(Bbox内)",
"pasteToCanvas": "キャンバス",
"pasteToCanvasDesc": "新しいレイヤー(キャンバス内)",
"pastedTo": "{{destination}} に貼り付けました",
"transparency": "透明性",
"enableTransparencyEffect": "透明効果を有効にする",
"disableTransparencyEffect": "透明効果を無効にする",
"hidingType": "{{type}} を非表示",
"showingType": "{{type}}を表示",
"showNonRasterLayers": "非ラスターレイヤーを表示 (Shift+H)",
"hideNonRasterLayers": "非ラスターレイヤーを非表示にする (Shift+H)",
"dynamicGrid": "ダイナミックグリッド",
"logDebugInfo": "デバッグ情報をログに記録する",
"locked": "ロックされています",
"unlocked": "ロック解除",
"deleteSelected": "選択項目を削除",
"stagingOnCanvas": "ステージング画像",
"replaceLayer": "レイヤーの置き換え",
"pullBboxIntoLayer": "Bboxをレイヤーに引き込む",
"pullBboxIntoReferenceImage": "Bboxを参照画像に取り込む",
"showProgressOnCanvas": "キャンバスに進捗状況を表示",
"useImage": "画像を使う",
"negativePrompt": "ネガティブプロンプト",
"beginEndStepPercentShort": "開始/終了 %",
"newGallerySession": "新しいギャラリーセッション",
"newGallerySessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成した画像はギャラリーに送信されます。",
"newCanvasSession": "新規キャンバスセッション",
"newCanvasSessionDesc": "これにより、キャンバスとモデル選択以外のすべての設定がクリアされます。生成はキャンバス上でステージングされます。",
"resetCanvasLayers": "キャンバスレイヤーをリセット",
"resetGenerationSettings": "生成設定をリセット",
"replaceCurrent": "現在のものを置き換える",
"controlLayerEmptyState": "<UploadButton>画像をアップロード</UploadButton>、<GalleryButton>ギャラリー</GalleryButton>からこのレイヤーに画像をドラッグ、<PullBboxButton>境界ボックスをこのレイヤーにプル</PullBboxButton>、またはキャンバスに描画して開始します。",
"referenceImageEmptyStateWithCanvasOptions": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグするか、<PullBboxButton>境界ボックスをこの参照画像にプル</PullBboxButton>します。",
"referenceImageEmptyState": "開始するには、<UploadButton>画像をアップロード</UploadButton>するか、<GalleryButton>ギャラリー</GalleryButton>からこの参照画像に画像をドラッグします。",
"uploadOrDragAnImage": "ギャラリーから画像をドラッグするか、<UploadButton>画像をアップロード</UploadButton>します。",
"imageNoise": "画像ノイズ",
"denoiseLimit": "ノイズ除去制限",
"warnings": {
"problemsFound": "問題が見つかりました",
"unsupportedModel": "選択したベースモデルではレイヤーがサポートされていません",
"controlAdapterNoModelSelected": "制御レイヤーモデルが選択されていません",
"controlAdapterIncompatibleBaseModel": "互換性のない制御レイヤーベースモデル",
"controlAdapterNoControl": "コントロールが選択/描画されていません",
"ipAdapterNoModelSelected": "参照画像モデルが選択されていません",
"ipAdapterIncompatibleBaseModel": "互換性のない参照画像ベースモデル",
"ipAdapterNoImageSelected": "参照画像が選択されていません",
"rgNoPromptsOrIPAdapters": "テキストプロンプトや参照画像はありません",
"rgNegativePromptNotSupported": "選択されたベースモデルでは否定プロンプトはサポートされていません",
"rgReferenceImagesNotSupported": "選択されたベースモデルでは地域の参照画像はサポートされていません",
"rgAutoNegativeNotSupported": "選択したベースモデルでは自動否定はサポートされていません",
"rgNoRegion": "領域が描画されていません",
"fluxFillIncompatibleWithControlLoRA": "コントロールLoRAはFLUX Fillと互換性がありません"
},
"errors": {
"unableToFindImage": "画像が見つかりません",
"unableToLoadImage": "画像を読み込めません"
},
"ipAdapterMethod": {
"ipAdapterMethod": "モード",
"full": "スタイルと構成",
"fullDesc": "視覚スタイル (色、テクスチャ) と構成 (レイアウト、構造) を適用します。",
"style": "スタイル(シンプル)",
"styleDesc": "レイアウトを考慮せずに視覚スタイル(色、テクスチャ)を適用します。以前は「スタイルのみ」と呼ばれていました。",
"composition": "構成のみ",
"compositionDesc": "参照スタイルを無視してレイアウトと構造を複製します。",
"styleStrong": "スタイル(ストロング)",
"styleStrongDesc": "構成への影響をわずかに抑えて、強力なビジュアル スタイルを適用します。",
"stylePrecise": "スタイル(正確)",
"stylePreciseDesc": "被写体の影響を排除し、正確な視覚スタイルを適用します。"
},
"fluxReduxImageInfluence": {
"imageInfluence": "イメージの影響力",
"lowest": "最低",
"low": "低",
"medium": "中",
"high": "高",
"highest": "最高"
},
"fill": {
"fillColor": "塗りつぶし色",
"fillStyle": "塗りつぶしスタイル",
"solid": "固体",
"grid": "グリッド",
"crosshatch": "クロスハッチ",
"vertical": "垂直",
"horizontal": "水平",
"diagonal": "対角線"
},
"selectObject": {
"selectObject": "オブジェクトを選択",
"pointType": "ポイントタイプ",
"invertSelection": "選択範囲を反転",
"include": "含む",
"exclude": "除外",
"neutral": "ニュートラル",
"apply": "適用",
"reset": "リセット",
"saveAs": "名前を付けて保存",
"cancel": "キャンセル",
"process": "プロセス",
"help1": "ターゲットオブジェクトを1つ選択します。<Bold>含める</Bold>ポイントと<Bold>除外</Bold>ポイントを追加して、レイヤーのどの部分がターゲットオブジェクトの一部であるかを示します。",
"help2": "対象オブジェクト内に<Bold>含める</Bold>ポイントを1つ選択するところから始めます。ポイントを追加して選択範囲を絞り込みます。ポイントが少ないほど、通常はより良い結果が得られます。",
"help3": "選択を反転して、ターゲットオブジェクト以外のすべてを選択します。",
"clickToAdd": "レイヤーをクリックしてポイントを追加します",
"dragToMove": "ポイントをドラッグして移動します",
"clickToRemove": "ポイントをクリックして削除します"
},
"HUD": {
"bbox": "Bボックス",
"scaledBbox": "スケールされたBボックス",
"entityStatus": {
"isFiltering": "{{title}} はフィルタリング中です",
"isTransforming": "{{title}}は変化しています",
"isLocked": "{{title}}はロックされています",
"isHidden": "{{title}}は非表示になっています",
"isDisabled": "{{title}}は無効です",
"isEmpty": "{{title}} は空です"
}
},
"stagingArea": {
"accept": "受け入れる",
"discardAll": "すべて破棄",
"discard": "破棄する",
"previous": "前へ",
"next": "次へ",
"saveToGallery": "ギャラリーに保存",
"showResultsOn": "結果を表示",
"showResultsOff": "結果を隠す"
}
},
"stylePresets": {
"clearTemplateSelection": "選択したテンプレートをクリア",
@@ -1810,13 +2305,56 @@
"nameColumn": "'name'",
"type": "タイプ",
"private": "プライベート",
"name": "名称"
"name": "名称",
"active": "アクティブ",
"copyTemplate": "テンプレートをコピー",
"deleteImage": "画像を削除",
"deleteTemplate": "テンプレートを削除",
"deleteTemplate2": "このテンプレートを削除してもよろしいですか? 元に戻すことはできません。",
"exportPromptTemplates": "プロンプトテンプレートをエクスポートする(CSV)",
"editTemplate": "テンプレートを編集",
"exportDownloaded": "エクスポートをダウンロードしました",
"exportFailed": "生成とCSVのダウンロードができません",
"importTemplates": "プロンプトテンプレートのインポート(CSV/JSON)",
"acceptedColumnsKeys": "受け入れられる列/キー:",
"positivePromptColumn": "'プロンプト'または'ポジティブプロンプト'",
"insertPlaceholder": "プレースホルダーを挿入",
"negativePrompt": "ネガティブプロンプト",
"noTemplates": "テンプレートがありません",
"noMatchingTemplates": "マッチするテンプレートがありません",
"promptTemplatesDesc1": "プロンプトテンプレートは、プロンプトボックスに書き込むプロンプトにテキストを追加します。",
"promptTemplatesDesc2": "テンプレート内でプロンプトを含める場所を指定するには <Pre>{{placeholder}}</Pre> のプレースホルダーの文字列を使用します。",
"promptTemplatesDesc3": "プレースホルダーを省略すると、テンプレートはプロンプトの末尾に追加されます。",
"positivePrompt": "ポジティブプロンプト",
"shared": "共有",
"sharedTemplates": "テンプレートを共有",
"templateDeleted": "プロンプトテンプレートを削除しました",
"unableToDeleteTemplate": "プロンプトテンプレートを削除できません",
"updatePromptTemplate": "プロンプトテンプレートをアップデート",
"useForTemplate": "プロンプトテンプレートに使用する",
"viewList": "テンプレートリストを表示",
"viewModeTooltip": "現在選択されているテンプレートでは、プロンプトはこのようになります。プロンプトを編集するには、テキストボックス内の任意の場所をクリックしてください。",
"togglePromptPreviews": "プロンプトプレビューを切り替える"
},
"upscaling": {
"upscaleModel": "アップスケールモデル",
"postProcessingModel": "ポストプロセスモデル",
"upscale": "アップスケール",
"scale": "スケール"
"scale": "スケール",
"creativity": "創造性",
"exceedsMaxSize": "アップスケール設定が最大サイズ制限を超えています",
"exceedsMaxSizeDetails": "アップスケールの上限は{{max Upscale Dimension}} x {{max Upscale Dimension}}ピクセルです。画像を小さくするか、スケールの選択範囲を小さくしてください。",
|
||||
"structure": "構造",
|
||||
"postProcessingMissingModelWarning": "後処理 (img2img) モデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"missingModelsWarning": "必要なモデルをインストールするには、<LinkComponent>モデル マネージャー</LinkComponent> にアクセスしてください。",
|
||||
"mainModelDesc": "メインモデル(SD1.5またはSDXLアーキテクチャ)",
|
||||
"tileControlNetModelDesc": "選択したメインモデルアーキテクチャのタイルコントロールネットモデル",
|
||||
"upscaleModelDesc": "アップスケール(img2img)モデル",
|
||||
"missingUpscaleInitialImage": "アップスケール用の初期画像がありません",
|
||||
"missingUpscaleModel": "アップスケールモデルがありません",
|
||||
"missingTileControlNetModel": "有効なタイル コントロールネットモデルがインストールされていません",
|
||||
"incompatibleBaseModel": "アップスケーリングにサポートされていないメインモデルアーキテクチャです",
|
||||
"incompatibleBaseModelDesc": "アップスケーリングはSD1.5およびSDXLアーキテクチャモデルでのみサポートされています。アップスケーリングを有効にするには、メインモデルを変更してください。"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
@@ -1891,7 +2429,34 @@
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
"publishedWorkflowInputs": "インプット",
|
||||
"workflowLocked": "ワークフローがロックされました",
|
||||
"workflowLockedPublished": "公開済みのワークフローは編集用にロックされています。\nワークフローを非公開にして編集したり、コピーを作成したりできます。",
|
||||
"workflowLockedDuringPublishing": "公開の構成中にワークフローがロックされます。",
|
||||
"selectOutputNode": "出力ノードを選択",
|
||||
"changeOutputNode": "出力ノードの変更",
|
||||
"unpublishableInputs": "これらの公開できない入力は省略されます",
|
||||
"noPublishableInputs": "公開可能な入力はありません",
|
||||
"noOutputNodeSelected": "出力ノードが選択されていません",
|
||||
"cannotPublish": "ワークフローを公開できません",
|
||||
"publishWarnings": "警告",
|
||||
"errorWorkflowHasUnsavedChanges": "ワークフローに保存されていない変更があります",
|
||||
"errorWorkflowHasUnpublishableNodes": "ワークフローにはバッチ、ジェネレータ、またはメタデータ抽出ノードがあります",
|
||||
"errorWorkflowHasInvalidGraph": "ワークフロー グラフが無効です (詳細については [呼び出し] ボタンにマウスを移動してください)",
|
||||
"errorWorkflowHasNoOutputNode": "出力ノードが選択されていません",
|
||||
"warningWorkflowHasNoPublishableInputFields": "公開可能な入力フィールドが選択されていません - 公開されたワークフローはデフォルト値のみで実行されます",
|
||||
"warningWorkflowHasUnpublishableInputFields": "ワークフローには公開できない入力がいくつかあります。これらは公開されたワークフローから省略されます",
|
||||
"publishFailed": "公開失敗",
|
||||
"publishFailedDesc": "ワークフローの公開中に問題が発生しました。もう一度お試しください。",
|
||||
"publishSuccess": "ワークフローを公開しています",
|
||||
"publishSuccessDesc": "<LinkComponent>プロジェクト ダッシュボード</LinkComponent> をチェックして進捗状況を確認してください。",
|
||||
"publishInProgress": "公開中",
|
||||
"publishedWorkflowIsLocked": "公開されたワークフローはロックされています",
|
||||
"publishingValidationRun": "公開検証実行",
|
||||
"publishingValidationRunInProgress": "公開検証の実行が進行中です。",
|
||||
"publishedWorkflowsLocked": "公開済みのワークフローはロックされており、編集または実行できません。このワークフローを編集または実行するには、ワークフローを非公開にするか、コピーを保存してください。",
|
||||
"selectingOutputNode": "出力ノードの選択",
|
||||
"selectingOutputNodeDesc": "ノードをクリックして、ワークフローの出力ノードとして選択します。"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
@@ -1954,15 +2519,23 @@
|
||||
"models": "モデル",
|
||||
"canvas": "キャンバス",
|
||||
"metadata": "メタデータ",
|
||||
"queue": "キュー"
|
||||
"queue": "キュー",
|
||||
"logNamespaces": "ログのネームスペース",
|
||||
"dnd": "ドラッグ&ドロップ",
|
||||
"config": "構成",
|
||||
"generation": "生成",
|
||||
"events": "イベント"
|
||||
},
|
||||
"logLevel": {
|
||||
"debug": "Debug",
|
||||
"info": "Info",
|
||||
"error": "Error",
|
||||
"fatal": "Fatal",
|
||||
"warn": "Warn"
|
||||
}
|
||||
"warn": "Warn",
|
||||
"logLevel": "ログレベル",
|
||||
"trace": "追跡"
|
||||
},
|
||||
"enableLogging": "ログを有効にする"
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"promptsPreview": "プロンプトプレビュー",
|
||||
@@ -1978,5 +2551,34 @@
|
||||
"dynamicPrompts": "ダイナミックプロンプト",
|
||||
"loading": "ダイナミックプロンプトを生成...",
|
||||
"maxPrompts": "最大プロンプト"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "チームメートを招待",
|
||||
"professional": "プロフェッショナル",
|
||||
"professionalUpsell": "InvokeのProfessional Editionでご利用いただけます。詳細については、こちらをクリックするか、invoke.com/pricingをご覧ください。",
|
||||
"shareAccess": "共有アクセス"
|
||||
},
|
||||
"newUserExperience": {
|
||||
"toGetStartedLocal": "始めるには、Invoke の実行に必要なモデルをダウンロードまたはインポートしてください。次に、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
||||
"toGetStarted": "開始するには、ボックスにプロンプトを入力し、<StrongComponent>Invoke</StrongComponent> をクリックして最初の画像を生成します。プロンプトテンプレートを選択すると、結果が向上します。画像は <StrongComponent>Gallery</StrongComponent> に直接保存するか、<StrongComponent>Canvas</StrongComponent> で編集するかを選択できます。",
|
||||
"toGetStartedWorkflow": "開始するには、左側のフィールドに入力し、<StrongComponent>Invoke</StrongComponent> をクリックして画像を生成します。他のワークフローも試してみたい場合は、ワークフロータイトルの横にある<StrongComponent>フォルダアイコン</StrongComponent> をクリックすると、試せる他のテンプレートのリストが表示されます。",
|
||||
"gettingStartedSeries": "さらに詳しいガイダンスが必要ですか? Invoke Studio の可能性を最大限に引き出すためのヒントについては、<LinkComponent>入門シリーズ</LinkComponent>をご覧ください。",
|
||||
"lowVRAMMode": "最高のパフォーマンスを得るには、<LinkComponent>低 VRAM ガイド</LinkComponent>に従ってください。",
|
||||
"noModelsInstalled": "モデルがインストールされていないようです。<DownloadStarterModelsButton>スターターモデルバンドルをダウンロード</DownloadStarterModelsButton>するか、<ImportModelsButton>モデルをインポート</ImportModelsButton>してください。"
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "Invokeの新機能",
|
||||
"items": [
|
||||
"インペインティング: マスクごとのノイズ レベルとノイズ除去の制限。",
|
||||
"キャンバス: SDXL のアスペクト比がスマートになり、スクロールによるズームが改善されました。"
|
||||
],
|
||||
"readReleaseNotes": "リリースノートを読む",
|
||||
"watchRecentReleaseVideos": "最近のリリースビデオを見る",
|
||||
"watchUiUpdatesOverview": "Watch UI アップデートの概要"
|
||||
},
|
||||
"supportVideos": {
|
||||
"supportVideos": "サポートビデオ",
|
||||
"gettingStarted": "はじめる",
|
||||
"watch": "ウォッチ"
|
||||
}
|
||||
}
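These locale files are consumed through i18next, which explains the syntax recurring in the entries above: {{name}} placeholders are interpolated at call time, $t(key) nests another entry, and component-looking tags such as <LinkComponent> or <Pre> are mapped to React elements by react-i18next's Trans component. A minimal sketch of the first two mechanisms, assuming only the i18next package (keys and values below are illustrative stand-ins, not copied from the files):

// Sketch: how {{...}} interpolation and $t(...) nesting behave in i18next.
import i18next from 'i18next';

await i18next.init({
  lng: 'ja',
  resources: {
    ja: {
      translation: {
        gallery: { drop: 'ドロップ' },
        upscaling: {
          // {{maxUpscaleDimension}} is replaced at call time; a space inside
          // the braces (as in the pre-fix string) would not match the
          // camelCase variable callers pass, so interpolation would fail.
          exceedsMaxSizeDetails: 'アップスケールの上限は{{maxUpscaleDimension}} x {{maxUpscaleDimension}}ピクセルです。',
        },
        // $t(...) resolves another key at lookup time (i18next "nesting").
        dropOrUpload: '$t(gallery.drop)またはアップロード',
      },
    },
  },
});

i18next.t('upscaling.exceedsMaxSizeDetails', { maxUpscaleDimension: 4096 });
// -> 'アップスケールの上限は4096 x 4096ピクセルです。'
i18next.t('dropOrUpload');
// -> 'ドロップまたはアップロード'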
@@ -74,7 +74,7 @@
"bulkDownloadFailed": "Tải Xuống Thất Bại",
"bulkDownloadRequestFailed": "Có Vấn Đề Khi Đang Chuẩn Bị Tải Xuống",
"download": "Tải Xuống",
"dropOrUpload": "$t(gallery.drop) Hoặc Tải Lên",
"dropOrUpload": "Kéo Thả Hoặc Tải Lên",
"currentlyInUse": "Hình ảnh này hiện đang sử dụng các tính năng sau:",
"deleteImagePermanent": "Ảnh đã xoá không thể phục hồi.",
"exitSearch": "Thoát Tìm Kiếm Hình Ảnh",
@@ -111,7 +111,7 @@
"noImageSelected": "Không Có Ảnh Được Chọn",
"noImagesInGallery": "Không Có Ảnh Để Hiển Thị",
"assetsTab": "Tài liệu bạn đã tải lên để dùng cho dự án của mình.",
"imagesTab": "hình bạn vừa được tạo và lưu trong Invoke.",
"imagesTab": "Ảnh bạn vừa được tạo và lưu trong Invoke.",
"loading": "Đang Tải",
"oldestFirst": "Cũ Nhất Trước",
"exitCompare": "Ngừng So Sánh",
@@ -122,7 +122,8 @@
"boardsSettings": "Thiết Lập Bảng",
"imagesSettings": "Cài Đặt Ảnh Trong Thư Viện Ảnh",
"assets": "Tài Nguyên",
"images": "Hình Ảnh"
"images": "Hình Ảnh",
"useForPromptGeneration": "Dùng Để Tạo Sinh Lệnh"
},
"common": {
"ipAdapter": "IP Adapter",
@@ -254,9 +255,18 @@
"options_withCount_other": "{{count}} thiết lập"
},
"prompt": {
"addPromptTrigger": "Thêm Prompt Trigger",
"addPromptTrigger": "Thêm Trigger Cho Lệnh",
"compatibleEmbeddings": "Embedding Tương Thích",
"noMatchingTriggers": "Không có trigger phù hợp"
"noMatchingTriggers": "Không có trigger phù hợp",
"generateFromImage": "Tạo sinh lệnh từ ảnh",
"expandCurrentPrompt": "Mở Rộng Lệnh Hiện Tại",
"uploadImageForPromptGeneration": "Tải Ảnh Để Tạo Sinh Lệnh",
"expandingPrompt": "Đang mở rộng lệnh...",
"resultTitle": "Mở Rộng Lệnh Hoàn Tất",
"resultSubtitle": "Chọn phương thức mở rộng lệnh:",
"replace": "Thay Thế",
"insert": "Chèn",
"discard": "Huỷ Bỏ"
},
"queue": {
"resume": "Tiếp Tục",
@@ -453,6 +463,16 @@
"applyFilter": {
"title": "Áp Dụng Bộ Lọc",
"desc": "Áp dụng bộ lọc đang chờ sẵn cho layer được chọn."
},
"settings": {
"behavior": "Hành Vi",
"display": "Hiển Thị",
"grid": "Lưới",
"debug": "Gỡ Lỗi"
},
"toggleNonRasterLayers": {
"title": "Bật/Tắt Layer Không Thuộc Dạng Raster",
"desc": "Hiện hoặc ẩn tất cả layer không thuộc dạng raster (Layer Điều Khiển Được, Lớp Phủ Inpaint, Chỉ Dẫn Khu Vực)."
}
},
"workflows": {
@@ -695,7 +715,7 @@
"cancel": "Huỷ",
"huggingFace": "HuggingFace (HF)",
"huggingFacePlaceholder": "chủ-sở-hữu/tên-model",
"includesNModels": "Thêm vào {{n}} model và dependency của nó",
"includesNModels": "Thêm vào {{n}} model và dependency của nó.",
"localOnly": "chỉ ở trên máy chủ",
"manual": "Thủ Công",
"convertToDiffusersHelpText4": "Đây là quá trình diễn ra chỉ một lần. Nó có thể tốn tầm 30-60 giây tuỳ theo thông số kỹ thuật của máy tính.",
@@ -742,7 +762,7 @@
"simpleModelPlaceholder": "Url hoặc đường dẫn đến tệp hoặc thư mục chứa diffusers trong máy chủ",
"selectModel": "Chọn Model",
"spandrelImageToImage": "Hình Ảnh Sang Hình Ảnh (Spandrel)",
"starterBundles": "Quà Tân Thủ",
"starterBundles": "Gói Khởi Đầu",
"vae": "VAE",
"urlOrLocalPath": "URL / Đường Dẫn",
"triggerPhrases": "Từ Ngữ Kích Hoạt",
@@ -794,7 +814,30 @@
"manageModels": "Quản Lý Model",
"hfTokenReset": "Làm Mới HF Token",
"relatedModels": "Model Liên Quan",
"showOnlyRelatedModels": "Liên Quan"
"showOnlyRelatedModels": "Liên Quan",
"installedModelsCount": "Đã tải {{installed}} trên {{total}} model.",
"allNModelsInstalled": "Đã tải tất cả {{count}} model",
"nToInstall": "Còn {{count}} để tải",
"nAlreadyInstalled": "Có {{count}} đã tải",
"bundleAlreadyInstalled": "Gói đã được cài sẵn",
"bundleAlreadyInstalledDesc": "Tất cả model trong gói {{bundleName}} đã được cài sẵn.",
"launchpadTab": "Launchpad",
"launchpad": {
"welcome": "Chào mừng đến Trình Quản Lý Model",
"description": "Invoke yêu cầu tải model nhằm tối ưu hoá các tính năng trên nền tảng. Chọn tải các phương án thủ công hoặc khám phá các model khởi đầu thích hợp.",
"manualInstall": "Tải Thủ Công",
"urlDescription": "Tải model bằng URL hoặc đường dẫn trên máy. Phù hợp khi đã biết cụ thể model muốn thêm vào.",
"huggingFaceDescription": "Duyệt và cài đặt model từ các repository trên HuggingFace.",
"scanFolderDescription": "Quét một thư mục trên máy để tự động tìm và tải model.",
"recommendedModels": "Model Khuyến Nghị",
"exploreStarter": "Hoặc duyệt tất cả model khởi đầu có sẵn",
"quickStart": "Gói Khởi Đầu Nhanh",
"bundleDescription": "Các gói đều bao gồm những model cần thiết cho từng nhánh model và những model cơ sở đã chọn lọc để bắt đầu.",
"browseAll": "Hoặc duyệt tất cả model có sẵn:",
"stableDiffusion15": "Stable Diffusion 1.5",
"sdxl": "SDXL",
"fluxDev": "FLUX.1 dev"
}
},
"metadata": {
"guidance": "Hướng Dẫn",
@@ -802,7 +845,7 @@
"imageDetails": "Chi Tiết Ảnh",
"createdBy": "Được Tạo Bởi",
"parsingFailed": "Lỗi Cú Pháp",
"canvasV2Metadata": "Canvas",
"canvasV2Metadata": "Layer Canvas",
"parameterSet": "Dữ liệu tham số {{parameter}}",
"positivePrompt": "Lệnh Tích Cực",
"recallParameter": "Gợi Nhớ {{label}}",
@@ -1474,6 +1517,20 @@
"Lát khối liền mạch bức ảnh theo trục ngang."
],
"heading": "Lát Khối Liền Mạch Trục X"
},
"tileSize": {
"heading": "Kích Thước Khối",
"paragraphs": [
"Điều chỉnh kích thước của khối trong quá trình upscale. Khối càng lớn, bộ nhớ được sử dụng càng nhiều, nhưng có thể tạo sinh ảnh tốt hơn.",
"Model SD1.5 mặc định là 768, trong khi SDXL mặc định là 1024. Giảm kích thước khối nếu gặp vấn đề bộ nhớ."
]
},
"tileOverlap": {
"heading": "Chồng Chéo Khối",
"paragraphs": [
"Điều chỉnh sự chồng chéo giữa các khối liền kề trong quá trình upscale. Giá trị chồng chéo lớn giúp giảm sự rõ nét của các chỗ nối nhau, nhưng ngốn nhiều bộ nhớ hơn.",
"Giá trị mặc định (128) hoạt động tốt với đa số trường hợp, nhưng bạn có thể điều chỉnh cho phù hợp với nhu cầu cụ thể và hạn chế về bộ nhớ."
]
}
},
"models": {
@@ -1487,7 +1544,8 @@
"defaultVAE": "VAE Mặc Định",
"noMatchingModels": "Không có Model phù hợp",
"noModelsAvailable": "Không có model",
"selectModel": "Chọn Model"
"selectModel": "Chọn Model",
"noCompatibleLoRAs": "Không Có LoRAs Tương Thích"
},
"parameters": {
"postProcessing": "Xử Lý Hậu Kỳ (Shift + U)",
@@ -1538,7 +1596,10 @@
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp.",
"fluxKontextMultipleReferenceImages": "Chỉ có thể dùng 1 Ảnh Mẫu cùng lúc với Flux Kontext",
"promptExpansionPending": "Trong quá trình mở rộng lệnh",
"promptExpansionResultPending": "Hãy chấp thuận hoặc huỷ bỏ kết quả mở rộng lệnh của bạn"
},
"cfgScale": "Thang CFG",
"useSeed": "Dùng Hạt Giống",
@@ -1869,7 +1930,8 @@
"canvasGroup": "Canvas",
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
"copyToClipboard": "Sao Chép Vào Clipboard",
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard",
"newResizedControlLayer": "Layer Điều Khiển Được Đã Chỉnh Kích Thước Mới"
},
"stagingArea": {
"saveToGallery": "Lưu Vào Thư Viện Ảnh",
@@ -2050,7 +2112,11 @@
},
"isolatedLayerPreviewDesc": "Có hay không hiển thị riêng layer này khi thực hiện các thao tác như lọc hay biến đổi.",
"isolatedStagingPreview": "Xem Trước Tổng Quan Phần Cô Lập",
"isolatedPreview": "Xem Trước Phần Cô Lập"
"isolatedPreview": "Xem Trước Phần Cô Lập",
"saveAllImagesToGallery": {
"label": "Chuyển Sản Phẩm Tạo Sinh Mới Vào Thư Viện Ảnh",
"alert": "Đang chuyển sản phẩm tạo sinh mới vào Thư Viện Ảnh, bỏ qua Canvas"
}
},
"tool": {
"eraser": "Tẩy",
@@ -2062,8 +2128,8 @@
"colorPicker": "Chọn Màu"
},
"mergingLayers": "Đang gộp layer",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ thư viện ảnh vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton> hoặc kéo ảnh từ thư viện ảnh vào Ảnh Mẫu để bắt đầu.",
"useImage": "Dùng Hình Ảnh",
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
"asRasterLayer": "Như $t(controlLayers.rasterLayer)",
@@ -2115,7 +2181,18 @@
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
"imageNoise": "Độ Nhiễu Hình Ảnh",
"denoiseLimit": "Giới Hạn Khử Nhiễu",
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
"addImageNoise": "Thêm $t(controlLayers.imageNoise)",
"referenceImageEmptyStateWithCanvasOptions": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ thư viện ảnh vào Ảnh Mẫu này, hoặc <PullBboxButton>kéo hộp giới hạn vào Ảnh Mẫu này</PullBboxButton> để bắt đầu.",
"uploadOrDragAnImage": "Kéo ảnh từ thư viện ảnh hoặc <UploadButton>tải lên ảnh</UploadButton>.",
"exportCanvasToPSD": "Xuất Canvas Thành File PSD",
"ruleOfThirds": "Hiển Thị Quy Tắc Một Phần Ba",
"showNonRasterLayers": "Hiển Thị Layer Không Thuộc Dạng Raster (Shift + H)",
"hideNonRasterLayers": "Ẩn Layer Không Thuộc Dạng Raster (Shift + H)",
"autoSwitch": {
"off": "Tắt",
"switchOnStart": "Khi Bắt Đầu",
"switchOnFinish": "Khi Kết Thúc"
}
},
"stylePresets": {
"negativePrompt": "Lệnh Tiêu Cực",
@@ -2161,7 +2238,8 @@
"deleteImage": "Xoá Hình Ảnh",
"exportPromptTemplates": "Xuất Mẫu Trình Bày Cho Lệnh Ra (CSV)",
"templateDeleted": "Mẫu trình bày cho lệnh đã được xoá",
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh"
"unableToDeleteTemplate": "Không thể xoá mẫu trình bày cho lệnh",
"togglePromptPreviews": "Bật/Tắt Xem Trước Lệnh"
},
"system": {
"enableLogging": "Bật Chế Độ Ghi Log",
@@ -2257,7 +2335,26 @@
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint.",
"fluxKontextIncompatibleGenerationMode": "FLUX Kontext không hỗ trợ tạo sinh từ hình ảnh từ canvas. Thử sử dụng Ảnh Mẫu và tắt các Layer Dạng Raster.",
"noRasterLayers": "Không Tìm Thấy Layer Dạng Raster",
"noRasterLayersDesc": "Tạo ít nhất một layer dạng raster để xuất file PSD",
"noActiveRasterLayers": "Không Có Layer Dạng Raster Hoạt Động",
"noActiveRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"noVisibleRasterLayers": "Không Có Layer Dạng Raster Hiển Thị",
"noVisibleRasterLayersDesc": "Khởi động ít nhất một layer dạng raster để xuất file PSD",
"invalidCanvasDimensions": "Kích Thước Canvas Không Phù Hợp",
"canvasTooLarge": "Canvas Quá Lớn",
"canvasTooLargeDesc": "Kích thước canvas vượt mức tối đa cho phép để xuất file PSD. Giảm cả chiều dài và chiều rộng của canvas và thử lại.",
"failedToProcessLayers": "Thất Bại Khi Xử Lý Layer",
"psdExportSuccess": "Xuất File PSD Hoàn Tất",
"psdExportSuccessDesc": "Thành công xuất {{count}} layer sang file PSD",
"problemExportingPSD": "Có Vấn Đề Khi Xuất File PSD",
"canvasManagerNotAvailable": "Trình Quản Lý Canvas Không Có Sẵn",
"noValidLayerAdapters": "Không có Layer Adapter Phù Hợp",
"promptGenerationStarted": "Trình tạo sinh lệnh khởi động",
"uploadAndPromptGenerationFailed": "Thất bại khi tải lên ảnh để tạo sinh lệnh",
"promptExpansionFailed": "Có vấn đề xảy ra. Hãy thử mở rộng lệnh lại."
},
"ui": {
"tabs": {
@@ -2271,6 +2368,55 @@
"queue": "Queue (Hàng Đợi)",
"workflows": "Workflow (Luồng Làm Việc)",
"workflowsTab": "$t(common.tab) $t(ui.tabs.workflows)"
},
"launchpad": {
"workflowsTitle": "Đi sâu hơn với Workflow.",
"upscalingTitle": "Upscale và thêm chi tiết.",
"canvasTitle": "Biên tập và làm đẹp trên Canvas.",
"generateTitle": "Tạo sinh ảnh từ lệnh chữ.",
"modelGuideText": "Muốn biết lệnh nào tốt nhất cho từng model chứ?",
"modelGuideLink": "Xem thêm Hướng Dẫn Model.",
"workflows": {
"description": "Workflow là các template tái sử dụng được sẽ tự động hoá các tác vụ tạo sinh ảnh, cho phép bạn nhanh chóng thực hiện các thao tác phức tạp và nhận được kết quả nhất quán.",
"learnMoreLink": "Học thêm cách tạo ra workflow",
"browseTemplates": {
"title": "Duyệt Template Workflow",
"description": "Chọn từ các workflow có sẵn cho những tác vụ cơ bản"
},
"createNew": {
"title": "Tạo workflow mới",
"description": "Tạo workflow mới từ ban đầu"
},
"loadFromFile": {
"title": "Tải workflow từ tệp",
"description": "Tải lên workflow để bắt đầu với những thiết lập sẵn có"
}
},
"upscaling": {
"uploadImage": {
"title": "Tải Ảnh Để Upscale",
"description": "Nhấp hoặc kéo ảnh để upscale (JPG, PNG, WebP lên đến 100MB)"
},
"replaceImage": {
"title": "Thay Thế Ảnh Hiện Tại",
"description": "Nhấp hoặc kéo ảnh mới để thay thế cái hiện tại"
},
"imageReady": {
"title": "Ảnh Đã Sẵn Sàng",
"description": "Bấm 'Kích Hoạt' để chuẩn bị upscale"
},
"readyToUpscale": {
"title": "Chuẩn bị upscale!",
"description": "Điều chỉnh thiết lập bên dưới, sau đó bấm vào nút 'Khởi Động' để chuẩn bị upscale ảnh."
},
"upscaleModel": "Model Upscale",
"model": "Model",
"helpText": {
"promptAdvice": "Khi upscale, dùng lệnh để mô tả phương thức và phong cách. Tránh mô tả các chi tiết cụ thể trong ảnh.",
"styleAdvice": "Upscale thích hợp nhất cho phong cách chung của ảnh."
},
"scale": "Kích Thước"
}
}
},
"workflows": {
@@ -2423,7 +2569,10 @@
"postProcessingMissingModelWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model xử lý hậu kỳ (ảnh sang ảnh).",
"missingModelsWarning": "Đến <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải model cần thiết:",
"incompatibleBaseModel": "Phiên bản model chính không được hỗ trợ để upscale",
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale."
"incompatibleBaseModelDesc": "Upscale chỉ hỗ trợ cho model phiên bản SD1.5 và SDXL. Đổi model chính để bật lại tính năng upscale.",
"tileControl": "Điều Chỉnh Khối",
"tileSize": "Kích Thước Khối",
"tileOverlap": "Chồng Chéo Khối"
},
"newUserExperience": {
"toGetStartedLocal": "Để bắt đầu, hãy chắc chắn đã tải xuống hoặc thêm vào model cần để chạy Invoke. Sau đó, nhập lệnh vào hộp và nhấp chuột vào <StrongComponent>Kích Hoạt</StrongComponent> để tạo ra bức ảnh đầu tiên. Chọn một mẫu trình bày cho lệnh để cải thiện kết quả. Bạn có thể chọn để lưu ảnh trực tiếp vào <StrongComponent>Thư Viện Ảnh</StrongComponent> hoặc chỉnh sửa chúng ở <StrongComponent>Canvas</StrongComponent>.",
@@ -2439,8 +2588,9 @@
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
"items": [
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
"Tạo sinh ảnh nhanh hơn với Launchpad và thẻ Tạo Sinh đã cơ bản hoá.",
"Biên tập với lệnh bằng Flux Kontext Dev.",
"Xuất ra file PSD, ẩn số lượng lớn lớp phủ, sắp xếp model & ảnh — tất cả cho một giao diện đã thiết kế lại để chuyên điều khiển."
]
},
"upsell": {
@@ -2452,64 +2602,18 @@
"supportVideos": {
"supportVideos": "Video Hỗ Trợ",
"gettingStarted": "Bắt Đầu Làm Quen",
"studioSessionsDesc1": "Xem thử <StudioSessionsPlaylistLink /> để hiểu rõ Invoke hơn.",
"studioSessionsDesc2": "Đến <DiscordLink /> để tham gia vào phiên trực tiếp và hỏi câu hỏi. Các phiên được tải lên danh sách phát vào các tuần.",
"watch": "Xem",
"studioSessionsDesc": "Tham gia <DiscordLink /> để xem các buổi phát trực tiếp và đặt câu hỏi. Các phiên được đăng lên trên playlist các tuần tiếp theo.",
"videos": {
"howDoIDoImageToImageTransformation": {
"title": "Làm Sao Để Tôi Dùng Trình Biến Đổi Hình Ảnh Sang Hình Ảnh?",
"description": "Hướng dẫn cách thực hiện biến đổi ảnh sang ảnh trong Invoke."
"gettingStarted": {
"title": "Bắt Đầu Với Invoke",
"description": "Hoàn thành các video bao hàm mọi thứ bạn cần biết để bắt đầu với Invoke, từ tạo bức ảnh đầu tiên đến các kỹ thuật phức tạp khác."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"description": "Giới thiệu về ảnh mẫu và IP adapter toàn vùng.",
"title": "Làm Sao Để Tôi Dùng IP Adapter Toàn Vùng Và Ảnh Mẫu?"
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Học cách sáng tạo ảnh bằng trình điều khiển canvas của Invoke.",
"title": "Sáng Tạo Trong Trình Kiểm Soát Canvas Của Invoke"
},
"upscaling": {
"description": "Cách upscale ảnh bằng bộ công cụ của Invoke để nâng cấp độ phân giải.",
"title": "Upscale (Nâng Cấp Chất Lượng Hình Ảnh)"
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "Làm Sao Để Tôi Tạo Sinh Và Lưu Vào Thư Viện Ảnh?",
"description": "Các bước để tạo sinh và lưu ảnh vào thư viện ảnh."
},
"howDoIEditOnTheCanvas": {
"description": "Hướng dẫn chỉnh sửa ảnh trực tiếp trên canvas.",
"title": "Làm Sao Để Tôi Chỉnh Sửa Trên Canvas?"
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Làm Sao Để Tôi Dùng ControlNet và Layer Điều Khiển Được?",
"description": "Học cách áp dụng layer điều khiển được và controlnet vào ảnh của bạn."
},
"howDoIUseInpaintMasks": {
"title": "Làm Sao Để Tôi Dùng Lớp Phủ Inpaint?",
"description": "Cách áp dụng lớp phủ inpaint vào chỉnh sửa và thay đổi ảnh."
},
"howDoIOutpaint": {
"title": "Làm Sao Để Tôi Outpaint?",
"description": "Hướng dẫn outpaint bên ngoài viền ảnh gốc."
},
"creatingYourFirstImage": {
"description": "Giới thiệu về cách tạo ảnh từ ban đầu bằng công cụ Invoke.",
"title": "Tạo Hình Ảnh Đầu Tiên Của Bạn"
},
"usingControlLayersAndReferenceGuides": {
"description": "Học cách chỉ dẫn ảnh được tạo ra bằng layer điều khiển được và ảnh mẫu.",
"title": "Dùng Layer Điều Khiển Được và Chỉ Dẫn Mẫu"
},
"understandingImageToImageAndDenoising": {
"title": "Hiểu Rõ Trình Hình Ảnh Sang Hình Ảnh Và Trình Khử Nhiễu",
"description": "Tổng quan về trình biến đổi ảnh sang ảnh và trình khử nhiễu trong Invoke."
},
"exploringAIModelsAndConceptAdapters": {
"title": "Khám Phá Model AI Và Khái Niệm Về Adapter",
"description": "Đào sâu vào model AI và cách dùng những adapter để điều khiển một cách sáng tạo."
"studioSessions": {
"title": "Phiên Studio",
"description": "Đào sâu vào các phiên họp để khám phá những tính năng nâng cao của Invoke, sáng tạo workflow, và thảo luận cộng đồng."
}
},
"controlCanvas": "Điều Khiển Canvas",
"watch": "Xem"
}
},
"modelCache": {
"clearSucceeded": "Cache Model Đã Được Dọn",
@@ -2,8 +2,7 @@ import { Box } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { GlobalHookIsolator } from 'app/components/GlobalHookIsolator';
import { GlobalModalIsolator } from 'app/components/GlobalModalIsolator';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import { $didStudioInit, type StudioInitAction } from 'app/hooks/useStudioInitAction';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import { useClearStorage } from 'common/hooks/useClearStorage';
@@ -12,6 +11,7 @@ import { memo, useCallback } from 'react';
import { ErrorBoundary } from 'react-error-boundary';

import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import ThemeLocaleProvider from './ThemeLocaleProvider';
const DEFAULT_CONFIG = {};

interface Props {
@@ -30,14 +30,16 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
  }, [clearStorage]);

  return (
    <ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
      <Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
        <AppContent />
        {!didStudioInit && <Loading />}
      </Box>
      <GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
      <GlobalModalIsolator />
    </ErrorBoundary>
    <ThemeLocaleProvider>
      <ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
        <Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
          <AppContent />
          {!didStudioInit && <Loading />}
        </Box>
        <GlobalHookIsolator config={config} studioInitAction={studioInitAction} />
        <GlobalModalIsolator />
      </ErrorBoundary>
    </ThemeLocaleProvider>
  );
};
@@ -1,6 +1,8 @@
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { setupListeners } from '@reduxjs/toolkit/query';
import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { useStudioInitAction } from 'app/hooks/useStudioInitAction';
import { useSyncLangDirection } from 'app/hooks/useSyncLangDirection';
import { useSyncQueueStatus } from 'app/hooks/useSyncQueueStatus';
import { useLogger } from 'app/logging/useLogger';
import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
@@ -8,19 +10,26 @@ import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/ap
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import type { PartialAppConfig } from 'app/types/invokeai';
import { useFocusRegionWatcher } from 'common/hooks/focus';
import { useCloseChakraTooltipsOnDragFix } from 'common/hooks/useCloseChakraTooltipsOnDragFix';
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
import { useDndMonitor } from 'features/dnd/useDndMonitor';
import { useDynamicPromptsWatcher } from 'features/dynamicPrompts/hooks/useDynamicPromptsWatcher';
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
import { useWorkflowBuilderWatcher } from 'features/nodes/components/sidePanel/workflow/IsolatedWorkflowBuilderWatcher';
import { useSyncExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useSyncNodeErrors } from 'features/nodes/store/util/fieldValidators';
import { useReadinessWatcher } from 'features/queue/store/readiness';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { useNavigationApi } from 'features/ui/layouts/use-navigation-api';
import i18n from 'i18n';
import { size } from 'lodash-es';
import { memo, useEffect } from 'react';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
import { useGetQueueCountsByDestinationQuery } from 'services/api/endpoints/queue';
import { useSocketIO } from 'services/events/useSocketIO';

const queueCountArg = { destination: 'canvas' };

/**
 * GlobalHookIsolator is a logical component that runs global hooks in an isolated component, so that they do not
 * cause needless re-renders of any other components.
@@ -38,22 +47,34 @@ export const GlobalHookIsolator = memo(
  useGlobalHotkeys();
  useGetOpenAPISchemaQuery();
  useSyncLoggingConfig();
  useCloseChakraTooltipsOnDragFix();
  useNavigationApi();
  useDndMonitor();
  useSyncNodeErrors();
  useSyncLangDirection();

  // Persistent subscription to the queue counts query - canvas relies on this to know if there are pending
  // and/or in progress canvas sessions.
  useGetQueueCountsByDestinationQuery(queueCountArg);
  useSyncExecutionState();

  useEffect(() => {
    i18n.changeLanguage(language);
  }, [language]);

  useEffect(() => {
    if (size(config)) {
      logger.info({ config }, 'Received config');
      dispatch(configChanged(config));
    }
    logger.info({ config }, 'Received config');
    dispatch(configChanged(config));
  }, [dispatch, config, logger]);

  useEffect(() => {
    dispatch(appStarted());
  }, [dispatch]);

  useEffect(() => {
    return setupListeners(dispatch);
  }, [dispatch]);

  useStudioInitAction(studioInitAction);
  useStarterModelsToast();
  useSyncQueueStatus();
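The doc comment above describes the isolation pattern this file implements: hooks with fast-changing subscriptions live in a component that renders nothing, so their re-renders stop there instead of cascading through the tree. A minimal sketch of the same pattern (the hook and component names below are illustrative, not from the codebase):

// Sketch: the isolator re-renders every second, but because it renders null
// and has no children, nothing else in the app re-renders with it.
import { memo, useEffect, useState } from 'react';

const useNoisySubscription = () => {
  // Stand-in for hooks like useSyncQueueStatus or useSocketIO.
  const [, setTick] = useState(0);
  useEffect(() => {
    const id = setInterval(() => setTick((t) => t + 1), 1000);
    return () => clearInterval(id);
  }, []);
};

export const HookIsolator = memo(() => {
  useNoisySubscription();
  return null; // logical component: side effects only, no UI
});
HookIsolator.displayName = 'HookIsolator';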
@@ -1,17 +1,22 @@
import { useAppSelector } from 'app/store/storeHooks';
import { useIsRegionFocused } from 'common/hooks/focus';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { selectIsStaging } from 'features/controlLayers/store/canvasStagingAreaSlice';
import { useImageActions } from 'features/gallery/hooks/useImageActions';
import { useLoadWorkflow } from 'features/gallery/hooks/useLoadWorkflow';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { memo } from 'react';
import { useImageDTO } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';

export const GlobalImageHotkeys = memo(() => {
  useAssertSingleton('GlobalImageHotkeys');
  const imageDTO = useAppSelector(selectLastSelectedImage);
  const imageName = useAppSelector(selectLastSelectedImage);
  const imageDTO = useImageDTO(imageName);

  if (!imageDTO) {
    return null;
@@ -25,59 +30,64 @@ GlobalImageHotkeys.displayName = 'GlobalImageHotkeys';
const GlobalImageHotkeysInternal = memo(({ imageDTO }: { imageDTO: ImageDTO }) => {
  const isGalleryFocused = useIsRegionFocused('gallery');
  const isViewerFocused = useIsRegionFocused('viewer');
  const imageActions = useImageActions(imageDTO);
  const isStaging = useAppSelector(selectIsStaging);
  const isUpscalingEnabled = useFeatureStatus('upscaling');

  const isFocusOK = isGalleryFocused || isViewerFocused;

  const recallAll = useRecallAll(imageDTO);
  const recallRemix = useRecallRemix(imageDTO);
  const recallPrompts = useRecallPrompts(imageDTO);
  const recallSeed = useRecallSeed(imageDTO);
  const recallDimensions = useRecallDimensions(imageDTO);
  const loadWorkflow = useLoadWorkflow(imageDTO);

  useRegisteredHotkeys({
    id: 'loadWorkflow',
    category: 'viewer',
    callback: imageActions.loadWorkflow,
    options: { enabled: isGalleryFocused || isViewerFocused },
    dependencies: [imageActions.loadWorkflow, isGalleryFocused, isViewerFocused],
    callback: loadWorkflow.load,
    options: { enabled: loadWorkflow.isEnabled && isFocusOK },
    dependencies: [loadWorkflow, isFocusOK],
  });

  useRegisteredHotkeys({
    id: 'recallAll',
    category: 'viewer',
    callback: imageActions.recallAll,
    options: { enabled: !isStaging && (isGalleryFocused || isViewerFocused) },
    dependencies: [imageActions.recallAll, isStaging, isGalleryFocused, isViewerFocused],
    callback: recallAll.recall,
    options: { enabled: recallAll.isEnabled && isFocusOK },
    dependencies: [recallAll, isFocusOK],
  });

  useRegisteredHotkeys({
    id: 'recallSeed',
    category: 'viewer',
    callback: imageActions.recallSeed,
    options: { enabled: isGalleryFocused || isViewerFocused },
    dependencies: [imageActions.recallSeed, isGalleryFocused, isViewerFocused],
    callback: recallSeed.recall,
    options: { enabled: recallSeed.isEnabled && isFocusOK },
    dependencies: [recallSeed, isFocusOK],
  });

  useRegisteredHotkeys({
    id: 'recallPrompts',
    category: 'viewer',
    callback: imageActions.recallPrompts,
    options: { enabled: isGalleryFocused || isViewerFocused },
    dependencies: [imageActions.recallPrompts, isGalleryFocused, isViewerFocused],
    callback: recallPrompts.recall,
    options: { enabled: recallPrompts.isEnabled && isFocusOK },
    dependencies: [recallPrompts, isFocusOK],
  });

  useRegisteredHotkeys({
    id: 'remix',
    category: 'viewer',
    callback: imageActions.remix,
    options: { enabled: isGalleryFocused || isViewerFocused },
    dependencies: [imageActions.remix, isGalleryFocused, isViewerFocused],
    callback: recallRemix.recall,
    options: { enabled: recallRemix.isEnabled && isFocusOK },
    dependencies: [recallRemix, isFocusOK],
  });

  useRegisteredHotkeys({
    id: 'useSize',
    category: 'viewer',
    callback: imageActions.recallSize,
    options: { enabled: !isStaging && (isGalleryFocused || isViewerFocused) },
    dependencies: [imageActions.recallSize, isStaging, isGalleryFocused, isViewerFocused],
  });
  useRegisteredHotkeys({
    id: 'runPostprocessing',
    category: 'viewer',
    callback: imageActions.upscale,
    options: { enabled: isUpscalingEnabled && isViewerFocused },
    dependencies: [isUpscalingEnabled, imageDTO, isViewerFocused],
    callback: recallDimensions.recall,
    options: { enabled: recallDimensions.isEnabled && isFocusOK },
    dependencies: [recallDimensions, isFocusOK],
  });

  return null;
});
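The refactor above replaces the monolithic imageActions object with per-action hooks (useRecallAll, useRecallSeed, and so on) that each return their callback together with an isEnabled flag, so the enablement logic lives next to the action instead of being recomputed inline at every hotkey registration. A sketch of what such a hook's shape might look like (hypothetical body; only the returned { recall, isEnabled } pair is taken from the diff):

// Sketch of the per-action hook shape introduced by this refactor.
import { useCallback, useMemo } from 'react';

type RecallAction = { recall: () => void; isEnabled: boolean };

export const useRecallSeedSketch = (imageDTO: { seed?: number } | null): RecallAction => {
  const isEnabled = imageDTO?.seed !== undefined;
  const recall = useCallback(() => {
    if (imageDTO?.seed === undefined) {
      return;
    }
    // dispatch the recalled seed into the generation params here
  }, [imageDTO]);
  // Stable object so it can be used directly in hotkey dependency arrays.
  return useMemo(() => ({ recall, isEnabled }), [recall, isEnabled]);
};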
@@ -1,12 +1,8 @@
import { GlobalImageHotkeys } from 'app/components/GlobalImageHotkeys';
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
import {
  NewCanvasSessionDialog,
  NewGallerySessionDialog,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
import { DeleteImageModal } from 'features/deleteImageModal/components/DeleteImageModal';
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
@@ -15,6 +11,7 @@ import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/workflow
import { WorkflowLibraryModal } from 'features/nodes/components/sidePanel/workflow/WorkflowLibrary/WorkflowLibraryModal';
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { DeleteAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/DeleteAllExceptCurrentQueueItemConfirmationAlertDialog';
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
@@ -39,6 +36,7 @@ export const GlobalModalIsolator = memo(() => {
      <StylePresetModal />
      <WorkflowLibraryModal />
      <CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
      <DeleteAllExceptCurrentQueueItemConfirmationAlertDialog />
      <ClearQueueConfirmationsAlertDialog />
      <NewWorkflowConfirmationAlertDialog />
      <LoadWorkflowConfirmationAlertDialog />
@@ -48,8 +46,6 @@ export const GlobalModalIsolator = memo(() => {
      <RefreshAfterResetModal />
      <DeleteBoardModal />
      <GlobalImageHotkeys />
      <NewGallerySessionDialog />
      <NewCanvasSessionDialog />
      <ImageContextMenu />
      <FullscreenDropzone />
      <VideosModal />
@@ -42,7 +42,6 @@ import { $socketOptions } from 'services/events/stores';
import type { ManagerOptions, SocketOptions } from 'socket.io-client';

const App = lazy(() => import('./App'));
const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));

interface Props extends PropsWithChildren {
  apiUrl?: string;
@@ -318,7 +317,7 @@ const InvokeAIUI = ({
    if (import.meta.env.MODE === 'development') {
      window.$store = $store;
    }
    () => {
    return () => {
      $store.set(undefined);
      if (import.meta.env.MODE === 'development') {
        window.$store = undefined;
@@ -330,9 +329,7 @@
    <React.StrictMode>
      <Provider store={store}>
        <React.Suspense fallback={<Loading />}>
          <ThemeLocaleProvider>
            <App config={config} studioInitAction={studioInitAction} />
          </ThemeLocaleProvider>
          <App config={config} studioInitAction={studioInitAction} />
        </React.Suspense>
      </Provider>
    </React.StrictMode>
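Besides moving ThemeLocaleProvider out of this file, the -318,7 hunk above fixes an effect bug: the cleanup closure was created as a bare expression statement, so React never received it and never ran it on unmount. In miniature:

// The bug in miniature: a cleanup function that is created but not returned
// is a no-op expression, so React never runs it.
import { useEffect } from 'react';

const useStoreLifecycle = (store: { set: (v: undefined) => void }) => {
  useEffect(() => {
    // before: `() => { store.set(undefined); };` was built but never returned
    // after: returning it registers it as the effect's cleanup
    return () => {
      store.set(undefined);
    };
  }, [store]);
};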
@@ -1,44 +1,41 @@
import '@fontsource-variable/inter';
import 'overlayscrollbars/overlayscrollbars.css';
import '@xyflow/react/dist/base.css';
import 'common/components/OverlayScrollbars/overlayscrollbars.css';

import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { ChakraProvider, DarkMode, extendTheme, theme as baseTheme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $direction } from 'app/hooks/useSyncLangDirection';
import type { ReactNode } from 'react';
import { memo, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { memo, useMemo } from 'react';

type ThemeLocaleProviderProps = {
  children: ReactNode;
};

const buildTheme = (direction: 'ltr' | 'rtl') => {
  return extendTheme({
    ...baseTheme,
    direction,
    shadows: {
      ...baseTheme.shadows,
      selected:
        'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
      hoverSelected:
        'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
      hoverUnselected:
        'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
      selectedForCompare:
        'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
      hoverSelectedForCompare:
        'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
    },
  });
};

function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
  const { i18n } = useTranslation();

  const direction = i18n.dir();

  const theme = useMemo(() => {
    return extendTheme({
      ..._theme,
      direction,
      shadows: {
        ..._theme.shadows,
        selected:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
        hoverSelected:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
        hoverUnselected:
          'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
        selectedForCompare:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
        hoverSelectedForCompare:
          'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
      },
    });
  }, [direction]);

  useEffect(() => {
    document.body.dir = direction;
  }, [direction]);
  const direction = useStore($direction);
  const theme = useMemo(() => buildTheme(direction), [direction]);

  return (
    <ChakraProvider theme={theme} toastOptions={TOAST_OPTIONS}>
@@ -3,13 +3,12 @@ import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
import { canvasReset } from 'features/controlLayers/store/actions';
import { settingsSendToCanvasChanged } from 'features/controlLayers/store/canvasSettingsSlice';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { MetadataUtils } from 'features/metadata/parsing';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowLibraryModalOpen } from 'features/nodes/store/workflowLibraryModal';
import {
@@ -20,7 +19,8 @@ import {
} from 'features/nodes/store/workflowLibrarySlice';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
import { activeTabCanvasRightPanelChanged, setActiveTab } from 'features/ui/store/uiSlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { LAUNCHPAD_PANEL_ID, WORKSPACE_PANEL_ID } from 'features/ui/layouts/shared';
import { useLoadWorkflowWithDialog } from 'features/workflowLibrary/components/LoadWorkflowConfirmationAlertDialog';
import { atom } from 'nanostores';
import { useCallback, useEffect } from 'react';
@@ -91,12 +91,10 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
      const overrides: Partial<CanvasRasterLayerState> = {
        objects: [imageObject],
      };
      await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
      store.dispatch(canvasReset());
      store.dispatch(rasterLayerAdded({ overrides, isSelected: true }));
      store.dispatch(settingsSendToCanvasChanged(true));
      store.dispatch(setActiveTab('canvas'));
      store.dispatch(sentImageToCanvas());
      $imageViewer.set(false);
      toast({
        title: t('toast.sentToCanvas'),
        status: 'info',
@@ -118,25 +116,25 @@
        return;
      }
      const metadata = getImageMetadataResult.value;
      store.dispatch(canvasReset());
      // This shows a toast
      await parseAndRecallAllMetadata(metadata, true);
      store.dispatch(setActiveTab('canvas'));
      await MetadataUtils.recallAll(metadata, store);
    },
    [store, t]
  );

  const handleLoadWorkflow = useCallback(
    async (workflowId: string) => {
    (workflowId: string) => {
      // This shows a toast
      await loadWorkflowWithDialog({
      loadWorkflowWithDialog({
        type: 'library',
        data: workflowId,
        onSuccess: () => {
          store.dispatch(setActiveTab('workflows'));
          navigationApi.switchToTab('workflows');
        },
      });
    },
    [loadWorkflowWithDialog, store]
    [loadWorkflowWithDialog]
  );

  const handleSelectStylePreset = useCallback(
@@ -150,7 +148,7 @@
        return;
      }
      store.dispatch(activeStylePresetIdChanged(stylePresetId));
      store.dispatch(setActiveTab('canvas'));
      navigationApi.switchToTab('canvas');
      toast({
        title: t('toast.stylePresetLoaded'),
        status: 'info',
@@ -160,37 +158,33 @@
  );

  const handleGoToDestination = useCallback(
    (destination: StudioDestinationAction['data']['destination']) => {
    async (destination: StudioDestinationAction['data']['destination']) => {
      switch (destination) {
        case 'generation':
          // Go to the canvas tab, open the image viewer, and enable send-to-gallery mode
          store.dispatch(setActiveTab('canvas'));
          store.dispatch(activeTabCanvasRightPanelChanged('gallery'));
          store.dispatch(settingsSendToCanvasChanged(false));
          $imageViewer.set(true);
          // Go to the generate tab, open the launchpad
          await navigationApi.focusPanel('generate', LAUNCHPAD_PANEL_ID);
          store.dispatch(paramsReset());
          break;
        case 'canvas':
          // Go to the canvas tab, close the image viewer, and disable send-to-gallery mode
          store.dispatch(setActiveTab('canvas'));
          store.dispatch(settingsSendToCanvasChanged(true));
          $imageViewer.set(false);
          // Go to the canvas tab, open the launchpad
          await navigationApi.focusPanel('canvas', WORKSPACE_PANEL_ID);
          break;
        case 'workflows':
          // Go to the workflows tab
          store.dispatch(setActiveTab('workflows'));
          navigationApi.switchToTab('workflows');
          break;
        case 'upscaling':
          // Go to the upscaling tab
          store.dispatch(setActiveTab('upscaling'));
          navigationApi.switchToTab('upscaling');
          break;
        case 'viewAllWorkflows':
          // Go to the workflows tab and open the workflow library modal
          store.dispatch(setActiveTab('workflows'));
          navigationApi.switchToTab('workflows');
          $isWorkflowLibraryModalOpen.set(true);
          break;
        case 'viewAllWorkflowsRecommended':
          // Go to the workflows tab and open the workflow library modal with the recommended workflows view
          store.dispatch(setActiveTab('workflows'));
          navigationApi.switchToTab('workflows');
          $isWorkflowLibraryModalOpen.set(true);
          store.dispatch(workflowLibraryViewChanged('defaults'));
          store.dispatch(workflowLibraryTagsReset());
@@ -202,7 +196,7 @@
          break;
        case 'viewAllStylePresets':
          // Go to the canvas tab and open the style presets menu
          store.dispatch(setActiveTab('canvas'));
          navigationApi.switchToTab('canvas');
          $isStylePresetsMenuOpen.set(true);
          break;
      }
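The pattern running through this file's diff: imperative tab and panel changes move off Redux (setActiveTab dispatches) onto a navigation API, and handlers that await focusPanel become async. A sketch of the shape involved (the interface below is inferred from the call sites in the diff, not copied from the real navigation-api module):

// Sketch only: inferred from switchToTab / focusPanel usage above.
type TabName = 'generate' | 'canvas' | 'workflows' | 'upscaling';

interface NavigationApiSketch {
  switchToTab(tab: TabName): void;
  focusPanel(tab: TabName, panelId: string): Promise<void>;
}

// was: store.dispatch(setActiveTab('canvas'));
const goToCanvasLaunchpad = async (nav: NavigationApiSketch, workspacePanelId: string) => {
  await nav.focusPanel('canvas', workspacePanelId);
};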
invokeai/frontend/web/src/app/hooks/useSyncLangDirection.ts (new file, 36 lines)
@@ -0,0 +1,36 @@
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { atom } from 'nanostores';
import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';

/**
 * Global atom storing the language direction, to be consumed by the Chakra theme.
 *
 * Why do we need this? We have a kind of catch-22:
 * - The Chakra theme needs to know the language direction to apply the correct styles.
 * - The language direction is determined by i18n and the language selection.
 * - We want our error boundary to be themed.
 * - It's possible that i18n can throw if the language selection is invalid or not supported.
 *
 * Previously, we had the logic in this file in the theme provider, which wrapped the error boundary. The error
 * was properly themed. But then, if i18n threw in the theme provider, the error boundary did not catch the
 * error. The app would crash to a white screen.
 *
 * We tried swapping the component hierarchy so that the error boundary wraps the theme provider, but then the
 * error boundary isn't themed!
 *
 * The solution is to move this i18n direction logic out of the theme provider and into a hook that we can use
 * within the error boundary. The error boundary will be themed, _and_ catch any i18n errors.
 */
export const $direction = atom<'ltr' | 'rtl'>('ltr');

export const useSyncLangDirection = () => {
  useAssertSingleton('useSyncLangDirection');
  const { i18n, t } = useTranslation();

  useEffect(() => {
    const direction = i18n.dir();
    $direction.set(direction);
    document.body.dir = direction;
  }, [i18n, t]);
};
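Putting the pieces together: the hook writes the direction into a plain nanostores atom, and the theme provider reads it with useStore. That is what lets ThemeLocaleProvider sit outside the error boundary (see the App.tsx diff earlier) while useSyncLangDirection, and any i18n throw, stays inside it. A stripped-down sketch of that atom bridge, with illustrative names:

// Sketch: the writer runs inside the error boundary, the reader outside it.
import { atom } from 'nanostores';
import { useStore } from '@nanostores/react';

export const $dir = atom<'ltr' | 'rtl'>('ltr');

// Writer (inside the boundary, where an i18n throw can be caught):
export const syncDir = (dir: 'ltr' | 'rtl') => {
  $dir.set(dir);
  document.body.dir = dir;
};

// Reader (theme provider, outside the boundary) re-renders on change:
export const useDir = () => useStore($dir);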
@@ -1,13 +1,13 @@
import { objectEquals } from '@observ33r/object-equals';
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
import { isEqual } from 'lodash-es';

/**
 * A memoized selector creator that uses LRU cache and lodash's isEqual for equality check.
 * A memoized selector creator that uses LRU cache and @observ33r/object-equals's objectEquals for equality check.
 */
export const createMemoizedSelector = createSelectorCreator({
  memoize: lruMemoize,
  memoizeOptions: {
    resultEqualityCheck: isEqual,
    resultEqualityCheck: objectEquals,
  },
  argsMemoize: lruMemoize,
});
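Usage is unchanged by this equality-function swap; the creator is called like reselect's createSelector, and the deep resultEqualityCheck means a recomputed-but-equal result keeps its old reference, so subscribed components bail out of re-rendering. A hypothetical example (the selector, state shape, and import path are invented for illustration):

// Sketch: using the memoized selector creator defined above.
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';

interface RootState {
  gallery: { images: Array<{ id: string; starred: boolean }> };
}

const selectStarredImageIds = createMemoizedSelector(
  (state: RootState) => state.gallery.images,
  (images) => images.filter((i) => i.starred).map((i) => i.id)
);
// Two calls whose derived arrays are deep-equal return the same reference,
// so useSelector subscribers skip re-rendering.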
Some files were not shown because too many files have changed in this diff.