Compare commits
1318 Commits
*(Commit list: 1318 commits, newest `dccf291f64` through oldest `67d05d2066`. The author, date, and message columns were not captured in this view; only the SHA1 column survives.)*
@@ -20,13 +20,13 @@ def calc_images_mean_L1(image1_path, image2_path):

 def parse_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('image1_path')
-    parser.add_argument('image2_path')
+    parser.add_argument("image1_path")
+    parser.add_argument("image2_path")
     args = parser.parse_args()
     return args


-if __name__ == '__main__':
+if __name__ == "__main__":
     args = parse_args()
     mean_L1 = calc_images_mean_L1(args.image1_path, args.image2_path)
     print(mean_L1)
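For context, the hunk header above references `calc_images_mean_L1`, whose body lies outside this diff (the change itself is only a quote-style reformat). A minimal sketch of what such a function typically looks like, assuming Pillow and NumPy — this is illustrative only, not the repository's actual implementation:

```python
# Hypothetical sketch only; the real implementation is not shown in this diff.
import numpy as np
from PIL import Image


def calc_images_mean_L1(image1_path, image2_path):
    # Load both images as RGB float arrays (assumed to have identical dimensions).
    img1 = np.asarray(Image.open(image1_path).convert("RGB"), dtype=np.float64)
    img2 = np.asarray(Image.open(image2_path).convert("RGB"), dtype=np.float64)
    # Mean absolute (L1) difference over all pixels and channels;
    # 0.0 means the two images are identical.
    return float(np.mean(np.abs(img1 - img2)))
```

A small mean L1 value indicates near-identical images, which is what makes a script like this useful for checking generated output against a reference.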
@@ -1 +1,2 @@
 b3dccfaeb636599c02effc377cdd8a87d658256c
+218b6d0546b990fc449c876fb99f44b50c4daa35
38  .github/CODEOWNERS (vendored)
@@ -1,34 +1,34 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername

 # documentation
-/docs/ @lstein @blessedcoolant @hipsterusername
-/mkdocs.yml @lstein @blessedcoolant
+/docs/ @lstein @blessedcoolant @hipsterusername @Millu
+/mkdocs.yml @lstein @blessedcoolant @hipsterusername @Millu

 # nodes
-/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising
+/invokeai/app/ @Kyle0654 @blessedcoolant @psychedelicious @brandonrising @hipsterusername

 # installation and configuration
-/pyproject.toml @lstein @blessedcoolant
-/docker/ @lstein @blessedcoolant
-/scripts/ @ebr @lstein
-/installer/ @lstein @ebr
-/invokeai/assets @lstein @ebr
-/invokeai/configs @lstein
-/invokeai/version @lstein @blessedcoolant
+/pyproject.toml @lstein @blessedcoolant @hipsterusername
+/docker/ @lstein @blessedcoolant @hipsterusername
+/scripts/ @ebr @lstein @hipsterusername
+/installer/ @lstein @ebr @hipsterusername
+/invokeai/assets @lstein @ebr @hipsterusername
+/invokeai/configs @lstein @hipsterusername
+/invokeai/version @lstein @blessedcoolant @hipsterusername

 # web ui
-/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp
-/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp
+/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
+/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername

 # generation, model management, postprocessing
-/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising
+/invokeai/backend @damian0815 @lstein @blessedcoolant @gregghelt2 @StAlKeR7779 @brandonrising @ryanjdick @hipsterusername

 # front ends
-/invokeai/frontend/CLI @lstein
-/invokeai/frontend/install @lstein @ebr
-/invokeai/frontend/merge @lstein @blessedcoolant
-/invokeai/frontend/training @lstein @blessedcoolant
-/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp
+/invokeai/frontend/CLI @lstein @hipsterusername
+/invokeai/frontend/install @lstein @ebr @hipsterusername
+/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
+/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
+/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
12  .github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml (vendored)
@@ -1,5 +1,5 @@
 name: Feature Request
-description: Commit a idea or Request a new feature
+description: Contribute a idea or request a new feature
 title: '[enhancement]: '
 labels: ['enhancement']
 # assignees:
@@ -9,14 +9,14 @@ body:
   - type: markdown
     attributes:
       value: |
-        Thanks for taking the time to fill out this Feature request!
+        Thanks for taking the time to fill out this feature request!

   - type: checkboxes
     attributes:
       label: Is there an existing issue for this?
       description: |
         Please make use of the [search function](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
-        to see if a simmilar issue already exists for the feature you want to request
+        to see if a similar issue already exists for the feature you want to request
       options:
         - label: I have searched the existing issues
           required: true
@@ -36,7 +36,7 @@ body:
       label: What should this feature add?
       description: Please try to explain the functionality this feature should add
       placeholder: |
-        Instead of one huge textfield, it would be nice to have forms for bug-reports, feature-requests, ...
+        Instead of one huge text field, it would be nice to have forms for bug-reports, feature-requests, ...
         Great benefits with automatic labeling, assigning and other functionalitys not available in that form
         via old-fashioned markdown-templates. I would also love to see the use of a moderator bot 🤖 like
         https://github.com/marketplace/actions/issue-moderator-with-commands to auto close old issues and other things
@@ -51,6 +51,6 @@ body:

   - type: textarea
     attributes:
-      label: Aditional Content
+      label: Additional Content
       description: Add any other context or screenshots about the feature request here.
-      placeholder: This is a Mockup of the design how I imagine it <screenshot>
+      placeholder: This is a mockup of the design how I imagine it <screenshot>
@@ -13,7 +13,7 @@
 - [ ] No, because:


-## Have you updated relevant documentation?
+## Have you updated all relevant documentation?
 - [ ] Yes
 - [ ] No
9  .github/workflows/close-inactive-issues.yml (vendored)
@@ -1,11 +1,11 @@
 name: Close inactive issues
 on:
   schedule:
-    - cron: "00 6 * * *"
+    - cron: "00 4 * * *"

 env:
-  DAYS_BEFORE_ISSUE_STALE: 14
-  DAYS_BEFORE_ISSUE_CLOSE: 28
+  DAYS_BEFORE_ISSUE_STALE: 30
+  DAYS_BEFORE_ISSUE_CLOSE: 14

 jobs:
   close-issues:
@@ -14,7 +14,7 @@ jobs:
       issues: write
       pull-requests: write
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v8
         with:
           days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
           days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
@@ -23,5 +23,6 @@ jobs:
           close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
           days-before-pr-stale: -1
           days-before-pr-close: -1
+          exempt-issue-labels: "Active Issue"
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           operations-per-run: 500
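Net effect of this change: issues now go stale after 30 days of inactivity and are closed 14 days after that, instead of going stale at 14 days and closing 28 days later; the job runs two hours earlier (04:00 instead of 06:00 UTC), upgrades to `actions/stale@v8`, and adds an escape hatch — applying the new "Active Issue" label exempts an issue from auto-closure. Pull requests remain untouched (`days-before-pr-stale: -1`).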
4  .github/workflows/lint-frontend.yml (vendored)
@@ -2,8 +2,6 @@ name: Lint frontend

 on:
   pull_request:
-    paths:
-      - 'invokeai/frontend/web/**'
     types:
       - 'ready_for_review'
       - 'opened'
@@ -11,8 +9,6 @@ on:
   push:
     branches:
      - 'main'
-    paths:
-      - 'invokeai/frontend/web/**'
   merge_group:
   workflow_dispatch:
2  .github/workflows/mkdocs-material.yml (vendored)
@@ -2,7 +2,7 @@ name: mkdocs-material
 on:
   push:
     branches:
-      - 'refs/heads/v2.3'
+      - 'refs/heads/main'

 permissions:
   contents: write
27  .github/workflows/style-checks.yml (vendored, new file)
@@ -0,0 +1,27 @@
+name: style checks
+# just formatting and flake8 for now
+# TODO: add isort later
+
+on:
+  pull_request:
+  push:
+    branches: main
+
+jobs:
+  black:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+
+      - name: Install dependencies with pip
+        run: |
+          pip install black flake8 Flake8-pyproject
+
+      # - run: isort --check-only .
+      - run: black --check .
+      - run: flake8
50  .github/workflows/test-invoke-pip-skip.yml (vendored, deleted)
@@ -1,50 +0,0 @@
-name: Test invoke.py pip
-
-# This is a dummy stand-in for the actual tests
-# we don't need to run python tests on non-Python changes
-# But PRs require passing tests to be mergeable
-
-on:
-  pull_request:
-    paths:
-      - '**'
-      - '!pyproject.toml'
-      - '!invokeai/**'
-      - '!tests/**'
-      - 'invokeai/frontend/web/**'
-  merge_group:
-  workflow_dispatch:
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  matrix:
-    if: github.event.pull_request.draft == false
-    strategy:
-      matrix:
-        python-version:
-          - '3.10'
-        pytorch:
-          - linux-cuda-11_7
-          - linux-rocm-5_2
-          - linux-cpu
-          - macos-default
-          - windows-cpu
-        include:
-          - pytorch: linux-cuda-11_7
-            os: ubuntu-22.04
-          - pytorch: linux-rocm-5_2
-            os: ubuntu-22.04
-          - pytorch: linux-cpu
-            os: ubuntu-22.04
-          - pytorch: macos-default
-            os: macOS-12
-          - pytorch: windows-cpu
-            os: windows-2022
-    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
-    runs-on: ${{ matrix.os }}
-    steps:
-      - name: skip
-        run: echo "no build required"
24  .github/workflows/test-invoke-pip.yml (vendored)
@@ -3,16 +3,7 @@ on:
   push:
     branches:
       - 'main'
-    paths:
-      - 'pyproject.toml'
-      - 'invokeai/**'
-      - '!invokeai/frontend/web/**'
   pull_request:
-    paths:
-      - 'pyproject.toml'
-      - 'invokeai/**'
-      - 'tests/**'
-      - '!invokeai/frontend/web/**'
     types:
       - 'ready_for_review'
       - 'opened'
@@ -65,10 +56,23 @@ jobs:
         id: checkout-sources
         uses: actions/checkout@v3

+      - name: Check for changed python files
+        id: changed-files
+        uses: tj-actions/changed-files@v37
+        with:
+          files_yaml: |
+            python:
+              - 'pyproject.toml'
+              - 'invokeai/**'
+              - '!invokeai/frontend/web/**'
+              - 'tests/**'
+
       - name: set test prompt to main branch validation
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

       - name: setup python
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
@@ -76,6 +80,7 @@ jobs:
           cache-dependency-path: pyproject.toml

       - name: install invokeai
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         env:
           PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
         run: >
@@ -83,6 +88,7 @@ jobs:
           --editable=".[test]"

       - name: run pytest
+        if: steps.changed-files.outputs.python_any_changed == 'true'
         id: run-pytest
         run: pytest
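Taken together with the deletion of `test-invoke-pip-skip.yml` above, this moves path filtering out of the workflow triggers and into the job itself: the workflow now always starts, a `tj-actions/changed-files` step determines whether any Python-relevant files changed, and each subsequent step is skipped via its `if:` condition when they did not. The required status check therefore always reports a result, so the dummy stand-in workflow for non-Python PRs is no longer needed.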
38  .gitignore (vendored)
@@ -1,23 +1,8 @@
-# ignore default image save location and model symbolic link
 .idea/
-embeddings/
-outputs/
-models/ldm/stable-diffusion-v1/model.ckpt
-**/restoration/codeformer/weights
-
-# ignore user models config
-configs/models.user.yaml
-config/models.user.yml
-invokeai.init
-.version
-.last_model

 # ignore the Anaconda/Miniconda installer used while building Docker image
 anaconda.sh

-# ignore a directory which serves as a place for initial images
-inputs/
-
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -38,7 +23,6 @@ develop-eggs/
 downloads/
 eggs/
 .eggs/
-lib/
 lib64/
 parts/
 sdist/
@@ -190,39 +174,17 @@ cython_debug/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/

-src
-**/__pycache__/
-outputs
-
 # Logs and associated folders
 # created from generated embeddings.
 logs
 testtube
 checkpoints
 # If it's a Mac
 .DS_Store

-invokeai/frontend/yarn.lock
-invokeai/frontend/node_modules
-
 # Let the frontend manage its own gitignore
 !invokeai/frontend/web/*

 # Scratch folder
 .scratch/
 .vscode/
-gfpgan/
-models/ldm/stable-diffusion-v1/*.sha256
-
-
-# GFPGAN model files
-gfpgan/
-
-# config file (will be created by installer)
-configs/models.yaml
-
-# ignore initfile
-.invokeai
-
-# ignore environment.yml and requirements.txt
-# these are links to the real files in environments-and-requirements
17  .pre-commit-config.yaml (new file)
@@ -0,0 +1,17 @@
+# See https://pre-commit.com/ for usage and config
+repos:
+  - repo: local
+    hooks:
+      - id: black
+        name: black
+        stages: [commit]
+        language: system
+        entry: black
+        types: [python]
+
+      - id: flake8
+        name: flake8
+        stages: [commit]
+        language: system
+        entry: flake8
+        types: [python]
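With this configuration checked in, contributors can typically enable the hooks by installing the `pre-commit` package and running `pre-commit install` once in their clone; black and flake8 then run automatically against staged Python files on every commit. Note that `language: system` means both tools must already be on the PATH (here they are, via the project's `[test]` extras and the style-checks workflow dependencies).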
290  LICENSE-SDXL.txt (new file)
@@ -0,0 +1,290 @@
+Copyright (c) 2023 Stability AI
+CreativeML Open RAIL++-M License dated July 26, 2023
+
+Section I: PREAMBLE
+
+Multimodal generative models are being widely adopted and used, and
+have the potential to transform the way artists, among other
+individuals, conceive and benefit from AI or ML technologies as a tool
+for content creation.
+
+Notwithstanding the current and potential benefits that these
+artifacts can bring to society at large, there are also concerns about
+potential misuses of them, either due to their technical limitations
+or ethical considerations.
+
+In short, this license strives for both the open and responsible
+downstream use of the accompanying model. When it comes to the open
+character, we took inspiration from open source permissive licenses
+regarding the grant of IP rights. Referring to the downstream
+responsible use, we added use-based restrictions not permitting the
+use of the model in very specific scenarios, in order for the licensor
+to be able to enforce the license in case potential misuses of the
+Model may occur. At the same time, we strive to promote open and
+responsible research on generative models for art and content
+generation.
+
+Even though downstream derivative versions of the model could be
+released under different licensing terms, the latter will always have
+to include - at minimum - the same use-based restrictions as the ones
+in the original license (this license). We believe in the intersection
+between open and responsible AI development; thus, this agreement aims
+to strike a balance between both in order to enable responsible
+open-science in the field of AI.
+
+This CreativeML Open RAIL++-M License governs the use of the model
+(and its derivatives) and is informed by the model card associated
+with the model.
+
+NOW THEREFORE, You and Licensor agree as follows:
+
+Definitions
+
+"License" means the terms and conditions for use, reproduction, and
+Distribution as defined in this document.
+
+"Data" means a collection of information and/or content extracted from
+the dataset used with the Model, including to train, pretrain, or
+otherwise evaluate the Model. The Data is not licensed under this
+License.
+
+"Output" means the results of operating a Model as embodied in
+informational content resulting therefrom.
+
+"Model" means any accompanying machine-learning based assemblies
+(including checkpoints), consisting of learnt weights, parameters
+(including optimizer states), corresponding to the model architecture
+as embodied in the Complementary Material, that have been trained or
+tuned, in whole or in part on the Data, using the Complementary
+Material.
+
+"Derivatives of the Model" means all modifications to the Model, works
+based on the Model, or any other model which is created or initialized
+by transfer of patterns of the weights, parameters, activations or
+output of the Model, to the other model, in order to cause the other
+model to perform similarly to the Model, including - but not limited
+to - distillation methods entailing the use of intermediate data
+representations or methods based on the generation of synthetic data
+by the Model for training the other model.
+
+"Complementary Material" means the accompanying source code and
+scripts used to define, run, load, benchmark or evaluate the Model,
+and used to prepare data for training or evaluation, if any. This
+includes any accompanying documentation, tutorials, examples, etc, if
+any.
+
+"Distribution" means any transmission, reproduction, publication or
+other sharing of the Model or Derivatives of the Model to a third
+party, including providing the Model as a hosted service made
+available by electronic or other remote means - e.g. API-based or web
+access.
+
+"Licensor" means the copyright owner or entity authorized by the
+copyright owner that is granting the License, including the persons or
+entities that may have rights in the Model and/or distributing the
+Model.
+
+"You" (or "Your") means an individual or Legal Entity exercising
+permissions granted by this License and/or making use of the Model for
+whichever purpose and in any field of use, including usage of the
+Model in an end-use application - e.g. chatbot, translator, image
+generator.
+
+"Third Parties" means individuals or legal entities that are not under
+common control with Licensor or You.
+
+"Contribution" means any work of authorship, including the original
+version of the Model and any modifications or additions to that Model
+or Derivatives of the Model thereof, that is intentionally submitted
+to Licensor for inclusion in the Model by the copyright owner or by an
+individual or Legal Entity authorized to submit on behalf of the
+copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent to
+the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control
+systems, and issue tracking systems that are managed by, or on behalf
+of, the Licensor for the purpose of discussing and improving the
+Model, but excluding communication that is conspicuously marked or
+otherwise designated in writing by the copyright owner as "Not a
+Contribution."
+
+"Contributor" means Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Model.
+
+Section II: INTELLECTUAL PROPERTY RIGHTS
+
+Both copyright and patent grants apply to the Model, Derivatives of
+the Model and Complementary Material. The Model and Derivatives of the
+Model are subject to additional terms as described in
+
+Section III.
+
+Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare, publicly display, publicly
+perform, sublicense, and distribute the Complementary Material, the
+Model, and Derivatives of the Model.
+
+Grant of Patent License. Subject to the terms and conditions of this
+License and where and as applicable, each Contributor hereby grants to
+You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this paragraph) patent license to
+make, have made, use, offer to sell, sell, import, and otherwise
+transfer the Model and the Complementary Material, where such license
+applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by
+combination of their Contribution(s) with the Model to which such
+Contribution(s) was submitted. If You institute patent litigation
+against any entity (including a cross-claim or counterclaim in a
+lawsuit) alleging that the Model and/or Complementary Material or a
+Contribution incorporated within the Model and/or Complementary
+Material constitutes direct or contributory patent infringement, then
+any patent licenses granted to You under this License for the Model
+and/or Work shall terminate as of the date such litigation is asserted
+or filed.
+
+Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
+
+Distribution and Redistribution. You may host for Third Party remote
+access purposes (e.g. software-as-a-service), reproduce and distribute
+copies of the Model or Derivatives of the Model thereof in any medium,
+with or without modifications, provided that You meet the following
+conditions: Use-based restrictions as referenced in paragraph 5 MUST
+be included as an enforceable provision by You in any type of legal
+agreement (e.g. a license) governing the use and/or distribution of
+the Model or Derivatives of the Model, and You shall give notice to
+subsequent users You Distribute to, that the Model or Derivatives of
+the Model are subject to paragraph 5. This provision does not apply to
+the use of Complementary Material. You must give any Third Party
+recipients of the Model or Derivatives of the Model a copy of this
+License; You must cause any modified files to carry prominent notices
+stating that You changed the files; You must retain all copyright,
+patent, trademark, and attribution notices excluding those notices
+that do not pertain to any part of the Model, Derivatives of the
+Model. You may add Your own copyright statement to Your modifications
+and may provide additional or different license terms and conditions -
+respecting paragraph 4.a. - for use, reproduction, or Distribution of
+Your modifications, or for any such Derivatives of the Model as a
+whole, provided Your use, reproduction, and Distribution of the Model
+otherwise complies with the conditions stated in this License.
+
+Use-based restrictions. The restrictions set forth in Attachment A are
+considered Use-based restrictions. Therefore You cannot use the Model
+and the Derivatives of the Model for the specified restricted
+uses. You may use the Model subject to this License, including only
+for lawful purposes and in accordance with the License. Use may
+include creating any content with, finetuning, updating, running,
+training, evaluating and/or reparametrizing the Model. You shall
+require all of Your users who use the Model or a Derivative of the
+Model to comply with the terms of this paragraph (paragraph 5).
+
+The Output You Generate. Except as set forth herein, Licensor claims
+no rights in the Output You generate using the Model. You are
+accountable for the Output you generate and its subsequent uses. No
+use of the output can contravene any provision as stated in the
+License.
+
+Section IV: OTHER PROVISIONS
+
+Updates and Runtime Restrictions. To the maximum extent permitted by
+law, Licensor reserves the right to restrict (remotely or otherwise)
+usage of the Model in violation of this License.
+
+Trademarks and related. Nothing in this License permits You to make
+use of Licensors’ trademarks, trade names, logos or to otherwise
+suggest endorsement or misrepresent the relationship between the
+parties; and any rights not expressly granted herein are reserved by
+the Licensors.
+
+Disclaimer of Warranty. Unless required by applicable law or agreed to
+in writing, Licensor provides the Model and the Complementary Material
+(and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Model, Derivatives of
+the Model, and the Complementary Material and assume any risks
+associated with Your exercise of permissions under this License.
+
+Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise, unless
+required by applicable law (such as deliberate and grossly negligent
+acts) or agreed to in writing, shall any Contributor be liable to You
+for damages, including any direct, indirect, special, incidental, or
+consequential damages of any character arising as a result of this
+License or out of the use or inability to use the Model and the
+Complementary Material (including but not limited to damages for loss
+of goodwill, work stoppage, computer failure or malfunction, or any
+and all other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+Accepting Warranty or Additional Liability. While redistributing the
+Model, Derivatives of the Model and the Complementary Material
+thereof, You may choose to offer, and charge a fee for, acceptance of
+support, warranty, indemnity, or other liability obligations and/or
+rights consistent with this License. However, in accepting such
+obligations, You may act only on Your own behalf and on Your sole
+responsibility, not on behalf of any other Contributor, and only if
+You agree to indemnify, defend, and hold each Contributor harmless for
+any liability incurred by, or claims asserted against, such
+Contributor by reason of your accepting any such warranty or
+additional liability.
+
+If any provision of this License is held to be invalid, illegal or
+unenforceable, the remaining provisions shall be unaffected thereby
+and remain valid as if such provision had not been set forth herein.
+
+
+END OF TERMS AND CONDITIONS
+
+Attachment A
+
+Use Restrictions
+
+You agree not to use the Model or Derivatives of the Model:
+
+* In any way that violates any applicable national, federal, state,
+  local or international law or regulation;
+
+* For the purpose of exploiting, harming or attempting to exploit or
+  harm minors in any way;
+
+* To generate or disseminate verifiably false information and/or
+  content with the purpose of harming others;
+
+* To generate or disseminate personal identifiable information that
+  can be used to harm an individual;
+
+* To defame, disparage or otherwise harass others;
+
+* For fully automated decision making that adversely impacts an
+  individual’s legal rights or otherwise creates or modifies a
+  binding, enforceable obligation;
+
+* For any use intended to or which has the effect of discriminating
+  against or harming individuals or groups based on online or offline
+  social behavior or known or predicted personal or personality
+  characteristics;
+
+* To exploit any of the vulnerabilities of a specific group of persons
+  based on their age, social, physical or mental characteristics, in
+  order to materially distort the behavior of a person pertaining to
+  that group in a manner that causes or is likely to cause that person
+  or another person physical or psychological harm;
+
+* For any use intended to or which has the effect of discriminating
+  against individuals or groups based on legally protected
+  characteristics or categories;
+
+* To provide medical advice and medical results interpretation;
+
+* To generate or disseminate information for the purpose to be used
+  for administration of justice, law enforcement, immigration or
+  asylum processes, such as predicting an individual will commit
+  fraud/crime commitment (e.g. by text profiling, drawing causal
+  relationships between assertions made in documents, indiscriminate
+  and arbitrarily-targeted use).
44
README.md
@@ -43,7 +43,7 @@ Web Interface, interactive Command Line Interface, and also serves as
|
||||
the foundation for multiple commercial products.
|
||||
|
||||
**Quick links**: [[How to
|
||||
Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a
|
||||
Install](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)] [<a
|
||||
href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a
|
||||
href="https://invoke-ai.github.io/InvokeAI/">Documentation and
|
||||
Tutorials</a>] [<a
|
||||
@@ -81,7 +81,7 @@ Table of Contents 📝

## Quick Start

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)

If upgrading from version 2.3, please read [Migrating a 2.3 root
directory to 3.0](#migrating-to-3) first.
@@ -123,7 +123,7 @@ and go to http://localhost:9090.

### Command-Line Installation (for developers and users familiar with Terminals)

You must have Python 3.9 or 3.10 installed on your machine. Earlier or
You must have Python 3.9 through 3.11 installed on your machine. Earlier or
later versions are not supported.
Node.js also needs to be installed along with yarn (can be installed with
the command `npm install -g yarn` if needed)
@@ -161,7 +161,7 @@ the command `npm install -g yarn` if needed)

_For Windows/Linux with an NVIDIA GPU:_

```terminal
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
```

_For Linux with an AMD GPU:_
@@ -184,8 +184,9 @@ the command `npm install -g yarn` if needed)

6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):

   ```terminal
   invokeai-configure
   invokeai-configure --root .
   ```

   Don't miss the dot at the end!

7. Launch the web server (do it every time you run InvokeAI):

@@ -193,15 +194,9 @@ the command `npm install -g yarn` if needed)

   invokeai-web
   ```

8. Build Node.js assets
8. Point your browser to http://localhost:9090 to bring up the web interface.

   ```terminal
   cd invokeai/frontend/web/
   yarn vite build
   ```

9. Point your browser to http://localhost:9090 to bring up the web interface.
10. Type `banana sushi` in the box on the top left and click `Invoke`.
9. Type `banana sushi` in the box on the top left and click `Invoke`.

Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
@@ -311,13 +306,30 @@ InvokeAI. The second will prepare the 2.3 directory for use with 3.0.

You may now launch the WebUI in the usual way, by selecting option [1]
from the launcher script

#### Migration Caveats
#### Migrating Images

The migration script will migrate your invokeai settings and models,
including textual inversion models, LoRAs and merges that you may have
installed previously. However it does **not** migrate the generated
images stored in your 2.3-format outputs directory. You will need to
manually import selected images into the 3.0 gallery via drag-and-drop.
images stored in your 2.3-format outputs directory. To do this, you
need to run an additional step:

1. From a working InvokeAI 3.0 root directory, start the launcher and
   enter menu option [8] to open the "developer's console".

2. At the developer's console command line, type the command:

   ```bash
   invokeai-import-images
   ```

3. This will lead you through the process of confirming the desired
   source and destination for the imported images. The images will
   appear in the gallery board of your choice, and contain the
   original prompt, model name, and other parameters used to generate
   the image.

(Many kudos to **techjedi** for contributing this script.)

## Hardware Requirements

@@ -29,8 +29,8 @@ configure() {
        echo "To reconfigure InvokeAI, delete the above file."
        echo "======================================================================"
    else
        mkdir -p ${INVOKEAI_ROOT}
        chown --recursive ${USER} ${INVOKEAI_ROOT}
        mkdir -p "${INVOKEAI_ROOT}"
        chown --recursive ${USER} "${INVOKEAI_ROOT}"
        gosu ${USER} invokeai-configure --yes --default_only
    fi
}

@@ -50,16 +50,16 @@ fi

if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
    apt-get update
    apt-get install -y openssh-server
    pushd $HOME
    pushd "$HOME"
    mkdir -p .ssh
    echo ${PUBLIC_KEY} > .ssh/authorized_keys
    echo "${PUBLIC_KEY}" > .ssh/authorized_keys
    chmod -R 700 .ssh
    popd
    service ssh start
fi

cd ${INVOKEAI_ROOT}
cd "${INVOKEAI_ROOT}"

# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"

(image changed) Before: 310 KiB, After: 297 KiB
BIN docs/assets/nodes/groupsallscale.png (new file, 490 KiB)
BIN docs/assets/nodes/groupsconditioning.png (new file, 319 KiB)
BIN docs/assets/nodes/groupscontrol.png (new file, 217 KiB)
BIN docs/assets/nodes/groupsimgvae.png (new file, 244 KiB)
BIN docs/assets/nodes/groupsiterate.png (new file, 948 KiB)
BIN docs/assets/nodes/groupslora.png (new file, 292 KiB)
BIN docs/assets/nodes/groupsmultigenseeding.png (new file, 420 KiB)
BIN docs/assets/nodes/groupsnoise.png (new file, 197 KiB)
BIN docs/assets/nodes/groupsrandseed.png (new file, 216 KiB)
BIN docs/assets/nodes/nodescontrol.png (new file, 439 KiB)
BIN docs/assets/nodes/nodesi2i.png (new file, 563 KiB)
BIN docs/assets/nodes/nodest2i.png (new file, 353 KiB)
(image changed) Before: 501 KiB, After: 421 KiB
(image changed) Before: 473 KiB, After: 585 KiB
(image changed) Before: 557 KiB, After: 598 KiB
(image changed) Before: 340 KiB, After: 438 KiB
BIN docs/assets/troubleshooting/broken-dependency.png (new file, 131 KiB)
@@ -14,11 +14,14 @@ To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the

#### Development
If you’d like to help with development, please see our [development guide](contribution_guides/development.md). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.

#### Nodes
If you’d like to help with nodes development, please see our [nodes contribution guide](/nodes/contributingNodes). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.

#### Documentation
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documenation.md).
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documentation.md).

#### Translation
If you'd like to help with translation, please see our [translation guide](docs/contributing/.contribution_guides/translation.md).
If you'd like to help with translation, please see our [translation guide](contribution_guides/translation.md).

#### Tutorials
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.

@@ -29,12 +29,13 @@ The first set of things we need to do when creating a new Invocation are -

- Create a new class that derives from a predefined parent class called
  `BaseInvocation`.
- The name of every Invocation must end with the word `Invocation` in order for
  it to be recognized as an Invocation.
- Every Invocation must have a `docstring` that describes what this Invocation
  does.
- Every Invocation must have a unique `type` field defined which becomes its
  identifier.
- While not strictly required, we suggest every invocation class name ends in
  "Invocation", e.g. "CropImageInvocation".
- Every Invocation must use the `@invocation` decorator to provide its unique
  invocation type. You may also provide its title, tags and category using the
  decorator.
- Invocations are strictly typed. We make use of the native
  [typing](https://docs.python.org/3/library/typing.html) library and the
  installed [pydantic](https://pydantic-docs.helpmanual.io/) library for
@@ -43,12 +44,11 @@ The first set of things we need to do when creating a new Invocation are -

So let us do that.

```python
from typing import Literal
from .baseinvocation import BaseInvocation
from .baseinvocation import BaseInvocation, invocation

@invocation('resize')
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'
```

That's great.

@@ -62,8 +62,10 @@ our Invocation takes.

### **Inputs**

Every Invocation input is a pydantic `Field` and like everything else should be
strictly typed and defined.
Every Invocation input must be defined using the `InputField` function. This is
a wrapper around the pydantic `Field` function, which handles a few extra things
and provides type hints. Like everything else, this should be strictly typed and
defined.

So let us create these inputs for our Invocation. First up, the `image` input we
need. Generally, we can use standard variable types in Python but InvokeAI

@@ -76,55 +78,51 @@ create your own custom field types later in this guide. For now, let's go ahead
and use it.

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation
from ..models.image import ImageField
from .baseinvocation import BaseInvocation, InputField, invocation
from .primitives import ImageField

@invocation('resize')
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    image: ImageField = InputField(description="The input image")
```

Let us break down our input code.

```python
image: Union[ImageField, None] = Field(description="The input image", default=None)
image: ImageField = InputField(description="The input image")
```

| Part      | Value                                                | Description                                                                                         |
| --------- | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------- |
| Name      | `image`                                              | The variable that will hold our image                                                               |
| Type Hint | `Union[ImageField, None]`                            | The types for our field. Indicates that the image can either be an `ImageField` type or `None`      |
| Field     | `Field(description="The input image", default=None)` | The image variable is a field which needs a description and a default value that we set to `None`.  |

| Part      | Value                                       | Description                                                                      |
| --------- | ------------------------------------------- | -------------------------------------------------------------------------------- |
| Name      | `image`                                     | The variable that will hold our image                                            |
| Type Hint | `ImageField`                                | The types for our field. Indicates that the image must be an `ImageField` type.  |
| Field     | `InputField(description="The input image")` | The image variable is an `InputField` which needs a description.                 |

Great. Now let us create our other inputs for `width` and `height`

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation
from ..models.image import ImageField
from .baseinvocation import BaseInvocation, InputField, invocation
from .primitives import ImageField

@invocation('resize')
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")
    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image")
```

As you might have noticed, we added two new parameters to the field type for
`width` and `height` called `ge` and `le`. These basically stand for _greater
than or equal to_ and _less than or equal to_. There are various other param
types for field that you can find on the **pydantic** documentation.
As you might have noticed, we added two new arguments to the `InputField`
definition for `width` and `height`, called `ge` and `le`. They stand for
_greater than or equal to_ and _less than or equal to_.

These impose constraints on those fields, and will raise an exception if the
values do not meet the constraints. Field constraints are provided by
**pydantic**, so anything you see in the **pydantic docs** will work.

**Note:** _Any time it is possible to define constraints for our field, we
should do it so the frontend has more information on how to parse this field._

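As a quick aside, here is a minimal, standalone sketch of how pydantic enforces such constraints, using a plain `Field` rather than InvokeAI's `InputField` wrapper:

```python
from pydantic import BaseModel, Field, ValidationError

class Demo(BaseModel):
    # Same bounds as our width input: 64 <= width <= 2048
    width: int = Field(default=512, ge=64, le=2048)

try:
    Demo(width=32)  # violates ge=64
except ValidationError as err:
    print(err)  # reports that width must be greater than or equal to 64
```
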
@@ -141,20 +139,17 @@ that are provided by it by InvokeAI.

Let us create this function first.

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField
from .baseinvocation import BaseInvocation, InputField, invocation
from .primitives import ImageField

@invocation('resize')
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")
    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext):
        pass
```

@@ -173,21 +168,18 @@ all the necessary info related to image outputs. So let us use that.

We will cover how to create your own output types later in this guide.

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField
from .baseinvocation import BaseInvocation, InputField, invocation
from .primitives import ImageField
from .image import ImageOutput

@invocation('resize')
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")
    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        pass
```

@@ -195,39 +187,34 @@ class ResizeInvocation(BaseInvocation):

Perfect. Now that we have our Invocation setup, let us do what we want to do.

- We will first load the image. Generally we do this using the `PIL` library but
  we can use one of the services provided by InvokeAI to load the image.
- We will first load the image using one of the services provided by InvokeAI.
- We will resize the image using `PIL` to our input data.
- We will output this image in the format we set above.

So let's do that.

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField, ResourceOrigin, ImageCategory
from .baseinvocation import BaseInvocation, InputField, invocation
from .primitives import ImageField
from .image import ImageOutput

@invocation("resize")
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'
    """Resizes an image"""

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")
    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the image using InvokeAI's predefined Image Service.
        image = context.services.images.get_pil_image(self.image.image_origin, self.image.image_name)
        # Load the image using InvokeAI's predefined Image Service. Returns the PIL image.
        image = context.services.images.get_pil_image(self.image.image_name)

        # Resizing the image
        # Because we used the above service, we already have a PIL image. So we can simply resize.
        resized_image = image.resize((self.width, self.height))

        # Preparing the image for output using InvokeAI's predefined Image Service.
        # Save the image using InvokeAI's predefined Image Service. Returns the prepared PIL image.
        output_image = context.services.images.create(
            image=resized_image,
            image_origin=ResourceOrigin.INTERNAL,

@@ -241,7 +228,6 @@ class ResizeInvocation(BaseInvocation):
        return ImageOutput(
            image=ImageField(
                image_name=output_image.image_name,
                image_origin=output_image.image_origin,
            ),
            width=output_image.width,
            height=output_image.height,
```

@@ -253,6 +239,24 @@ certain way that the images need to be dispatched in order to be stored and read

correctly. In 99% of the cases when dealing with an image output, you can simply
copy-paste the template above.

### Customization

We can use the `@invocation` decorator to provide some additional info to the
UI, like a custom title, tags and category.

We also encourage providing a version. This must be a
[semver](https://semver.org/) version string ("$MAJOR.$MINOR.$PATCH"). The UI
will let users know if their workflow is using a mismatched version of the node.

```python
@invocation("resize", title="My Resizer", tags=["resize", "image"], category="My Invocations", version="1.0.0")
class ResizeInvocation(BaseInvocation):
    """Resizes an image"""

    image: ImageField = InputField(description="The input image")
    ...
```

That's it. You made your own **Resize Invocation**.

## Result

@@ -270,9 +274,57 @@ new Invocation ready to be used.

![resize node editor](../assets/contributing/resize_node_editor.png)

# Advanced
## Contributing Nodes

## Custom Input Fields
Once you've created a Node, the next step is to share it with the community! The
best way to do this is to submit a Pull Request to add the Node to the
[Community Nodes](nodes/communityNodes) list. If you're not sure how to do that,
take a look at our [contributing nodes overview](contributingNodes).

## Advanced

### Custom Output Types

Like with custom inputs, sometimes you might find yourself needing custom
outputs that InvokeAI does not provide. We can easily set one up.

Now that you are familiar with Invocations and Inputs, let us use that knowledge
to create an output that has an `image` field, a `color` field and a `string`
field.

- An invocation output is a class that derives from the parent class of
  `BaseInvocationOutput`.
- All invocation outputs must use the `@invocation_output` decorator to provide
  their unique output type.
- Output fields must use the provided `OutputField` function. This is very
  similar to the `InputField` function described earlier - it's a wrapper around
  `pydantic`'s `Field()`.
- It is not mandatory but we recommend using names ending with `Output` for
  output types.
- It is not mandatory but we highly recommend adding a `docstring` to describe
  what your output type is for.

Now that we know the basic rules for creating a new output type, let us go ahead
and make it.

```python
from .baseinvocation import BaseInvocationOutput, OutputField, invocation_output
from .primitives import ImageField, ColorField

@invocation_output('image_color_string_output')
class ImageColorStringOutput(BaseInvocationOutput):
    '''An output holding an image, a color and a string'''

    image: ImageField = OutputField(description="The image")
    color: ColorField = OutputField(description="The color")
    text: str = OutputField(description="The string")
```

That's all there is to it.

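To see how such an output might be consumed, here is a hypothetical invocation that returns it. The class, its type string and the returned field values are illustrative only; they are not part of InvokeAI:

```python
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
from .primitives import ColorField, ImageField

@invocation('image_color_string')
class ImageColorStringInvocation(BaseInvocation):
    '''Illustrative invocation that returns our custom output type'''

    image: ImageField = InputField(description="The input image")

    def invoke(self, context: InvocationContext) -> ImageColorStringOutput:
        # Pass the input image through, alongside a fixed color and string.
        return ImageColorStringOutput(
            image=self.image,
            color=ColorField(r=0, g=0, b=0, a=255),
            text="banana sushi",
        )
```
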
<!-- TODO: DANGER - we probably do not want people to create their own field types, because this requires a lot of work on the frontend to accommodate.

### Custom Input Fields

Now that you know how to create your own Invocations, let us dive into slightly
more advanced topics.

@@ -326,173 +378,7 @@ like this.

color: ColorField = Field(default=ColorField(r=0, g=0, b=0, a=0), description='Background color of an image')
```

**Extra Config**

All input fields also take an additional `Config` class that you can use to do
various advanced things like setting required parameters.

Let us do that for our _ColorField_ and enforce all the values because we did
not define any defaults for our fields.

```python
from pydantic import BaseModel, Field

class ColorField(BaseModel):
    '''A field that holds the rgba values of a color'''
    r: int = Field(ge=0, le=255, description="The red channel")
    g: int = Field(ge=0, le=255, description="The green channel")
    b: int = Field(ge=0, le=255, description="The blue channel")
    a: int = Field(ge=0, le=255, description="The alpha channel")

    class Config:
        schema_extra = {"required": ["r", "g", "b", "a"]}
```

Now it becomes mandatory for the user to supply all the values required by our
input field.

We will discuss the `Config` class in extra detail later in this guide and how
you can use it to make your Invocations more robust.

## Custom Output Types

Like with custom inputs, sometimes you might find yourself needing custom
outputs that InvokeAI does not provide. We can easily set one up.

Now that you are familiar with Invocations and Inputs, let us use that knowledge
to put together a custom output type for an Invocation that returns _width_,
_height_ and _background_color_ that we need to create a blank image.

- A custom output type is a class that derives from the parent class of
  `BaseInvocationOutput`.
- It is not mandatory but we recommend using names ending with `Output` for
  output types. So we'll call our class `BlankImageOutput`
- It is not mandatory but we highly recommend adding a `docstring` to describe
  what your output type is for.
- Like Invocations, each output type should have a `type` variable that is
  **unique**

Now that we know the basic rules for creating a new output type, let us go ahead
and make it.

```python
from typing import Literal
from pydantic import Field

from .baseinvocation import BaseInvocationOutput
# ColorField is the custom field type we defined above.

class BlankImageOutput(BaseInvocationOutput):
    '''Base output type for creating a blank image'''
    type: Literal['blank_image_output'] = 'blank_image_output'

    # Outputs
    width: int = Field(description='Width of blank image')
    height: int = Field(description='Height of blank image')
    bg_color: ColorField = Field(description='Background color of blank image')

    class Config:
        schema_extra = {"required": ["type", "width", "height", "bg_color"]}
```

All set. We now have an output type that requires what we need to create a
blank_image. And if you noticed it, we even used the `Config` class to ensure
the fields are required.

## Custom Configuration

As you might have noticed when making inputs and outputs, we used a class called
`Config` from _pydantic_ to further customize them. Because our inputs and
outputs essentially inherit from _pydantic_'s `BaseModel` class, all
[configuration options](https://docs.pydantic.dev/latest/usage/schema/#schema-customization)
that are valid for _pydantic_ classes are also valid for our inputs and outputs.
You can do the same for your Invocations too but InvokeAI makes our life a
little bit easier on that end.

InvokeAI provides a custom configuration class called `InvocationConfig`
particularly for configuring Invocations. This is exactly the same as the raw
`Config` class from _pydantic_ with some extra stuff on top to help facilitate
parsing of the schema in the frontend UI.

At the current moment, this `InvocationConfig` class is further improved with
the following features related to the `ui`.

| Config Option | Field Type | Example |
| ------------- | ---------- | ------- |
| type_hints | `Dict[str, Literal["integer", "float", "boolean", "string", "enum", "image", "latents", "model", "control"]]` | `type_hint: "model"` provides type hints related to the model like displaying a list of available models |
| tags | `List[str]` | `tags: ['resize', 'image']` will classify your invocation under the tags of resize and image. |
| title | `str` | `title: 'Resize Image'` will rename your Invocation to this custom title rather than infer from the name of the Invocation class. |

So let us update your `ResizeInvocation` with some extra configuration and see
how that works.

```python
from typing import Literal, Union
from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from ..models.image import ImageField, ResourceOrigin, ImageCategory
from .image import ImageOutput

class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''
    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ['resize', 'image'],
                "title": 'My Custom Resize'
            }
        }

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the image using InvokeAI's predefined Image Service.
        image = context.services.images.get_pil_image(self.image.image_origin, self.image.image_name)

        # Resizing the image
        # Because we used the above service, we already have a PIL image. So we can simply resize.
        resized_image = image.resize((self.width, self.height))

        # Preparing the image for output using InvokeAI's predefined Image Service.
        output_image = context.services.images.create(
            image=resized_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        # Returning the Image
        return ImageOutput(
            image=ImageField(
                image_name=output_image.image_name,
                image_origin=output_image.image_origin,
            ),
            width=output_image.width,
            height=output_image.height,
        )
```

We now customized our code to let the frontend know that our Invocation falls
under `resize` and `image` categories. So when the user searches for these
particular words, our Invocation will show up too.

We also set a custom title for our Invocation. So instead of being called
`Resize`, it will be called `My Custom Resize`.

As simple as that.

As time goes by, InvokeAI will further improve and add more customizability for
Invocation configuration. We will have more documentation regarding this at a
later time.

# **[TODO]**

## Custom Components For Frontend
### Custom Components For Frontend

Every backend input type should have a corresponding frontend component so the
UI knows what to render when you use a particular field type.

@@ -510,281 +396,4 @@ Let us create a new component for our custom color field we created above. When

we use a color field, let us say we want the UI to display a color picker for
the user to pick from rather than entering values. That is what we will build
now.

---

# OLD -- TO BE DELETED OR MOVED LATER

---

## Creating a new invocation

To create a new invocation, either find the appropriate module file in
`/ldm/invoke/app/invocations` to add your invocation to, or create a new one in
that folder. All invocations in that folder will be discovered and made
available to the CLI and API automatically. Invocations make use of
[typing](https://docs.python.org/3/library/typing.html) and
[pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration
into the CLI and API.

An invocation looks like this:

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""

    # fmt: off
    type: Literal["upscale"] = "upscale"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2, 4] = Field(default=2, description="The upscale level")
    # fmt: on

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["upscaling", "image"],
            },
        }

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(
            self.image.image_origin, self.image.image_name
        )
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=(self.level, self.strength),
            strength=0.0,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_dto = context.services.images.create(
            image=results[0][0],
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(
                image_name=image_dto.image_name,
                image_origin=image_dto.image_origin,
            ),
            width=image_dto.width,
            height=image_dto.height,
        )
```

Each portion is important to implement correctly.

### Class definition and type

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    type: Literal['upscale'] = 'upscale'
```

All invocations must derive from `BaseInvocation`. They should have a docstring
that declares what they do in a single, short line. They should also have a
`type` with a type hint that's `Literal["command_name"]`, where `command_name`
is what the user will type on the CLI or use in the API to create this
invocation. The `command_name` must be unique. The `type` must be assigned to
the value of the literal in the type hint.

### Inputs

```py
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description="The upscale level")
```

Inputs consist of three parts: a name, a type hint, and a `Field` with default,
description, and validation information. For example:

| Part | Value | Description |
| --------- | ------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |

Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this
field to be parsed with `None` as a value, which enables linking to previous
invocations. All fields should either provide a default value or allow `None` as
a value, so that they can be overwritten with a linked output from another
invocation.

The special type `ImageField` is also used here. All images are passed as
`ImageField`, which protects them from pydantic validation errors (since images
only ever come from links).

Finally, note that for all linking, the `type` of the linked fields must match.
If the `name` also matches, then the field can be **automatically linked** to a
previous invocation by name and matching.

### Config

```py
# Schema customisation
class Config(InvocationConfig):
    schema_extra = {
        "ui": {
            "tags": ["upscaling", "image"],
        },
    }
```

This is an optional configuration for the invocation. It inherits from
pydantic's model `Config` class, and it is used primarily to customize the
autogenerated OpenAPI schema.

The UI relies on the OpenAPI schema in two ways:

- An API client & Typescript types are generated from it. This happens at build
  time.
- The node editor parses the schema into a template used by the UI to create the
  node editor UI. This parsing happens at runtime.

In this example, a `ui` key has been added to the `schema_extra` dict to provide
some tags for the UI, to facilitate filtering nodes.

See the Schema Generation section below for more information.

### Invoke Function

```py
def invoke(self, context: InvocationContext) -> ImageOutput:
    image = context.services.images.get_pil_image(
        self.image.image_origin, self.image.image_name
    )
    results = context.services.restoration.upscale_and_reconstruct(
        image_list=[[image, 0]],
        upscale=(self.level, self.strength),
        strength=0.0,  # GFPGAN strength
        save_original=False,
        image_callback=None,
    )

    # Results are image and seed, unwrap for now
    # TODO: can this return multiple results?
    image_dto = context.services.images.create(
        image=results[0][0],
        image_origin=ResourceOrigin.INTERNAL,
        image_category=ImageCategory.GENERAL,
        node_id=self.id,
        session_id=context.graph_execution_state_id,
        is_intermediate=self.is_intermediate,
    )

    return ImageOutput(
        image=ImageField(
            image_name=image_dto.image_name,
            image_origin=image_dto.image_origin,
        ),
        width=image_dto.width,
        height=image_dto.height,
    )
```

The `invoke` function is the last portion of an invocation. It is provided an
`InvocationContext` which contains services to perform work as well as a
`session_id` for use as needed. It should return a class with output values that
derives from `BaseInvocationOutput`.

Before being called, the invocation will have all of its fields set from
defaults, inputs, and finally links (overriding in that order).

Assume that this invocation may be running simultaneously with other
invocations, may be running on another machine, or in other interesting
scenarios. If you need functionality, please provide it as a service in the
`InvocationServices` class, and make sure it can be overridden.

### Outputs

```py
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    # fmt: off
    type: Literal["image_output"] = "image_output"
    image: ImageField = Field(default=None, description="The output image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    # fmt: on

    class Config:
        schema_extra = {"required": ["type", "image", "width", "height"]}
```

Output classes look like an invocation class without the invoke method. Prefer
to use an existing output class if available, and prefer to name inputs the same
as outputs when possible, to promote automatic invocation linking.

## Schema Generation

Invocation, output and related classes are used to generate an OpenAPI schema.

### Required Properties

The schema generation treats all properties with default values as optional. This
makes sense internally, but when using these classes via the generated
schema, we end up with e.g. the `ImageOutput` class having its `image` property
marked as optional.

We know that this property will always be present, so the additional logic
needed to always check if the property exists adds a lot of extraneous cruft.

To fix this, we can leverage `pydantic`'s
[schema customisation](https://docs.pydantic.dev/usage/schema/#schema-customization)
to mark properties that we know will always be present as required.

Here's that `ImageOutput` class, without the needed schema customisation:

```python
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    # fmt: off
    type: Literal["image_output"] = "image_output"
    image: ImageField = Field(default=None, description="The output image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    # fmt: on
```

The OpenAPI schema that results from this `ImageOutput` will have the `type`,
`image`, `width` and `height` properties marked as optional, even though we know
they will always have a value.

Here is the same class with the needed schema customisation added:

```python
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    # fmt: off
    type: Literal["image_output"] = "image_output"
    image: ImageField = Field(default=None, description="The output image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    # fmt: on

    # Add schema customization
    class Config:
        schema_extra = {"required": ["type", "image", "width", "height"]}
```

With the customization in place, the schema will now show these properties as
required, obviating the need for extensive null checks in client code.

See this `pydantic` issue for discussion on this solution:
<https://github.com/pydantic/pydantic/discussions/4577>
-->

@@ -35,18 +35,17 @@ access.

## Backend

The backend is contained within the `./invokeai/backend` folder structure. To
get started however please install the development dependencies.
The backend is contained within the `./invokeai/backend` and `./invokeai/app` directories.
To get started please install the development dependencies.

From the root of the repository run the following command. Note the use of `"`.

```zsh
pip install ".[test]"
pip install ".[dev,test]"
```

This in an optional group of packages which is defined within the
`pyproject.toml` and will be required for testing the changes you make the the
code.
These are optional groups of packages which are defined within the `pyproject.toml`
and will be required for testing the changes you make to the code.

### Running Tests

@@ -76,6 +75,20 @@ pytest --cov; open ./coverage/html/index.html

![html-overview](../assets/contributing/html-overview.png)

### Reloading Changes

Experimenting with changes to the Python source code is a drag if you have to re-start the server — and re-load those multi-gigabyte models — after every change.

For a faster development workflow, add the `--dev_reload` flag when starting the server.
The server will watch for changes to all the Python files in the `invokeai` directory and apply those changes to the
running server on the fly.

This will allow you to avoid restarting the server (and reloading models) in most cases, but there are some caveats; see
the [jurigged documentation](https://github.com/breuleux/jurigged#caveats) for details.

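For example, with the virtual environment active, the server can be started with live reloading enabled:

```terminal
invokeai-web --dev_reload
```
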
## Front End

<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->

@@ -16,7 +16,7 @@ If you don't feel ready to make a code contribution yet, no problem! You can also

There are two paths to making a development contribution:

1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
   1. Additional items can be found on our roadmap <******************************link to roadmap>******************************. The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
   1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**

*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors time and effort and want to ensure that no one’s time is being misspent.*

@@ -65,7 +65,6 @@ InvokeAI:
    esrgan: true
    internet_available: true
    log_tokenization: false
    nsfw_checker: false
    patchmatch: true
    restore: true
...
@@ -136,19 +135,16 @@ command-line options by giving the `--help` argument:

```
(.venv) > invokeai-web --help
usage: InvokeAI [-h] [--host HOST] [--port PORT] [--allow_origins [ALLOW_ORIGINS ...]] [--allow_credentials | --no-allow_credentials]
                [--allow_methods [ALLOW_METHODS ...]] [--allow_headers [ALLOW_HEADERS ...]] [--esrgan | --no-esrgan]
                [--internet_available | --no-internet_available] [--log_tokenization | --no-log_tokenization]
                [--nsfw_checker | --no-nsfw_checker] [--patchmatch | --no-patchmatch] [--restore | --no-restore]
                [--always_use_cpu | --no-always_use_cpu] [--free_gpu_mem | --no-free_gpu_mem] [--max_cache_size MAX_CACHE_SIZE]
                [--max_vram_cache_size MAX_VRAM_CACHE_SIZE] [--precision {auto,float16,float32,autocast}]
                [--sequential_guidance | --no-sequential_guidance] [--xformers_enabled | --no-xformers_enabled]
                [--tiled_decode | --no-tiled_decode] [--root ROOT] [--autoimport_dir AUTOIMPORT_DIR] [--lora_dir LORA_DIR]
                [--embedding_dir EMBEDDING_DIR] [--controlnet_dir CONTROLNET_DIR] [--conf_path CONF_PATH] [--models_dir MODELS_DIR]
                [--legacy_conf_dir LEGACY_CONF_DIR] [--db_dir DB_DIR] [--outdir OUTDIR] [--from_file FROM_FILE]
                [--use_memory_db | --no-use_memory_db] [--model MODEL] [--log_handlers [LOG_HANDLERS ...]]
                [--log_format {plain,color,syslog,legacy}] [--log_level {debug,info,warning,error,critical}]
...
usage: InvokeAI [-h] [--host HOST] [--port PORT] [--allow_origins [ALLOW_ORIGINS ...]] [--allow_credentials | --no-allow_credentials] [--allow_methods [ALLOW_METHODS ...]]
                [--allow_headers [ALLOW_HEADERS ...]] [--esrgan | --no-esrgan] [--internet_available | --no-internet_available] [--log_tokenization | --no-log_tokenization]
                [--patchmatch | --no-patchmatch] [--restore | --no-restore]
                [--always_use_cpu | --no-always_use_cpu] [--free_gpu_mem | --no-free_gpu_mem] [--max_loaded_models MAX_LOADED_MODELS] [--max_cache_size MAX_CACHE_SIZE]
                [--max_vram_cache_size MAX_VRAM_CACHE_SIZE] [--gpu_mem_reserved GPU_MEM_RESERVED] [--precision {auto,float16,float32,autocast}]
                [--sequential_guidance | --no-sequential_guidance] [--xformers_enabled | --no-xformers_enabled] [--tiled_decode | --no-tiled_decode] [--root ROOT]
                [--autoimport_dir AUTOIMPORT_DIR] [--lora_dir LORA_DIR] [--embedding_dir EMBEDDING_DIR] [--controlnet_dir CONTROLNET_DIR] [--conf_path CONF_PATH]
                [--models_dir MODELS_DIR] [--legacy_conf_dir LEGACY_CONF_DIR] [--db_dir DB_DIR] [--outdir OUTDIR] [--from_file FROM_FILE]
                [--use_memory_db | --no-use_memory_db] [--model MODEL] [--log_handlers [LOG_HANDLERS ...]] [--log_format {plain,color,syslog,legacy}]
                [--log_level {debug,info,warning,error,critical}] [--version | --no-version]
```

## The Configuration Settings

@@ -178,24 +174,28 @@ These configuration settings allow you to enable and disable various InvokeAI features

| `esrgan` | `true` | Activate the ESRGAN upscaling options |
| `internet_available` | `true` | When a resource is not available locally, try to fetch it via the internet |
| `log_tokenization` | `false` | Before each text2image generation, print a color-coded representation of the prompt to the console; this can help understand why a prompt is not working as expected |
| `nsfw_checker` | `true` | Activate the NSFW checker to blur out risque images |
| `patchmatch` | `true` | Activate the "patchmatch" algorithm for improved inpainting |
| `restore` | `true` | Activate the facial restoration features (DEPRECATED; restoration features will be removed in 3.0.0) |

### Memory/Performance
### Generation

These options tune InvokeAI's memory and performance characteristics.

| Setting | Default Value | Description |
|---------|---------------|-------------|
| `always_use_cpu` | `false` | Use the CPU to generate images, even if a GPU is available |
| `free_gpu_mem` | `false` | Aggressively free up GPU memory after each operation; this will allow you to run in low-VRAM environments with some performance penalties |
| `max_cache_size` | `6` | Amount of CPU RAM (in GB) to reserve for caching models in memory; more cache allows you to keep models in memory and switch among them quickly |
| `max_vram_cache_size` | `2.75` | Amount of GPU VRAM (in GB) to reserve for caching models in VRAM; more cache speeds up generation but reduces the size of the images that can be generated. This can be set to zero to maximize the amount of memory available for generation. |
| `precision` | `auto` | Floating point precision. One of `auto`, `float16` or `float32`. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system |
| `sequential_guidance` | `false` | Calculate guidance in serial rather than in parallel, lowering memory requirements at the cost of some performance loss |
| `xformers_enabled` | `true` | If the x-formers memory-efficient attention module is installed, activate it for better memory usage and generation speed |
| `tiled_decode` | `false` | If true, then during the VAE decoding phase the image will be decoded a section at a time, reducing memory consumption at the cost of a performance hit |

| Setting | Default Value | Description |
|-----------------------|---------------|-------------|
| `sequential_guidance` | `false` | Calculate guidance in serial rather than in parallel, lowering memory requirements at the cost of some performance loss |
| `attention_type` | `auto` | Select the type of attention to use. One of `auto`, `normal`, `xformers`, `sliced`, or `torch-sdp` |
| `attention_slice_size` | `auto` | When "sliced" attention is selected, set the slice size. One of `auto`, `balanced`, `max` or the integers 1-8 |
| `force_tiled_decode` | `false` | Force the VAE step to decode in tiles, reducing memory consumption at the cost of performance |

### Device

These options configure the generation execution device.

| Setting | Default Value | Description |
|-------------|---------------|-------------|
| `device` | `auto` | Preferred execution device. One of `auto`, `cpu`, `cuda`, `cuda:1`, `mps`. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. |
| `precision` | `auto` | Floating point precision. One of `auto`, `float16` or `float32`. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system |

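As an illustration, a hypothetical `invokeai.yaml` fragment exercising the settings above might look like this (the exact grouping and nesting of keys may differ in your installed version):

```yaml
InvokeAI:
  Generation:
    sequential_guidance: false
    attention_type: sliced
    attention_slice_size: auto
    force_tiled_decode: false
  Device:
    device: auto
    precision: float16
```
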
### Paths

@@ -1,206 +0,0 @@

# Nodes Editor (Experimental)

🚨
*The node editor is experimental. We've made it accessible because we use it to develop the application, but we have not addressed the many known rough edges. It's very easy to shoot yourself in the foot, and we cannot offer support for it until it sees full release (ETA v3.1). Everything is subject to change without warning.*
🚨

The nodes editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. The node processing flow is usually done from left (inputs) to right (outputs), though linearity can become abstracted the more complex the node graph becomes. Node inputs and outputs are connected by dragging connectors from node to node.

To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node could have multiple inputs and outputs functioning at the same (or different) time, but all node outputs pass information onward like a power bar passes electricity. Not all outputs are compatible with all inputs, however - each node has different constraints on how it is expecting to input/output information. In general, node outputs are colour-coded to match compatible inputs of other nodes.

## Anatomy of a Node

Individual nodes are made up of the following:

- Inputs: Edge points on the left side of the node window where you connect outputs from other nodes.
- Outputs: Edge points on the right side of the node window where you connect to inputs on other nodes.
- Options: Various options which are either manually configured, or overridden by connecting an output from another node to the input.

## Diffusion Overview

Taking the time to understand the diffusion process will help you to understand how to set up your nodes in the nodes editor.

There are two main spaces Stable Diffusion works in: image space and latent space.

Image space represents images in pixel form that you look at. Latent space represents compressed inputs. It’s in latent space that Stable Diffusion processes images. A VAE (Variational Auto Encoder) is responsible for compressing and encoding inputs into latent space, as well as decoding outputs back into image space.

When you generate an image using text-to-image, multiple steps occur in latent space:

1. Random noise is generated at the chosen height and width. The noise’s characteristics are dictated by the chosen (or not chosen) seed. This noise tensor is passed into latent space. We’ll call this noise A.
1. Using a model’s U-Net, a noise predictor examines noise A, and the words tokenized by CLIP from your prompt (conditioning). It generates its own noise tensor to predict what the final image might look like in latent space. We’ll call this noise B.
1. Noise B is subtracted from noise A in an attempt to create a final latent image indicative of the inputs. This step is repeated for the number of sampler steps chosen.
1. The VAE decodes the final latent image from latent space into image space.

image-to-image is a similar process, with only step 1 being different:

1. The input image is encoded from image space into latent space by the VAE. Noise is then added to the input latent image. Denoising Strength dictates how much noise is added, 0 being none, and 1 being all-encompassing. We’ll call this noise A. The process is then the same as steps 2-4 in the text-to-image explanation above.

Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (where noise prediction occurs given a prompt and initial noise tensor).

A noise scheduler (e.g. DPM++ 2M Karras) schedules the subtraction of noise from the latent image across the sampler steps chosen (step 3 above). Less noise is usually subtracted at higher sampler steps.

## Node Types (Base Nodes)

| Node <img width=160 align="right"> | Function |
| ---------------------------------- | --------------------------------------------------------------------------------------|
| Add | Adds two numbers |
| CannyImageProcessor | Canny edge detection for ControlNet |
| ClipSkip | Skips layers in the CLIP text_encoder model |
| Collect | Collects values into a collection |
| Prompt (Compel) | Parses a prompt into conditioning using the compel package |
| ContentShuffleImageProcessor | Applies content shuffle processing to image |
| ControlNet | Collects ControlNet info to pass to other nodes |
| CvInpaint | Simple inpainting using OpenCV |
| Divide | Divides two numbers |
| DynamicPrompt | Parses a prompt using the adieyal/dynamicprompts random or combinatorial generator |
| FloatLinearRange | Creates a range |
| HedImageProcessor | Applies HED edge detection to image |
| ImageBlur | Blurs an image |
| ImageChannel | Gets a channel from an image |
| ImageCollection | Load a collection of images and provide it as output |
| ImageConvert | Converts an image to a different mode |
| ImageCrop | Crops an image to a specified box. The box can be outside of the image. |
| ImageInverseLerp | Inverse linear interpolation of all pixels of an image |
| ImageLerp | Linear interpolation of all pixels of an image |
| ImageMultiply | Multiplies two images together using `PIL.ImageChops.Multiply()` |
| ImagePaste | Pastes an image into another image |
| ImageProcessor | Base class for invocations that preprocess images for ControlNet |
| ImageResize | Resizes an image to specific dimensions |
| ImageScale | Scales an image by a factor |
| ImageToLatents | Encodes an image into latents |
| InfillColor | Infills transparent areas of an image with a solid color |
| InfillPatchMatch | Infills transparent areas of an image using the PatchMatch algorithm |
| InfillTile | Infills transparent areas of an image with tiles of the image |
| Inpaint | Generates an image using inpaint |
| Iterate | Iterates over a list of items |
| LatentsToImage | Generates an image from latents |
| LatentsToLatents | Generates latents using latents as base image |
| LeresImageProcessor | Applies leres processing to image |
| LineartAnimeImageProcessor | Applies line art anime processing to image |
| LineartImageProcessor | Applies line art processing to image |
| LoadImage | Load an image and provide it as output |
| Lora Loader | Apply selected lora to unet and text_encoder |
| Model Loader | Loads a main model, outputting its submodels |
| MaskFromAlpha | Extracts the alpha channel of an image as a mask |
| MediapipeFaceProcessor | Applies mediapipe face processing to image |
| MidasDepthImageProcessor | Applies Midas depth processing to image |
| MlsdImageProcessor | Applies MLSD processing to image |
| Multiply | Multiplies two numbers |
| Noise | Generates latent noise |
| NormalbaeImageProcessor | Applies NormalBAE processing to image |
| OpenposeImageProcessor | Applies Openpose processing to image |
| ParamFloat | A float parameter |
| ParamInt | An integer parameter |
| PidiImageProcessor | Applies PIDI processing to an image |
| Progress Image | Displays the progress image in the Node Editor |
| RandomInt | Outputs a single random integer |
| RandomRange | Creates a collection of random numbers |
| Range | Creates a range of numbers from start to stop with step |
| RangeOfSize | Creates a range from start to start + size with step |
| ResizeLatents | Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. |
| RestoreFace | Restores faces in the image |
| ScaleLatents | Scales latents by a given factor |
| SegmentAnythingProcessor | Applies segment anything processing to image |
| ShowImage | Displays a provided image, and passes it forward in the pipeline |
| StepParamEasing | Experimental per-step parameter easing for denoising steps |
| Subtract | Subtracts two numbers |
| TextToLatents | Generates latents from conditioning |
| TileResampleProcessor | Base class for invocations that preprocess images for ControlNet |
| Upscale | Upscales an image |
| VAE Loader | Loads a VAE model, outputting a VaeLoaderOutput |
| ZoeDepthImageProcessor | Applies Zoe depth processing to image |
## Node Grouping Concepts

There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).

### Noise

As described, an initial noise tensor is necessary for the latent diffusion process. As a result, all non-image *ToLatents nodes require a noise node input.

<img width="654" alt="groupsnoise" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/2e8d297e-ad55-4d27-bc93-c119dad2a2c5">

### Conditioning

As described, conditioning is necessary for the latent diffusion process, whether empty or not. As a result, all non-image *ToLatents nodes require positive and negative conditioning inputs. Conditioning is reliant on a CLIP tokenizer provided by the Model Loader node.

<img width="1024" alt="groupsconditioning" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/f8f7ad8a-8d9c-418e-b5ad-1437b774b27e">

### Image Space & VAE

The ImageToLatents node doesn't require a noise node input, but requires a VAE input to convert the image from image space into latent space. In reverse, the LatentsToImage node requires a VAE input to convert from latent space back into image space.

<img width="637" alt="groupsimgvae" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/dd99969c-e0a8-4f78-9b17-3ffe179cef9a">

### Defined & Random Seeds

It is common to want to use both the same seed (for continuity) and random seeds (for variance). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.

<img width="922" alt="groupsrandseed" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/af55bc20-60f6-438e-aba5-3ec871443710">

### Control

Control means guiding the diffusion process to adhere to a defined input or structure. Control can be provided as input to non-image *ToLatents nodes from ControlNet nodes. ControlNet nodes usually require an image processor node which converts an input image for use with ControlNet.

<img width="805" alt="groupscontrol" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/cc9c5de7-23a7-46c8-bbad-1f3609d999a6">

### LoRA

The Lora Loader node lets you load a LoRA (say that ten times fast) and pass it as output to both the Prompt (Compel) and non-image *ToLatents nodes. A model's CLIP tokenizer is passed through the LoRA into Prompt (Compel), where it affects conditioning. A model's U-Net is also passed through the LoRA into a non-image *ToLatents node, where it affects noise prediction.

<img width="993" alt="groupslora" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/630962b0-d914-4505-b3ea-ccae9b0269da">

### Scaling

Use the ImageScale, ScaleLatents, and Upscale nodes to upscale images and/or latent images. Which node to use depends on the context. However, be aware that latents are already noisy and compressed at their original resolution; scaling an image rather than its latents can produce more detailed results.

<img width="644" alt="groupsallscale" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/99314f05-dd9f-4b6d-b378-31de55346a13">

### Iteration + Multiple Images as Input

Iteration is a common concept in programming, and means repeating a process over a series of inputs. In nodes, you're able to use the Iterate node to iterate through collections usually gathered by the Collect node. The Iterate node has many potential uses, from processing a collection of images one after another, to varying seeds across multiple image generations, and more. This screenshot demonstrates how to collect several images and pass them out one at a time.

<img width="788" alt="groupsiterate" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/4af5ca27-82c9-4018-8c5b-024d3ee0a121">

### Multiple Image Generation + Random Seeds

Multiple image generation in the node editor is done using the RandomRange node. In this case, the 'Size' field represents the number of images to generate. As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection.

To control seeds across generations takes some care. The first row in the screenshot will generate multiple images with different seeds, but using the same RandomRange parameters across invocations will result in the same group of random seeds being used across the images, producing repeatable results. In the second row, adding the RandomInt node as input to RandomRange's 'Seed' edge point ensures that seeds are varied across all images across invocations, producing varied results.

<img width="1027" alt="groupsmultigenseeding" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/518d1b2b-fed1-416b-a052-ab06552521b3">
## Examples

With our knowledge of node grouping and the diffusion process, let’s break down some basic graphs in the nodes editor. Note that a node's options can be overridden by inputs from other nodes. These examples aren't strict rules to follow and only demonstrate some basic configurations.

### Basic text-to-image Node Graph

<img width="875" alt="nodest2i" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/17c67720-c376-4db8-94f0-5e00381a61ee">

- Model Loader: A necessity to generating images (as we’ve read above). We choose our model from the dropdown. It outputs a U-Net, CLIP tokenizer, and VAE.
- Prompt (Compel): Another necessity. Two prompt nodes are created. One will output positive conditioning (what you want, ‘dog’), one will output negative (what you don’t want, ‘cat’). They both input the CLIP tokenizer that the Model Loader node outputs.
- Noise: Consider this noise A from step one of the text-to-image explanation above. Choose a seed number, width, and height.
- TextToLatents: This node takes many inputs for converting and processing text and noise into a latent image, hence the name TextTo**Latents**. In this setup, it inputs positive and negative conditioning from the prompt nodes for processing (step 2 above). It inputs noise from the noise node for processing (steps 2 & 3 above). Lastly, it inputs a U-Net from the Model Loader node for processing (step 2 above). It outputs latents for use in the next LatentsToImage node. Choose the number of sampler steps, CFG scale, and scheduler.
- LatentsToImage: This node takes in processed latents from the TextToLatents node, and the model’s VAE from the Model Loader node, which is responsible for decoding latents back into image space, hence the name LatentsTo**Image**. This node is the last stop, and once the image is decoded, it is saved to the gallery.

### Basic image-to-image Node Graph

<img width="998" alt="nodesi2i" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/3f2c95d5-cee7-4415-9b79-b46ee60a92fe">

- Model Loader: Choose a model from the dropdown.
- Prompt (Compel): Two prompt nodes. One positive (dog), one negative (dog). Same CLIP inputs from the Model Loader node as before.
- ImageToLatents: Upload a source image directly in the node window, drag and drop one in from the gallery, or pass one in as input. The ImageToLatents node inputs the VAE from the Model Loader node to encode the chosen image from image space into latent space, hence the name ImageTo**Latents**. It outputs latents for use in the next LatentsToLatents node. It also outputs the source image's width and height for use in the next Noise node if the final image is to be the same dimensions as the source image.
- Noise: A noise tensor is created with the width and height of the source image, and connected to the next LatentsToLatents node. Notice the width and height fields are overridden by the input from the ImageToLatents width and height outputs.
- LatentsToLatents: The inputs and options are nearly identical to TextToLatents, except that LatentsToLatents also takes latents as an input. Considering our source image is already converted to latents in the last ImageToLatents node, and text + noise are no longer the only inputs to process, we use the LatentsToLatents node.
- LatentsToImage: Like previously, the LatentsToImage node will use the VAE from the Model Loader as input to decode the latents from LatentsToLatents into image space, and save it to the gallery.

### Basic ControlNet Node Graph

<img width="703" alt="nodescontrol" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/b02ded86-ceb4-44a2-9910-e19ad184d471">

- Model Loader
- Prompt (Compel)
- Noise: The width and height of the CannyImageProcessor ControlNet image are passed in to set the dimensions of the noise passed to TextToLatents.
- CannyImageProcessor: The CannyImageProcessor node is used to process the source image being used as a ControlNet. Each ControlNet processor node applies control in different ways, and has some different options to configure. Width and height are passed to noise, as mentioned. The processed ControlNet image is output to the ControlNet node.
- ControlNet: Select the type of control model. In this case, canny is chosen as the CannyImageProcessor was used to generate the ControlNet image. Configure the control node options, and pass the control output to TextToLatents.
- TextToLatents: Similar to the basic text-to-image example, except ControlNet is passed to the control input edge point.
- LatentsToImage
@@ -16,21 +16,24 @@ Output Example:

---

-## **Seamless Tiling**
+## **Invisible Watermark**

-The seamless tiling mode causes generated images to seamlessly tile
-with themselves, creating repetitive wallpaper-like patterns. To use it,
-activate the Seamless Tiling option in the Web GUI and then select
-whether to tile on the X (horizontal) and/or Y (vertical) axes. Tiling
-will then be active for the next set of generations.
+In keeping with the principles for responsible AI generation, and to
+help AI researchers avoid synthetic images contaminating their
+training sets, InvokeAI adds an invisible watermark to each of the
+final images it generates. The watermark consists of the text
+"InvokeAI" and can be viewed using the
+[invisible-watermark](https://github.com/ShieldMnt/invisible-watermark)
+tool.

-A nice prompt to test seamless tiling with is:
+Watermarking is controlled using the `invisible_watermark` setting in
+`invokeai.yaml`. To turn it off, add the following line under the `Features`
+category.

```
-pond garden with lotus by claude monet"
+invisible_watermark: false
```

---
## **Weighted Prompts**

@@ -39,34 +42,10 @@ priority to them, by adding `:<percent>` to the end of the section you wish to u
example consider this prompt:

```bash
-tabby cat:0.25 white duck:0.75 hybrid
+(tabby cat):0.25 (white duck):0.75 hybrid
```

This will tell the sampler to invest 25% of its effort on the tabby cat aspect of the image and 75%
on the white duck aspect (surprisingly, this example actually works). The prompt weights can use any
combination of integers and floating point numbers, and they do not need to add up to 1.
## **Thresholding and Perlin Noise Initialization Options**

Under the Noise section of the Web UI, you will find two options named
Perlin Noise and Noise Threshold. [Perlin
noise](https://en.wikipedia.org/wiki/Perlin_noise) is a type of
structured noise used to simulate terrain and other natural
textures. The slider controls the percentage of perlin noise that will
be mixed into the image at the beginning of generation. Adding a little
perlin noise to a generation will alter the image substantially.

The noise threshold limits the range of the latent values during
sampling and helps combat the oversharpening seen with higher CFG
scale values.
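Conceptually, thresholding bounds latent values after each sampling step. A minimal sketch of the idea, assuming a simple hard clamp (InvokeAI's actual implementation may rescale rather than clamp):

```python
import torch

def apply_threshold(latents: torch.Tensor, threshold: float) -> torch.Tensor:
    # A threshold of 0 is treated as "disabled".
    if threshold <= 0:
        return latents
    # High CFG scales push latent values toward extremes; clamping them
    # reins in the oversaturated, oversharpened look.
    return latents.clamp(-threshold, threshold)

latents = torch.randn(1, 4, 64, 64) * 5.0  # stand-in for noisy latents
print(apply_threshold(latents, 2.0).abs().max())  # tensor(2.)
```
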
For better intuition into what these options do in practice:



In generating this graphic, perlin noise at initialization was
programmatically varied going across the diagram through the values 0.0,
0.1, 0.2, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0, and the threshold was varied
going down through the values 0, 1, 2, 3, 4, 5, 10, 20, 100. The other options were
fixed using the prompt "a portrait of a beautiful young lady", a CFG of
20, 100 steps, and a seed of 1950357039.

@@ -4,35 +4,13 @@ title: Postprocessing

# :material-image-edit: Postprocessing

## Intro

-This extension provides the ability to restore faces and upscale images.
+This section details the ability to improve faces and upscale images.

## Face Fixing

-The default face restoration module is GFPGAN. The default upscale is
-Real-ESRGAN. For an alternative face restoration module, see
-[CodeFormer Support](#codeformer-support) below.
+As of InvokeAI 3.0, the easiest way to improve faces created during image generation is through the Inpainting functionality of the Unified Canvas. Simply add the image containing the faces that you would like to improve to the canvas, mask the face to be improved and run the invocation. For best results, make sure to use an inpainting specific model; these are usually identified by the "-inpainting" term in the model name.

-As of version 1.14, environment.yaml will install the Real-ESRGAN package into
-the standard install location for python packages, and will put GFPGAN into a
-subdirectory of "src" in the InvokeAI directory. Upscaling with Real-ESRGAN
-should "just work" without further intervention. Simply indicate the desired scale on
-the popup in the Web GUI.

-**GFPGAN** requires a series of downloadable model files to work. These are
-loaded when you run `invokeai-configure`. If GFPGAN is failing with an
-error, please run the following from the InvokeAI directory:

-```bash
-invokeai-configure
-```

-If you do not run this script in advance, the GFPGAN module will attempt to
-download the model files the first time you try to perform facial
-reconstruction.

-### Upscaling
+## Upscaling

Open the upscaling dialog by clicking on the "expand" icon located
above the image display area in the Web UI:
@@ -41,82 +19,23 @@ above the image display area in the Web UI:


</figure>

-There are three different upscaling parameters that you can
-adjust. The first is the scale itself, either 2x or 4x.
+The default upscaling option is Real-ESRGAN x2 Plus, which will scale your image by a factor of two. This means upscaling a 512x512 image will result in a new 1024x1024 image.

-The second is the "Denoising Strength." Higher values will smooth out
-the image and remove digital chatter, but may lose fine detail at
-higher values.
+Other options are the x4 upscalers, which will scale your image by a factor of 4.

-Third, "Upscale Strength" allows you to set the
-scaling strength between `0` and `1.0` to control the intensity of the
-scaling. AI upscalers generally tend to smooth out texture details. If
-you wish to retain some of those for natural looking results, we
-recommend using values between `0.5` and `0.8`.

-[This figure](../assets/features/upscaling-montage.png) illustrates
-the effects of denoising and strength. The original image was 512x512,
-4x scaled to 2048x2048. The "original" version on the upper left was
-scaled using simple pixel averaging. The remainder use the ESRGAN
-upscaling algorithm at different levels of denoising and strength.

-<figure markdown>
-{ width=720 }
-</figure>

-Both denoising and strength default to 0.75.

-### Face Restoration

-InvokeAI offers two alternative face restoration algorithms,
-[GFPGAN](https://github.com/TencentARC/GFPGAN) and
-[CodeFormer](https://huggingface.co/spaces/sczhou/CodeFormer). These
-algorithms improve the appearance of faces, particularly eyes and
-mouths. Issues with faces are less common with the latest set of
-Stable Diffusion models than with the original 1.4 release, but the
-restoration algorithms can still make a noticeable improvement in
-certain cases. You can also apply restoration to old photographs you
-upload.

-To access face restoration, click the "smiley face" icon in the
-toolbar above the InvokeAI image panel. You will be presented with a
-dialog that offers a choice between the two algorithms and sliders that
-allow you to adjust their parameters. Alternatively, you may open the
-left-hand accordion panel labeled "Face Restoration" and have the
-restoration algorithm of your choice applied to generated images
-automatically.

-Like upscaling, there are a number of parameters that adjust the face
-restoration output. GFPGAN has a single parameter, `strength`, which
-controls how much the algorithm is allowed to adjust the
-image. CodeFormer has two parameters, `strength`, and `fidelity`,
-which together control the quality of the output image as described in
-the [CodeFormer project
-page](https://shangchenzhou.com/projects/CodeFormer/). Default values
-are 0.75 for both parameters, which achieves a reasonable balance
-between changing the image too much and not enough.

-[This figure](../assets/features/restoration-montage.png) illustrates
-the effects of adjusting GFPGAN and CodeFormer parameters.

-<figure markdown>
-{ width=720 }
-</figure>

!!! note

-    GFPGAN and Real-ESRGAN are both memory intensive. In order to avoid crashes and memory overloads
+    Real-ESRGAN is memory intensive. In order to avoid crashes and memory overloads
    during the Stable Diffusion process, these effects are applied after Stable Diffusion has completed
    its work.

    In single image generations, you will see the output right away but when you are using multiple
-    iterations, the images will first be generated and then upscaled and face restored after that
+    iterations, the images will first be generated and then upscaled after that
    process is complete. While the image generation is taking place, you will still be able to preview
    the base images.

## How to disable

-If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
-you can disable them on the invoke.py command line with the `--no_restore` and
-`--no_esrgan` options, respectively.
+If, for some reason, you do not wish to load the ESRGAN libraries,
+you can disable them on the invoke.py command line with the `--no_esrgan` option.

@@ -4,80 +4,6 @@ title: Prompting-Features

# :octicons-command-palette-24: Prompting-Features

## **Negative and Unconditioned Prompts**

Any words between a pair of square brackets will instruct Stable
Diffusion to attempt to ban the concept from the generated image. The
same effect is achieved by placing words in the "Negative Prompts"
textbox in the Web UI.

```text
this is a test prompt [not really] to make you understand [cool] how this works.
```

In the above statement, the words `not really cool` will be ignored by Stable
Diffusion.

Here's an example that shows what this feature does.

original prompt:

`#!bash "A fantastical translucent pony made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve"`

`#!bash parameters: steps=20, dimensions=512x768, CFG=7.5, Scheduler=k_euler_a, seed=1654590180`

<figure markdown>



</figure>

That image has a woman, so if we want the horse without a rider, we can
influence the image not to have a woman by putting [woman] in the prompt, like
this:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]"`
(same parameters as above)

<figure markdown>



</figure>

That's nice - but say we also don't want the image to be quite so blue. We can
add "blue" to the list of negative prompts, so it's now [woman blue]:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]"`
(same parameters as above)

<figure markdown>



</figure>

Getting close - but there's no sense in having a saddle when our horse doesn't
have a rider, so we'll add one more negative prompt: [woman blue saddle].

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]"`
(same parameters as above)

<figure markdown>



</figure>

!!! notes "Notes about this feature:"

    * The only requirement for words to be ignored is that they are in between a pair of square brackets.
    * You can provide multiple words within the same bracket.
    * You can provide multiple brackets with multiple words in different places of your prompt. That works just fine.
    * To improve typical anatomy problems, you can add negative prompts like `[bad anatomy, extra legs, extra arms, extra fingers, poorly drawn hands, poorly drawn feet, disfigured, out of frame, tiling, bad art, deformed, mutated]`.

---

## **Prompt Syntax Features**

The InvokeAI prompting language has the following features:
@@ -102,9 +28,6 @@ The following syntax is recognised:
`a tall thin man (picking (apricots)1.3)1.1`. (`+` is equivalent to 1.1, `++`
is pow(1.1,2), `+++` is pow(1.1,3), etc; `-` means 0.9, `--` means pow(0.9,2),
etc. A short sketch of this arithmetic is shown below.)
- attention also applies to `[unconditioning]` so
  `a tall thin man picking apricots [(ladder)0.01]` will _very gently_ nudge SD
  away from trying to draw the man on a ladder

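To make the weight arithmetic concrete, here is a small sketch; it illustrates the rules above and is not the parser's actual code:

```python
def attention_weight(symbols: str) -> float:
    """Weight implied by a run of '+' or '-' characters, per the rules above."""
    base = 1.1 if symbols.startswith("+") else 0.9
    return base ** len(symbols)

for run in ("+", "++", "+++", "-", "--"):
    print(run, round(attention_weight(run), 3))
# +   1.1
# ++  1.21
# +++ 1.331
# -   0.9
# --  0.81
```
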
You can use this to increase or decrease the amount of something. Starting from
this prompt of `a man picking apricots from a tree`, let's see what happens if
@@ -150,7 +73,7 @@ Or, alternatively, with more man:
| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- |
|  |  |  |  |

-### Blending between prompts
+### Prompt Blending

- `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,1)`
- The existing prompt blending using `:<weight>` will continue to be supported -
@@ -168,6 +91,24 @@ Or, alternatively, with more man:
See the section below on "Prompt Blending" for more information about how this
works.

### Prompt Conjunction
Join multiple clauses together to create a conjoined prompt. Each clause will be passed to CLIP separately.

For example, the prompt:

```bash
"A mystical valley surrounded by towering granite cliffs, watercolor, warm"
```

Can be used with .and():

```bash
("A mystical valley", "surrounded by towering granite cliffs", "watercolor", "warm").and()
```

Each will give you different results - try them out and see what you prefer!


### Cross-Attention Control ('prompt2prompt')

Sometimes an image you generate is almost right, and you just want to change one
@@ -190,7 +131,7 @@ For example, consider the prompt `a cat.swap(dog) playing with a ball in the for

- For multiple word swaps, use parentheses: `a (fluffy cat).swap(barking dog) playing with a ball in the forest`.
- To swap a comma, use quotes: `a ("fluffy, grey cat").swap("big, barking dog") playing with a ball in the forest`.
-- Supports options `t_start` and `t_end` (each 0-1) loosely corresponding to bloc97's `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
+- Supports options `t_start` and `t_end` (each 0-1) loosely corresponding to [bloc97's](https://github.com/bloc97/CrossAttentionControl) `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
  intuitively understand. `t_start` and `t_end` are used to control on which steps cross-attention control should run. With the default values `t_start=0` and `t_end=1`, cross-attention control is active on every step of image generation. Other values can be used to turn cross-attention control off for part of the image generation process.
- For example, if doing a diffusion with 10 steps for the prompt `a cat.swap(dog, t_start=0.3, t_end=1.0) playing with a ball in the forest`, the first 3 steps will be run as `a cat playing with a ball in the forest`, while the last 7 steps will run as `a dog playing with a ball in the forest`, but the pixels that represent `dog` will be locked to the pixels that would have represented `cat` if the `cat` prompt had been used instead. A sketch of this step gating follows the list.
- Conversely, for `a cat.swap(dog, t_start=0, t_end=0.7) playing with a ball in the forest`, the first 7 steps will run as `a dog playing with a ball in the forest` with the pixels that represent `dog` locked to the same pixels that would have represented `cat` if the `cat` prompt was being used instead. The final 3 steps will just run `a cat playing with a ball in the forest`.
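The following sketch shows how `t_start`/`t_end` gate which prompt is active at each step. This is illustrative pseudologic only, not the actual cross-attention implementation:

```python
def prompt_for_step(step: int, total_steps: int, original: str, edited: str,
                    t_start: float, t_end: float) -> str:
    t = step / total_steps  # progress through sampling, from 0 toward 1
    return edited if t_start <= t <= t_end else original

# a cat.swap(dog, t_start=0.3, t_end=1.0) over 10 steps:
for step in range(10):
    print(step, prompt_for_step(step, 10, "a cat ...", "a dog ...", 0.3, 1.0))
# steps 0-2 use the cat prompt, steps 3-9 the dog prompt
```
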
@@ -201,7 +142,7 @@ Prompt2prompt `.swap()` is not compatible with xformers, which will be temporari
The `prompt2prompt` code is based off
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).

-### Escaping parantheses () and speech marks ""
+### Escaping parentheses () and speech marks ""

If the model you are using has parentheses () or speech marks "" as part of its
syntax, you will need to "escape" these using a backslash, so that `(my_keyword)`
@@ -212,23 +153,16 @@ the parentheses as part of the prompt syntax and it will get confused.

## **Prompt Blending**

-You may blend together different sections of the prompt to explore the AI's
+You may blend together prompts to explore the AI's
latent semantic space and generate interesting (and often surprising!)
variations. The syntax is:

```bash
-blue sphere:0.25 red cube:0.75 hybrid
+("prompt #1", "prompt #2").blend(0.25, 0.75)
```

-This will tell the sampler to blend 25% of the concept of a blue sphere with 75%
-of the concept of a red cube. The blend weights can use any combination of
-integers and floating point numbers, and they do not need to add up to 1.
-Everything to the left of the `:XX` up to the previous `:XX` is used for
-merging, so the overall effect is:

-```bash
-0.25 * "blue sphere" + 0.75 * "white duck" + hybrid
-```
+This will tell the sampler to blend 25% of the concept of prompt #1 with 75%
+of the concept of prompt #2. It is recommended to keep the sum of the weights to around 1.0, but interesting things might happen if you go outside of this range.
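Conceptually, blending combines the conditioning of the two prompts as a weighted sum before it guides generation. A minimal sketch of the idea; the names and tensor shapes below are placeholders, not InvokeAI/compel internals:

```python
import torch

def blend_conditioning(cond_a: torch.Tensor, cond_b: torch.Tensor,
                       weight_a: float, weight_b: float) -> torch.Tensor:
    # Weighted sum of the two prompts' conditioning tensors.
    return weight_a * cond_a + weight_b * cond_b

# Stand-ins for the conditioning of ("prompt #1", "prompt #2").blend(0.25, 0.75)
cond_1 = torch.randn(1, 77, 768)
cond_2 = torch.randn(1, 77, 768)
blended = blend_conditioning(cond_1, cond_2, 0.25, 0.75)
print(blended.shape)  # torch.Size([1, 77, 768])
```
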
Because you are exploring the "mind" of the AI, the AI's way of mixing two
concepts may not match yours, leading to surprising effects. To illustrate, here
@@ -236,13 +170,14 @@ are three images generated using various combinations of blend weights. As
usual, unless you fix the seed, the prompts will give you different results each
time you run them.

-<figure markdown>
+Let's examine how this affects image generation results:

-### "blue sphere, red cube, hybrid"
+```bash
+"blue sphere, red cube, hybrid"
+```

-</figure>

-This example doesn't use melding at all and represents the default way of mixing
+This example doesn't use blending at all and represents the default way of mixing
concepts.

<figure markdown>
@@ -251,55 +186,47 @@ concepts.

</figure>

-It's interesting to see how the AI expressed the concept of "cube" as the four
-quadrants of the enclosing frame. If you look closely, there is depth there, so
-the enclosing frame is actually a cube.
+It's interesting to see how the AI expressed the concept of "cube" within the sphere. If you look closely, there is depth there, so the enclosing frame is actually a cube.

<figure markdown>

-### "blue sphere:0.25 red cube:0.75 hybrid"
+```bash
+("blue sphere", "red cube").blend(0.25, 0.75)
+```



</figure>

-Now that's interesting. We get neither a blue sphere nor a red cube, but a red
-sphere embedded in a brick wall, which represents a melding of concepts within
-the AI's "latent space" of semantic representations. Where is Ludwig
-Wittgenstein when you need him?
+Now that's interesting. We get an image with a resemblance of a red cube, with a hint of blue shadows, which represents a melding of concepts within the AI's "latent space" of semantic representations.

<figure markdown>

-### "blue sphere:0.75 red cube:0.25 hybrid"
+```bash
+("blue sphere", "red cube").blend(0.75, 0.25)
+```



</figure>

-Definitely more blue-spherey. The cube is gone entirely, but it's really cool
-abstract art.
+Definitely more blue-spherey.

<figure markdown>

-### "blue sphere:0.5 red cube:0.5 hybrid"
+```bash
+("blue sphere", "red cube").blend(0.5, 0.5)
+```
</figure>

<figure markdown>


</figure>

-Whoa...! I see blue and red, but no spheres or cubes. Is the word "hybrid"
-summoning up the concept of some sort of scifi creature? Let's find out.
+Whoa...! I see blue and red, and if I squint, spheres and cubes.

-<figure markdown>

-### "blue sphere:0.5 red cube:0.5"

-

-</figure>

-Indeed, removing the word "hybrid" produces an image that is more like what we'd
-expect.

## Dynamic Prompts


@@ -1,12 +1,40 @@
---
-title: The NSFW Checker
+title: Watermarking, NSFW Image Checking
---

-# :material-image-off: NSFW Checker
+# :material-image-off: Invisible Watermark and the NSFW Checker

## Watermarking

InvokeAI does not apply watermarking to images by default. However,
many computer scientists working in the field of generative AI worry
that a flood of computer-generated imagery will contaminate the image
data sets needed to train future generations of generative models.

InvokeAI offers an optional watermarking mode that writes a small bit
of text, **InvokeAI**, into each image that it generates using an
"invisible" watermarking library that spreads the information
throughout the image in a way that is not perceptible to the human
eye. If you are planning to share your generated images on
internet-accessible services, we encourage you to activate the
invisible watermark mode in order to help preserve the digital image
environment.

The downside of watermarking is that it increases the size of the
image moderately, and has been reported by some individuals to degrade
image quality. Your mileage may vary.

To read the watermark in an image, activate the InvokeAI virtual
environment (called the "developer's console" in the launcher) and run
the command:

```
invisible-watermark -a decode -t bytes -m dwtDct -l 64 /path/to/image.png
```

## The NSFW ("Safety") Checker

-The Stable Diffusion image generation models will produce sexual
+Stable Diffusion 1.5-based image generation models will produce sexual
imagery if deliberately prompted, and will occasionally produce such
images when this is not intended. Such images are colloquially known
as "Not Safe for Work" (NSFW). This behavior is due to the nature of
@@ -18,35 +46,17 @@ jurisdictions it may be illegal to publicly distribute such imagery,
including mounting a publicly-available server that provides
unfiltered images to the public. Furthermore, the [Stable Diffusion
weights
-License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-ModelWeights.txt)
-forbids the model from being used to "exploit any of the
+License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-SD1+SD2.txt),
+and the [Stable Diffusion XL
+License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-SDXL.txt)
+both forbid the models from being used to "exploit any of the
vulnerabilities of a specific group of persons."

For these reasons Stable Diffusion offers a "safety checker," a
machine learning model trained to recognize potentially disturbing
imagery. When a potentially NSFW image is detected, the checker will
blur the image and paste a warning icon on top. The checker can be
-turned on and off on the command line using `--nsfw_checker` and
-`--no-nsfw_checker`.

-At installation time, InvokeAI will ask whether the checker should be
-activated by default (neither argument given on the command line). The
-response is stored in the InvokeAI initialization file
-(`invokeai.yaml` in the InvokeAI root directory). You can change the
-default at any time by opening this file in a text editor and
-changing the line `nsfw_checker:` from true to false or vice-versa:

-```
-...
-Features:
-    esrgan: true
-    internet_available: true
-    log_tokenization: false
-    nsfw_checker: true
-    patchmatch: true
-    restore: true
-```
+turned on and off in the Web interface under Settings.

## Caveats

@@ -84,10 +94,3 @@ are encouraged to turn **off** intermediate image rendering when you
are using the checker. Future versions of InvokeAI will apply
additional blurring to intermediate images when the checker is active.

-### Watermarking

-InvokeAI does not apply any sort of watermark to images it
-generates. However, it does write metadata into the PNG data area,
-including the prompt used to generate the image and relevant parameter
-settings. These fields can be examined using the `sd-metadata.py`
-script that comes with the InvokeAI package.
@@ -4,6 +4,9 @@ title: Overview

Here you can find the documentation for InvokeAI's various features.

## The [Getting Started Guide](../help/gettingStartedWithAI)
A getting started guide for those new to AI image generation.

## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
@@ -27,10 +30,6 @@ image output.
### * [Image-to-Image Guide](IMG2IMG.md)
Use a seed image to build new creations in the CLI.

-### * [Generating Variations](VARIATIONS.md)
-Have an image you like and want to generate many more like it? Variations
-are the ticket.

## Model Management

### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
@@ -46,7 +45,7 @@ Personalize models by adding your own style or subjects.

## Other Features

-### * [The NSFW Checker](NSFW.md)
+### * [The NSFW Checker](WATERMARK+NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.

### * [Controlling Logging](LOGGING.md)

27 docs/help/diffusion.md Normal file
@@ -0,0 +1,27 @@

Taking the time to understand the diffusion process will help you to understand how to more effectively use InvokeAI.

There are two main spaces Stable Diffusion works in: image space and latent space.

Image space represents images in pixel form that you look at. Latent space represents compressed inputs. It’s in latent space that Stable Diffusion processes images. A VAE (Variational Auto Encoder) is responsible for compressing and encoding inputs into latent space, as well as decoding outputs back into image space.

To fully understand the diffusion process, we need to understand a few more terms: U-Net, CLIP, and conditioning.

A U-Net is a model trained on a large number of latent images with known amounts of random noise added. This means that the U-Net can be given a slightly noisy image and it will predict the pattern of noise needed to subtract from the image in order to recover the original.

CLIP is a model that tokenizes and encodes text into conditioning. This conditioning guides the model during the denoising steps to produce a new image.

The U-Net and CLIP work together during the image generation process at each denoising step, with the U-Net removing noise in such a way that the result is similar to images in the U-Net’s training set, while CLIP guides the U-Net towards creating images that are most similar to the prompt.

When you generate an image using text-to-image, multiple steps occur in latent space (a toy code sketch follows this list):
1. Random noise is generated at the chosen height and width. The noise’s characteristics are dictated by the seed. This noise tensor is passed into latent space. We’ll call this noise A.
2. Using a model’s U-Net, a noise predictor examines noise A and the words tokenized by CLIP from your prompt (conditioning). It generates its own noise tensor to predict what the final image might look like in latent space. We’ll call this noise B.
3. Noise B is subtracted from noise A in an attempt to create a latent image consistent with the prompt. This step is repeated for the number of sampler steps chosen.
4. The VAE decodes the final latent image from latent space into image space.
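Here is a toy sketch of that loop, to make the moving parts concrete. The `unet` stand-in, the schedule, and the update rule are deliberately simplified placeholders, not InvokeAI's actual sampling code:

```python
import torch

def toy_denoise_loop(unet, sigmas, conditioning, height, width, seed):
    generator = torch.Generator().manual_seed(seed)
    # Step 1: random latent noise ("noise A"); SD latents are 8x smaller than pixels.
    latents = torch.randn(1, 4, height // 8, width // 8, generator=generator)
    # Steps 2-3: at each step the U-Net predicts noise ("noise B"),
    # and a scheduled fraction of it is subtracted from the latents.
    for sigma in sigmas:
        noise_pred = unet(latents, sigma, conditioning)
        latents = latents - sigma * noise_pred
    return latents  # Step 4 would hand this to the VAE decoder.

# Stand-ins so the sketch actually runs:
fake_unet = lambda latents, sigma, cond: 0.1 * latents
sigmas = [0.8, 0.6, 0.4, 0.2, 0.1]  # less noise subtracted at later steps
print(toy_denoise_loop(fake_unet, sigmas, None, 512, 512, seed=1234).shape)
# torch.Size([1, 4, 64, 64])
```
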
Image-to-image is a similar process, with only step 1 being different:
1. The input image is encoded from image space into latent space by the VAE. Noise is then added to the input latent image. Denoising Strength dictates how many noise steps are added, and the amount of noise added at each step. A Denoising Strength of 0 means there are 0 steps and no noise added, resulting in an unchanged image, while a Denoising Strength of 1 results in the image being completely replaced with noise and a full set of denoising steps being performed. The process is then the same as steps 2-4 in the text-to-image process.

Furthermore, a model provides the CLIP prompt tokenizer, the VAE, and a U-Net (where noise prediction occurs given a prompt and initial noise tensor).

A noise scheduler (e.g. DPM++ 2M Karras) schedules the subtraction of noise from the latent image across the sampler steps chosen (step 3 above). Less noise is usually subtracted at higher sampler steps.
95
docs/help/gettingStartedWithAI.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# Getting Started with AI Image Generation
|
||||
|
||||
New to image generation with AI? You’re in the right place!
|
||||
|
||||
This is a high level walkthrough of some of the concepts and terms you’ll see as you start using InvokeAI. Please note, this is not an exhaustive guide and may be out of date due to the rapidly changing nature of the space.
|
||||
|
||||
## Using InvokeAI
|
||||
|
||||
### **Prompt Crafting**
|
||||
|
||||
- Prompts are the basis of using InvokeAI, providing the models directions on what to generate. As a general rule of thumb, the more detailed your prompt is, the better your result will be.
|
||||
|
||||
*To get started, here’s an easy template to use for structuring your prompts:*
|
||||
|
||||
- Subject, Style, Quality, Aesthetic
|
||||
- **Subject:** What your image will be about. E.g. “a futuristic city with trains”, “penguins floating on icebergs”, “friends sharing beers”
|
||||
- **Style:** The style or medium in which your image will be in. E.g. “photograph”, “pencil sketch”, “oil paints”, or “pop art”, “cubism”, “abstract”
|
||||
- **Quality:** A particular aspect or trait that you would like to see emphasized in your image. E.g. "award-winning", "featured in {relevant set of high quality works}", "professionally acclaimed". Many people often use "masterpiece".
|
||||
- **Aesthetics:** The visual impact and design of the artwork. This can be colors, mood, lighting, setting, etc.
|
||||
- There are two prompt boxes: *Positive Prompt* & *Negative Prompt*.
|
||||
- A **Positive** Prompt includes words you want the model to reference when creating an image.
|
||||
- Negative Prompt is for anything you want the model to eliminate when creating an image. It doesn’t always interpret things exactly the way you would, but helps control the generation process. Always try to include a few terms - you can typically use lower quality image terms like “blurry” or “distorted” with good success.
|
||||
- Some examples prompts you can try on your own:
|
||||
- A detailed oil painting of a tranquil forest at sunset with vibrant+ colors and soft, golden light filtering through the trees
|
||||
- friends sharing beers in a busy city, realistic colored pencil sketch, twilight, masterpiece, bright, lively
|
||||
|
||||
### Generation Workflows
|
||||
|
||||
- Invoke offers a number of different workflows for interacting with models to produce images. Each is extremely powerful on its own, but together provide you an unparalleled way of producing high quality creative outputs that align with your vision.
|
||||
- **Text to Image:** The text to image tab focuses on the key workflow of using a prompt to generate a new image. It includes other features that help control the generation process as well.
|
||||
- **Image to Image:** With image to image, you provide an image as a reference (called the “initial image”), which provides more guidance around color and structure to the AI as it generates a new image. This is provided alongside the same features as Text to Image.
|
||||
- **Unified Canvas:** The Unified Canvas is an advanced AI-first image editing tool that is easy to use, but hard to master. Drag an image onto the canvas from your gallery in order to regenerate certain elements, edit content or colors (known as inpainting), or extend the image with an exceptional degree of consistency and clarity (called outpainting).
|
||||
|
||||
### Improving Image Quality
|
||||
|
||||
- Fine tuning your prompt - the more specific you are, the closer the image will turn out to what is in your head! Adding more details in the Positive Prompt or Negative Prompt can help add / remove pieces of your image to improve it - You can also use advanced techniques like upweighting and downweighting to control the influence of certain words. [Learn more here](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-syntax-features).
|
||||
- **Tip: If you’re seeing poor results, try adding the things you don’t like about the image to your negative prompt may help. E.g. distorted, low quality, unrealistic, etc.**
|
||||
- Explore different models - Other models can produce different results due to the data they’ve been trained on. Each model has specific language and settings it works best with; a model’s documentation is your friend here. Play around with some and see what works best for you!
|
||||
- Increasing Steps - The number of steps used controls how much time the model is given to produce an image, and depends on the “Scheduler” used. The schedule controls how each step is processed by the model. More steps tends to mean better results, but will take longer - We recommend at least 30 steps for most
|
||||
- Tweak and Iterate - Remember, it’s best to change one thing at a time so you know what is working and what isn't. Sometimes you just need to try a new image, and other times using a new prompt might be the ticket. For testing, consider turning off the “random” Seed - Using the same seed with the same settings will produce the same image, which makes it the perfect way to learn exactly what your changes are doing.
|
||||
- Explore Advanced Settings - InvokeAI has a full suite of tools available to allow you complete control over your image creation process - Check out our [docs if you want to learn more](https://invoke-ai.github.io/InvokeAI/features/).
|
||||
|
||||
|
||||
## Terms & Concepts
|
||||
|
||||
If you're interested in learning more, check out [this presentation](https://docs.google.com/presentation/d/1IO78i8oEXFTZ5peuHHYkVF-Y3e2M6iM5tCnc-YBfcCM/edit?usp=sharing) from one of our maintainers (@lstein).
|
||||
|
||||
### Stable Diffusion
|
||||
|
||||
Stable Diffusion is deep learning, text-to-image model that is the foundation of the capabilities found in InvokeAI. Since the release of Stable Diffusion, there have been many subsequent models created based on Stable Diffusion that are designed to generate specific types of images.
|
||||
|
||||
### Prompts
|
||||
|
||||
Prompts provide the models directions on what to generate. As a general rule of thumb, the more detailed your prompt is, the better your result will be.
|
||||
|
||||
### Models
|
||||
|
||||
Models are the magic that power InvokeAI. These files represent the output of training a machine on understanding massive amounts of images - providing them with the capability to generate new images using just a text description of what you’d like to see. (Like Stable Diffusion!)
|
||||
|
||||
Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at ****. Each model can produce a unique style of output, based on the images it was trained on - Try out different models to see which best fits your creative vision!
|
||||
|
||||
- *Models that contain “inpainting” in the name are designed for use with the inpainting feature of the Unified Canvas*
|
||||
|
||||
### Scheduler
|
||||
|
||||
Schedulers guide the process of removing noise (de-noising) from data. They determine:
|
||||
|
||||
1. The number of steps to take to remove the noise.
|
||||
2. Whether the steps are random (stochastic) or predictable (deterministic).
|
||||
3. The specific method (algorithm) used for de-noising.
|
||||
|
||||
Experimenting with different schedulers is recommended as each will produce different outputs!
|
||||
|
||||
### Steps
|
||||
|
||||
The number of de-noising steps each generation through.
|
||||
|
||||
Schedulers can be intricate and there's often a balance to strike between how quickly they can de-noise data and how well they can do it. It's typically advised to experiment with different schedulers to see which one gives the best results. There has been a lot written on the internet about different schedulers, as well as exploring what the right level of "steps" are for each. You can save generation time by reducing the number of steps used, but you'll want to make sure that you are satisfied with the quality of images produced!
|
||||
|
||||
### Low-Rank Adaptations / LoRAs
|
||||
|
||||
Low-Rank Adaptations (LoRAs) are like smaller, more focused versions of models, trained to capture a better understanding of how a specific character, style, or concept looks.

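For illustration, this is roughly how a LoRA is layered on top of a base model in the `diffusers` library (the file path is hypothetical; in InvokeAI you would instead select the LoRA in the UI):

```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# The LoRA's small set of extra weights adjusts the base model's UNet and
# text encoder toward the character/style/concept it was trained on.
pipe.load_lora_weights("path/to/my_style_lora.safetensors")  # hypothetical file
image = pipe("a portrait in my_style").images[0]
```
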
### Textual Inversion Embeddings

Textual Inversion Embeddings, like LoRAs, assist with more easily prompting for certain characters, styles, or concepts. However, embeddings are trained to update the relationship between a specific word (known as the “trigger”) and the intended output.

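A small sketch of how a trigger word works, using a publicly documented `diffusers` example embedding (an assumption for illustration; InvokeAI loads embeddings from its embeddings directory instead):

```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# The embedding teaches the text encoder a new token; using that trigger
# token in a prompt pulls in the trained concept.
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
image = pipe("a <cat-toy> sitting on a beach").images[0]
```
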
### ControlNet

ControlNets are neural network models that are able to extract key features from an existing image and use these features to guide the output of the image generation model.

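Sketched with `diffusers` (the model names are common public checkpoints used here as assumptions; the edge map would normally come from a Canny preprocessor):

```python
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)

edges = Image.open("canny_edges.png")  # hypothetical pre-computed edge map
# Generation is steered to follow the structure in the edge map, while the
# prompt controls content and style.
image = pipe("a futuristic city at night", image=edges).images[0]
```
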
### VAE

A variational autoencoder (VAE) is an encode/decode model that translates the "latents" image produced during the image generation process into the full-size pixel image that we see.

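A minimal sketch of the decode step, assuming the `diffusers` `AutoencoderKL` implementation (the random latents here stand in for the real de-noised latents):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")

latents = torch.randn(1, 4, 64, 64)  # stand-in for de-noised latents
with torch.no_grad():
    # The decoder expands the compact 4x64x64 latent into a 3x512x512 pixel image.
    image = vae.decode(latents / vae.config.scaling_factor).sample
```
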
100
docs/index.md
@@ -11,6 +11,33 @@ title: Home
|
||||
```
|
||||
-->
|
||||
|
||||
<!-- CSS styling -->
|
||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free@6.2.1/css/fontawesome.min.css">
|
||||
<style>
|
||||
.button {
|
||||
width: 300px;
|
||||
height: 50px;
|
||||
background-color: #448AFF;
|
||||
color: #fff;
|
||||
font-size: 16px;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
border-radius: 0.2rem;
|
||||
}
|
||||
|
||||
.button-container {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, 300px);
|
||||
gap: 20px;
|
||||
}
|
||||
|
||||
.button:hover {
|
||||
background-color: #526CFE;
|
||||
}
|
||||
</style>
|
||||
|
||||
|
||||
|
||||
<div align="center" markdown>
|
||||
|
||||
|
||||
@@ -22,9 +49,9 @@ title: Home
|
||||
[![github stars badge]][github stars link]
|
||||
[![github forks badge]][github forks link]
|
||||
|
||||
[![CI checks on main badge]][ci checks on main link]
|
||||
<!-- [![CI checks on main badge]][ci checks on main link]
|
||||
[![CI checks on dev badge]][ci checks on dev link]
|
||||
[![latest commit to dev badge]][latest commit to dev link]
|
||||
[![latest commit to dev badge]][latest commit to dev link] -->
|
||||
|
||||
[![github open issues badge]][github open issues link]
|
||||
[![github open prs badge]][github open prs link]
|
||||
@@ -54,10 +81,10 @@ title: Home
|
||||
[github stars badge]:
|
||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||
[latest commit to dev badge]:
|
||||
<!-- [latest commit to dev badge]:
|
||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to dev link]:
|
||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||
https://github.com/invoke-ai/InvokeAI/commits/main -->
|
||||
[latest release badge]:
|
||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
@@ -70,61 +97,24 @@ image-to-image generator. It provides a streamlined process with various new
|
||||
features and options to aid the image generation process. It runs on Windows,
|
||||
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
||||
|
||||
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>]
|
||||
[<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a
|
||||
href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a
|
||||
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas &
|
||||
Q&A</a>]
|
||||
|
||||
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
||||
|
||||
!!! note
|
||||
!!! Note
|
||||
|
||||
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help aid diagnose issues faster.
|
||||
    This project is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates, as this will help improve response time.
|
||||
|
||||
## :fontawesome-solid-computer: Hardware Requirements
|
||||
## :octicons-link-24: Quick Links
|
||||
|
||||
### :octicons-cpu-24: System
|
||||
<div class="button-container">
|
||||
<a href="installation/INSTALLATION"> <button class="button">Installation</button> </a>
|
||||
<a href="features/"> <button class="button">Features</button> </a>
|
||||
<a href="help/gettingStartedWithAI/"> <button class="button">Getting Started</button> </a>
|
||||
<a href="contributing/CONTRIBUTING/"> <button class="button">Contributing</button> </a>
|
||||
<a href="https://github.com/invoke-ai/InvokeAI/"> <button class="button">Code and Downloads</button> </a>
|
||||
<a href="https://github.com/invoke-ai/InvokeAI/issues"> <button class="button">Bug Reports </button> </a>
|
||||
<a href="https://discord.gg/ZmtBAhwWhy"> <button class="button"> Join the Discord Server!</button> </a>
|
||||
</div>
|
||||
|
||||
You will need one of the following:
|
||||
|
||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||
only)
|
||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||
|
||||
We do **not recommend** the following video cards due to issues with their
|
||||
running in half-precision mode and having insufficient VRAM to render 512x512
|
||||
images in full-precision mode:
|
||||
|
||||
- NVIDIA 10xx series cards such as the 1080ti
|
||||
- GTX 1650 series cards
|
||||
- GTX 1660 series cards
|
||||
|
||||
### :fontawesome-solid-memory: Memory and Disk
|
||||
|
||||
- At least 12 GB Main Memory RAM.
|
||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||
all its dependencies.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||
This method is recommended for first-time users
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :octicons-gift-24: InvokeAI Features
|
||||
|
||||
@@ -147,7 +137,7 @@ This method is recommended for those familiar with running Docker containers
|
||||
- [Model Merging](features/MODEL_MERGING.md)
|
||||
- [ControlNet Models](features/CONTROLNET.md)
|
||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||
- [Watermarking and the Not Safe for Work (NSFW) Checker](features/WATERMARK+NSFW.md)
|
||||
<!-- seperator -->
|
||||
### Prompt Engineering
|
||||
- [Prompt Syntax](features/PROMPTS.md)
|
||||
@@ -229,7 +219,7 @@ encouraged to do so.
|
||||
|
||||
## :octicons-person-24: Contributors
|
||||
|
||||
This fork is a combined effort of various people from across the world.
|
||||
This software is a combined effort of various people from across the world.
|
||||
[Check out the list of all these amazing people](other/CONTRIBUTORS.md). We
|
||||
thank them for their time, hard work and effort.
|
||||
|
||||
|
||||
@@ -40,10 +40,8 @@ experimental versions later.
|
||||
this, open up a command-line window ("Terminal" on Linux and
|
||||
Macintosh, "Command" or "Powershell" on Windows) and type `python
|
||||
--version`. If Python is installed, it will print out the version
|
||||
number. If it is version `3.9.*` or `3.10.*`, you meet
|
||||
requirements. We do not recommend using Python 3.11 or higher,
|
||||
as not all the libraries that InvokeAI depends on work properly
|
||||
with this version.
|
||||
number. If it is version `3.9.*`, `3.10.*`, or `3.11.*`, you meet
|
||||
requirements.
|
||||
|
||||
!!! warning "What to do if you have an unsupported version"
|
||||
|
||||
@@ -124,9 +122,9 @@ experimental versions later.
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
|
||||
and look for a file named:
|
||||
|
||||
- InvokeAI-installer-v2.X.X.zip
|
||||
- InvokeAI-installer-v3.X.X.zip
|
||||
|
||||
where "2.X.X" is the latest released version. The file is located
|
||||
where "3.X.X" is the latest released version. The file is located
|
||||
at the very bottom of the release page, under **Assets**.
|
||||
|
||||
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
|
||||
@@ -215,17 +213,6 @@ experimental versions later.
|
||||
Generally the defaults are fine, and you can come back to this screen at
|
||||
any time to tweak your system. Here are the options you can adjust:
|
||||
|
||||
- ***Output directory for images***
|
||||
This is the path to a directory in which InvokeAI will store all its
|
||||
generated images.
|
||||
|
||||
- ***NSFW checker***
|
||||
If checked, InvokeAI will test images for potential sexual content
|
||||
and blur them out if found. Note that the NSFW checker consumes
|
||||
an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
|
||||
by most image models. If you have a low VRAM GPU (4-6 GB), you
|
||||
can reduce out of memory errors by disabling the checker.
|
||||
|
||||
- ***HuggingFace Access Token***
|
||||
InvokeAI has the ability to download embedded styles and subjects
|
||||
from the HuggingFace Concept Library on-demand. However, some of
|
||||
@@ -257,20 +244,30 @@ experimental versions later.
|
||||
and graphics cards. The "autocast" option is deprecated and
|
||||
shouldn't be used unless you are asked to by a member of the team.
|
||||
|
||||
- ***Number of models to cache in CPU memory***
|
||||
- ***Size of the RAM cache used for fast model switching***
|
||||
This allows you to keep models in memory and switch rapidly among
|
||||
them rather than having them load from disk each time. This slider
|
||||
controls how many models to keep loaded at once. Each
|
||||
model will use 2-4 GB of RAM, so use this cautiously
|
||||
controls how many models to keep loaded at once. A typical SD-1 or SD-2 model
|
||||
uses 2-3 GB of memory. A typical SDXL model uses 6-7 GB. Providing more
|
||||
RAM will allow more models to be co-resident.
|
||||
|
||||
- ***Directory containing embedding/textual inversion files***
|
||||
This is the directory in which you can place custom embedding
|
||||
files (.pt or .bin). During startup, this directory will be
|
||||
scanned and InvokeAI will print out the text terms that
|
||||
are available to trigger the embeddings.
|
||||
- ***Output directory for images***
|
||||
This is the path to a directory in which InvokeAI will store all its
|
||||
generated images.
|
||||
|
||||
- ***Autoimport Folder***
|
||||
This is the directory in which you can place models you have
|
||||
downloaded and wish to load into InvokeAI. You can place a variety
|
||||
of models in this directory, including diffusers folders, .ckpt files,
|
||||
.safetensors files, as well as LoRAs, ControlNet and Textual Inversion
|
||||
files (both folder and file versions). To help organize this folder,
|
||||
you can create several levels of subfolders and drop your models into
|
||||
whichever ones you want.
|
||||
|
||||
- ***LICENSE***
|
||||
|
||||
At the bottom of the screen you will see a checkbox for accepting
|
||||
the CreativeML Responsible AI License. You need to accept the license
|
||||
the CreativeML Responsible AI Licenses. You need to accept the license
|
||||
in order to download Stable Diffusion models from the next screen.
|
||||
|
||||
_You can come back to the startup options form_ as many times as you like.
|
||||
@@ -375,8 +372,71 @@ experimental versions later.
|
||||
Once InvokeAI is installed, do not move or remove this directory."
|
||||
|
||||
|
||||
<a name="troubleshooting"></a>
|
||||
## Troubleshooting
|
||||
|
||||
### _OSErrors on Windows while installing dependencies_
|
||||
|
||||
During a zip file installation or an online update, installation stops
|
||||
with an error like this:
|
||||
|
||||
*(screenshot of the error message)*
|
||||
|
||||
This seems to happen particularly often with the `pydantic` and
|
||||
`numpy` packages. The most reliable solution requires several manual
|
||||
steps to complete installation.
|
||||
|
||||
Open up a Powershell window and navigate to the `invokeai` directory
|
||||
created by the installer. Then give the following series of commands:
|
||||
|
||||
```cmd
|
||||
rm .\.venv -r -force
|
||||
python -mvenv .venv
|
||||
.\.venv\Scripts\activate
|
||||
pip install invokeai
|
||||
invokeai-configure --yes --root .
|
||||
```
|
||||
|
||||
If you see anything marked as an error during this process please stop
|
||||
and seek help on the Discord [installation support
|
||||
channel](https://discord.com/channels/1020123559063990373/1041391462190956654). A
|
||||
few warning messages are OK.
|
||||
|
||||
If you are updating from a previous version, this should restore your
|
||||
system to a working state. If you are installing from scratch, there
|
||||
is one additional command to give:
|
||||
|
||||
```cmd
|
||||
wget -O invoke.bat https://raw.githubusercontent.com/invoke-ai/InvokeAI/main/installer/templates/invoke.bat.in
|
||||
```
|
||||
|
||||
This will create the `invoke.bat` script needed to launch InvokeAI and
|
||||
its related programs.
|
||||
|
||||
|
||||
### _Stable Diffusion XL Generation Fails after Trying to Load unet_
|
||||
|
||||
InvokeAI is working in other respects, but when trying to generate
|
||||
images with Stable Diffusion XL you get a "Server Error". The text log
|
||||
in the launch window contains this log line above several more lines of
|
||||
error messages:
|
||||
|
||||
```INFO --> Loading model:D:\LONG\PATH\TO\MODEL, type sdxl:main:unet```
|
||||
|
||||
This failure mode occurs when there is a network glitch during
|
||||
downloading the very large SDXL model.
|
||||
|
||||
To address this, first go to the Web Model Manager and delete the
|
||||
Stable-Diffusion-XL-base-1.X model. Then navigate to HuggingFace and
|
||||
manually download the .safetensors version of the model. The 1.0
|
||||
version is located at
|
||||
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main
|
||||
and the file is named `sd_xl_base_1.0.safetensors`.
|
||||
|
||||
Save this file to disk and then reenter the Model Manager. Navigate to
|
||||
Import Models->Add Model, then type (or drag-and-drop) the path to the
|
||||
.safetensors file. Press "Add Model".
|
||||
|
||||
### _Package dependency conflicts_
|
||||
|
||||
If you have previously installed InvokeAI or another Stable Diffusion
|
||||
@@ -411,7 +471,7 @@ Then type the following commands:
|
||||
|
||||
=== "NVIDIA System"
|
||||
```bash
|
||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu118
|
||||
pip install xformers
|
||||
```
|
||||
|
||||
|
||||
@@ -8,9 +8,9 @@ title: Installing Manually
|
||||
|
||||
</figure>
|
||||
|
||||
!!! warning "This is for advanced Users"
|
||||
!!! warning "This is for Advanced Users"
|
||||
|
||||
**python experience is mandatory**
|
||||
**Python experience is mandatory**
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -32,7 +32,7 @@ gaming):
|
||||
|
||||
* **Python**
|
||||
|
||||
version 3.9 or 3.10 (3.11 is not recommended).
|
||||
version 3.9 through 3.11.
|
||||
|
||||
* **CUDA Tools**
|
||||
|
||||
@@ -65,7 +65,7 @@ gaming):
|
||||
To install InvokeAI with virtual environments and the PIP package
|
||||
manager, please follow these steps:
|
||||
|
||||
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
|
||||
1. Please make sure you are using Python 3.9 through 3.11. The rest of the install
|
||||
procedure depends on this and will not work with other versions:
|
||||
|
||||
```bash
|
||||
@@ -148,7 +148,7 @@ manager, please follow these steps:
|
||||
=== "CUDA (NVidia)"
|
||||
|
||||
```bash
|
||||
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
|
||||
```
|
||||
|
||||
=== "ROCm (AMD)"
|
||||
@@ -192,8 +192,10 @@ manager, please follow these steps:
|
||||
your outputs.
|
||||
|
||||
```terminal
|
||||
invokeai-configure
|
||||
invokeai-configure --root .
|
||||
```
|
||||
|
||||
Don't miss the dot at the end of the command!
|
||||
|
||||
The script `invokeai-configure` will interactively guide you through the
|
||||
process of downloading and installing the weights files needed for InvokeAI.
|
||||
@@ -225,12 +227,6 @@ manager, please follow these steps:
|
||||
|
||||
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
|
||||
|
||||
=== "CLI"
|
||||
|
||||
```bash
|
||||
invokeai
|
||||
```
|
||||
|
||||
=== "local Webserver"
|
||||
|
||||
```bash
|
||||
@@ -243,6 +239,12 @@ manager, please follow these steps:
|
||||
invokeai --web --host 0.0.0.0
|
||||
```
|
||||
|
||||
=== "CLI"
|
||||
|
||||
```bash
|
||||
invokeai
|
||||
```
|
||||
|
||||
If you choose the run the web interface, point your browser at
|
||||
http://localhost:9090 in order to load the GUI.
|
||||
|
||||
@@ -310,7 +312,7 @@ installation protocol (important!)
|
||||
|
||||
=== "CUDA (NVidia)"
|
||||
```bash
|
||||
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
|
||||
```
|
||||
|
||||
=== "ROCm (AMD)"
|
||||
@@ -354,7 +356,7 @@ you can do so using this unsupported recipe:
|
||||
mkdir ~/invokeai
|
||||
conda create -n invokeai python=3.10
|
||||
conda activate invokeai
|
||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
|
||||
invokeai-configure --root ~/invokeai
|
||||
invokeai --root ~/invokeai --web
|
||||
```
|
||||
|
||||
@@ -34,11 +34,11 @@ directly from NVIDIA. **Do not try to install Ubuntu's
|
||||
nvidia-cuda-toolkit package. It is out of date and will cause
|
||||
conflicts among the NVIDIA driver and binaries.**
|
||||
|
||||
Go to [CUDA Toolkit 11.7
|
||||
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
|
||||
and use the target selection wizard to choose your operating system,
|
||||
hardware platform, and preferred installation method (e.g. "local"
|
||||
versus "network").
|
||||
Go to [CUDA Toolkit
|
||||
Downloads](https://developer.nvidia.com/cuda-downloads), and use the
|
||||
target selection wizard to choose your operating system, hardware
|
||||
platform, and preferred installation method (e.g. "local" versus
|
||||
"network").
|
||||
|
||||
This will provide you with a downloadable install file or, depending
|
||||
on your choices, a recipe for downloading and running an install shell
|
||||
@@ -57,11 +57,35 @@ familiar with containerization technologies such as Docker.
|
||||
For downloads and instructions, visit the [NVIDIA CUDA Container
|
||||
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime)
|
||||
|
||||
### cuDNN Installation for 40/30 Series Optimization (Optional)

1. Find the InvokeAI folder
2. Click on the .venv folder - e.g., YourInvokeFolderHere\\.venv
3. Click on the Lib folder - e.g., YourInvokeFolderHere\\.venv\Lib
4. Click on the site-packages folder - e.g., YourInvokeFolderHere\\.venv\Lib\site-packages
5. Click on the torch directory - e.g., YourInvokeFolderHere\\.venv\Lib\site-packages\torch
6. Click on the lib folder - e.g., YourInvokeFolderHere\\.venv\Lib\site-packages\torch\lib
7. Copy everything inside the folder and save it elsewhere as a backup.
8. Go to __https://developer.nvidia.com/cudnn__
9. Log in or create an account.
10. Choose the newer version of cuDNN. **Note:**
    There are two versions, 11.x and 12.x, for the different architectures (Turing, Maxwell, etc.) of GPUs.
    You can find which version you should download from [this link](https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html).
11. Download the latest version and extract it from the download location
12. Find the bin folder, e.g., E:\cudnn-windows-x86_64-__Whatever Version__\bin
13. Copy and paste the .dll files into YourInvokeFolderHere\\.venv\Lib\site-packages\torch\lib. **Make sure to copy, and not move, the files.**
14. If prompted, replace any existing files

**Notes:**

* If no change is seen or any issues are encountered, follow the same steps as above and paste back the torch\lib backup you made earlier, replacing the new files. If you didn't make a backup, you can also uninstall and reinstall torch through the command line to repair this folder.
* This optimization is intended for the newer generations of graphics cards (40/30 series), but improvements have been reported with older graphics cards as well.

### Torch Installation
|
||||
|
||||
When installing torch and torchvision manually with `pip`, remember to provide
|
||||
the argument `--extra-index-url
|
||||
https://download.pytorch.org/whl/cu117` as described in the [Manual
|
||||
https://download.pytorch.org/whl/cu118` as described in the [Manual
|
||||
Installation Guide](020_INSTALL_MANUAL.md).
|
||||
|
||||
## :simple-amd: ROCm
|
||||
|
||||
@@ -4,9 +4,9 @@ title: Installing with Docker
|
||||
|
||||
# :fontawesome-brands-docker: Docker
|
||||
|
||||
!!! warning "For end users"
|
||||
!!! warning "For most users"
|
||||
|
||||
We highly recommend to Install InvokeAI locally using [these instructions](index.md)
|
||||
    We highly recommend installing InvokeAI locally using [these instructions](INSTALLATION.md)
|
||||
|
||||
!!! tip "For developers"
|
||||
|
||||
|
||||
@@ -124,7 +124,7 @@ installation. Examples:
|
||||
invokeai-model-install --list controlnet
|
||||
|
||||
# (install the model at the indicated URL)
|
||||
invokeai-model-install --add http://civitai.com/2860
|
||||
invokeai-model-install --add https://civitai.com/api/download/models/128713
|
||||
|
||||
# (delete the named model)
|
||||
invokeai-model-install --delete sd-1/main/analog-diffusion
|
||||
@@ -170,4 +170,4 @@ elsewhere on disk and they will be autoimported. You can also create
|
||||
subfolders and organize them as you wish.
|
||||
|
||||
The location of the autoimport directories are controlled by settings
|
||||
in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).
|
||||
in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).
|
||||
|
||||
@@ -28,18 +28,21 @@ command line, then just be sure to activate it's virtual environment.
|
||||
Then run the following three commands:
|
||||
|
||||
```sh
|
||||
pip install xformers==0.0.16rc425
|
||||
pip install triton
|
||||
pip install xformers~=0.0.19
|
||||
pip install triton # WON'T WORK ON WINDOWS
|
||||
python -m xformers.info output
|
||||
```
|
||||
|
||||
The first command installs `xformers`, the second installs the
|
||||
`triton` training accelerator, and the third prints out the `xformers`
|
||||
installation status. If all goes well, you'll see a report like the
|
||||
installation status. On Windows, please omit the `triton` package,
|
||||
which is not available on that platform.
|
||||
|
||||
If all goes well, you'll see a report like the
|
||||
following:
|
||||
|
||||
```sh
|
||||
xFormers 0.0.16rc425
|
||||
xFormers 0.0.20
|
||||
memory_efficient_attention.cutlassF: available
|
||||
memory_efficient_attention.cutlassB: available
|
||||
memory_efficient_attention.flshattF: available
|
||||
@@ -48,22 +51,28 @@ memory_efficient_attention.smallkF: available
|
||||
memory_efficient_attention.smallkB: available
|
||||
memory_efficient_attention.tritonflashattF: available
|
||||
memory_efficient_attention.tritonflashattB: available
|
||||
indexing.scaled_index_addF: available
|
||||
indexing.scaled_index_addB: available
|
||||
indexing.index_select: available
|
||||
swiglu.dual_gemm_silu: available
|
||||
swiglu.gemm_fused_operand_sum: available
|
||||
swiglu.fused.p.cpp: available
|
||||
is_triton_available: True
|
||||
is_functorch_available: False
|
||||
pytorch.version: 1.13.1+cu117
|
||||
pytorch.version: 2.0.1+cu118
|
||||
pytorch.cuda: available
|
||||
gpu.compute_capability: 8.6
|
||||
gpu.name: NVIDIA RTX A2000 12GB
|
||||
gpu.compute_capability: 8.9
|
||||
gpu.name: NVIDIA GeForce RTX 4070
|
||||
build.info: available
|
||||
build.cuda_version: 1107
|
||||
build.python_version: 3.10.9
|
||||
build.torch_version: 1.13.1+cu117
|
||||
build.cuda_version: 1108
|
||||
build.python_version: 3.10.11
|
||||
build.torch_version: 2.0.1+cu118
|
||||
build.env.TORCH_CUDA_ARCH_LIST: 5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6
|
||||
build.env.XFORMERS_BUILD_TYPE: Release
|
||||
build.env.XFORMERS_ENABLE_DEBUG_ASSERTIONS: None
|
||||
build.env.NVCC_FLAGS: None
|
||||
build.env.XFORMERS_PACKAGE_FROM: wheel-v0.0.16rc425
|
||||
build.env.XFORMERS_PACKAGE_FROM: wheel-v0.0.20
|
||||
build.nvcc_version: 11.8.89
|
||||
source.privacy: open source
|
||||
```
|
||||
|
||||
@@ -83,14 +92,14 @@ installed from source. These instructions were written for a system
|
||||
running Ubuntu 22.04, but other Linux distributions should be able to
|
||||
adapt this recipe.
|
||||
|
||||
#### 1. Install CUDA Toolkit 11.7
|
||||
#### 1. Install CUDA Toolkit 11.8
|
||||
|
||||
You will need the CUDA developer's toolkit in order to compile and
|
||||
install xFormers. **Do not try to install Ubuntu's nvidia-cuda-toolkit
|
||||
package.** It is out of date and will cause conflicts among the NVIDIA
|
||||
driver and binaries. Instead install the CUDA Toolkit package provided
|
||||
by NVIDIA itself. Go to [CUDA Toolkit 11.7
|
||||
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive)
|
||||
by NVIDIA itself. Go to [CUDA Toolkit 11.8
|
||||
Downloads](https://developer.nvidia.com/cuda-11-8-0-download-archive)
|
||||
and use the target selection wizard to choose your platform and Linux
|
||||
distribution. Select an installer type of "runfile (local)" at the
|
||||
last step.
|
||||
@@ -101,17 +110,17 @@ example, the install script recipe for Ubuntu 22.04 running on a
|
||||
x86_64 system is:
|
||||
|
||||
```
|
||||
wget https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_515.43.04_linux.run
|
||||
sudo sh cuda_11.7.0_515.43.04_linux.run
|
||||
wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
|
||||
sudo sh cuda_11.8.0_520.61.05_linux.run
|
||||
```
|
||||
|
||||
Rather than cut-and-paste this example, we recommend that you walk
|
||||
through the toolkit wizard in order to get the most up to date
|
||||
installer for your system.
|
||||
|
||||
#### 2. Confirm/Install pyTorch 1.13 with CUDA 11.7 support
|
||||
#### 2. Confirm/Install PyTorch 2.0.1 with CUDA 11.8 support
|
||||
|
||||
If you are using InvokeAI 2.3 or higher, these will already be
|
||||
If you are using InvokeAI 3.0.2 or higher, these will already be
|
||||
installed. If not, you can check whether you have the needed libraries
|
||||
using a quick command. Activate the invokeai virtual environment,
|
||||
either by entering the "developer's console", or manually with a
|
||||
@@ -124,7 +133,7 @@ Then run the command:
|
||||
python -c 'exec("import torch\nprint(torch.__version__)")'
|
||||
```
|
||||
|
||||
If it prints __1.13.1+cu117__ you're good. If not, you can install the
|
||||
If it prints __2.0.1+cu118__ you're good. If not, you can install the
|
||||
most up to date libraries with this command:
|
||||
|
||||
```sh
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
---
|
||||
title: Overview
|
||||
---
|
||||
# Overview
|
||||
|
||||
We offer several ways to install InvokeAI, each one suited to your
|
||||
experience and preferences. We suggest that everyone start by
|
||||
@@ -15,7 +13,57 @@ See the [troubleshooting
|
||||
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
|
||||
install guide for frequently-encountered installation issues.
|
||||
|
||||
## Main Application
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### **[Automated Installer](010_INSTALL_AUTOMATED.md)**
|
||||
✅ This is the recommended installation method for first-time users.
|
||||
#### [Manual Installation](020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](050_INSTALLING_MODELS.md)
|
||||
|
||||
## :fontawesome-solid-computer: Hardware Requirements
|
||||
|
||||
### :octicons-cpu-24: System
|
||||
|
||||
You will need one of the following:
|
||||
|
||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||
only)
|
||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||
|
||||
**SDXL 1.0 Requirements**

To use SDXL, you must have one of the following:
|
||||
- :simple-nvidia: An NVIDIA-based graphics card with 8 GB or more VRAM memory.
|
||||
- :simple-amd: An AMD-based graphics card with 16 GB or more VRAM memory (Linux
|
||||
only)
|
||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||
|
||||
|
||||
### :fontawesome-solid-memory: Memory and Disk
|
||||
|
||||
- At least 12 GB Main Memory RAM.
|
||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||
all its dependencies.
|
||||
|
||||
We do **not recommend** the following video cards due to issues with their
|
||||
running in half-precision mode and having insufficient VRAM to render 512x512
|
||||
images in full-precision mode:
|
||||
|
||||
- NVIDIA 10xx series cards such as the 1080ti
|
||||
- GTX 1650 series cards
|
||||
- GTX 1660 series cards
|
||||
|
||||
## Installation options
|
||||
|
||||
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
||||
|
||||
@@ -24,6 +72,9 @@ install guide for frequently-encountered installation issues.
|
||||
"developer console" which will help us debug problems with you and
|
||||
give you to access experimental features.
|
||||
|
||||
|
||||
✅ This is the recommended option for first time users.
|
||||
|
||||
2. [Manual Installation](020_INSTALL_MANUAL.md)
|
||||
|
||||
In this method you will manually run the commands needed to install
|
||||
7
docs/javascripts/tablesort.js
Normal file
@@ -0,0 +1,7 @@
|
||||
document$.subscribe(function() {
|
||||
var tables = document.querySelectorAll("article table:not([class])")
|
||||
tables.forEach(function(table) {
|
||||
new Tablesort(table)
|
||||
})
|
||||
})
|
||||
|
||||
68
docs/nodes/NODES.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Using the Node Editor
|
||||
|
||||
The nodes editor is a blank canvas allowing for the use of individual functions and image transformations to control the image generation workflow. Nodes take in inputs on the left side of the node, and return an output on the right side of the node. A node graph is composed of multiple nodes that are connected together to create a workflow. Nodes' inputs and outputs are connected by dragging connectors from node to node. Inputs and outputs are color coded for ease of use.
|
||||
|
||||
To better understand how nodes are used, think of how an electric power bar works. It takes in one input (electricity from a wall outlet) and passes it to multiple devices through multiple outputs. Similarly, a node can have multiple inputs and outputs operating at the same (or different) times, but all node outputs pass information onward, just as a power bar passes electricity. Not all outputs are compatible with all inputs, however - each node has different constraints on how it expects to input/output information. In general, node outputs are color-coded to match compatible inputs of other nodes.
|
||||
|
||||
|
||||
If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](../help/diffusion.md) Understanding how diffusion works will enable you to more easily use the Nodes Editor and build workflows to suit your needs.
|
||||
|
||||
## Important Concepts
|
||||
|
||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
||||
|
||||
### Noise
|
||||
|
||||
An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.
|
||||
|
||||

|
||||
|
||||
### Text Prompt Conditioning
|
||||
|
||||
Conditioning is necessary for the latent diffusion process, whether empty or not. As a result, the Denoising node requires positive and negative conditioning inputs. Conditioning is reliant on a CLIP text encoder provided by the Model Loader node.
|
||||
|
||||

|
||||
|
||||
### Image to Latents & VAE
|
||||
|
||||
The ImageToLatents node takes in a pixel image and a VAE and outputs a latents. The LatentsToImage node does the opposite, taking in a latents and a VAE and outputting a pixel image.
|
||||
|
||||

|
||||
|
||||
### Defined & Random Seeds
|
||||
|
||||
It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.
|
||||
|
||||

|
||||
|
||||
### ControlNet
|
||||
|
||||
The ControlNet node outputs a Control, which can be provided as input to non-image *ToLatents nodes. Depending on the type of ControlNet desired, ControlNet nodes usually require an image processor node, such as a Canny Processor or Depth Processor, which prepares an input image for use with ControlNet.
|
||||
|
||||

|
||||
|
||||
### LoRA
|
||||
|
||||
The Lora Loader node lets you load a LoRA and pass it as output. A LoRA provides fine-tuned adjustments to the UNet and text encoder weights, augmenting the base model’s image and text vocabularies.
|
||||
|
||||

|
||||
|
||||
### Scaling
|
||||
|
||||
Use the ImageScale, ScaleLatents, and Upscale nodes to upscale images and/or latent images. Upscaling is the process of enlarging an image and adding more detail. The chosen method differs across contexts. However, be aware that latents are already noisy and compressed at their original resolution; scaling an image could produce more detailed results.
|
||||
|
||||

|
||||
|
||||
### Iteration + Multiple Images as Input
|
||||
|
||||
Iteration is a common concept in any processing, and means to repeat a process with given input. In nodes, you're able to use the Iterate node to iterate through collections usually gathered by the Collect node. The Iterate node has many potential uses, from processing a collection of images one after another, to varying seeds across multiple image generations and more. This screenshot demonstrates how to collect several images and use them in an image generation workflow.
|
||||
|
||||

|
||||
|
||||
### Multiple Image Generation + Random Seeds
|
||||
|
||||
Multiple image generation in the node editor is done using the RandomRange node. In this case, the 'Size' field represents the number of images to generate. As RandomRange produces a collection of integers, we need to add the Iterate node to iterate through the collection.
|
||||
|
||||
To control seeds across generations takes some care. The first row in the screenshot will generate multiple images with different seeds, but using the same RandomRange parameters across invocations will result in the same group of random seeds being used across the images, producing repeatable results. In the second row, adding the RandomInt node as input to RandomRange's 'Seed' edge point will ensure that seeds are varied across all images across invocations, producing varied results.
|
||||
|
||||

|
||||
80
docs/nodes/comfyToInvoke.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# ComfyUI to InvokeAI
|
||||
|
||||
If you're coming to InvokeAI from ComfyUI, welcome! You'll find things are similar but different - the good news is that you already know how things should work, and it's just a matter of wiring them up!
|
||||
|
||||
Some things to note:
|
||||
|
||||
- InvokeAI's nodes tend to be more granular than default nodes in Comfy. This means each node in Invoke will do a specific task and you might need to use multiple nodes to achieve the same result. The added granularity improves the control you have over your workflows.
|
||||
- InvokeAI's backend and ComfyUI's backend are very different which means Comfy workflows are not able to be imported into InvokeAI. However, we have created a [list of popular workflows](exampleWorkflows.md) for you to get started with Nodes in InvokeAI!
|
||||
|
||||
## Node Equivalents:
|
||||
|
||||
| Comfy UI Category | ComfyUI Node | Invoke Equivalent |
|
||||
|:---------------------------------- |:---------------------------------- | :----------------------------------|
|
||||
| Sampling |KSampler |Denoise Latents|
|
||||
| Sampling |Ksampler Advanced|Denoise Latents |
|
||||
| Loaders |Load Checkpoint | Main Model Loader _or_ SDXL Main Model Loader|
|
||||
| Loaders |Load VAE | VAE Loader |
|
||||
| Loaders |Load Lora | LoRA Loader _or_ SDXL Lora Loader|
|
||||
| Loaders |Load ControlNet Model | ControlNet|
|
||||
| Loaders |Load ControlNet Model (diff) | ControlNet|
|
||||
| Loaders |Load Style Model | Reference Only ControlNet will be coming in a future version of InvokeAI|
|
||||
| Loaders |unCLIPCheckpointLoader | N/A |
|
||||
| Loaders |GLIGENLoader | N/A |
|
||||
| Loaders |Hypernetwork Loader | N/A |
|
||||
| Loaders |Load Upscale Model | Occurs within "Upscale (RealESRGAN)"|
|
||||
|Conditioning |CLIP Text Encode (Prompt) | Compel (Prompt) or SDXL Compel (Prompt) |
|
||||
|Conditioning |CLIP Set Last Layer | CLIP Skip|
|
||||
|Conditioning |Conditioning (Average) | Use the .blend() feature of prompts |
|
||||
|Conditioning |Conditioning (Combine) | N/A |
|
||||
|Conditioning |Conditioning (Concat) | See the Prompt Tools Community Node|
|
||||
|Conditioning |Conditioning (Set Area) | N/A |
|
||||
|Conditioning |Conditioning (Set Mask) | Mask Edge |
|
||||
|Conditioning |CLIP Vision Encode | N/A |
|
||||
|Conditioning |unCLIPConditioning | N/A |
|
||||
|Conditioning |Apply ControlNet | ControlNet |
|
||||
|Conditioning |Apply ControlNet (Advanced) | ControlNet |
|
||||
|Latent |VAE Decode | Latents to Image|
|
||||
|Latent |VAE Encode | Image to Latents |
|
||||
|Latent |Empty Latent Image | Noise |
|
||||
|Latent |Upscale Latent |Resize Latents |
|
||||
|Latent |Upscale Latent By |Scale Latents |
|
||||
|Latent |Latent Composite | Blend Latents |
|
||||
|Latent |LatentCompositeMasked | N/A |
|
||||
|Image |Save Image | Image |
|
||||
|Image |Preview Image |Current |
|
||||
|Image |Load Image | Image|
|
||||
|Image |Empty Image| Blank Image |
|
||||
|Image |Invert Image | Invert Lerp Image |
|
||||
|Image |Batch Images | Link "Image" nodes into an "Image Collection" node |
|
||||
|Image |Pad Image for Outpainting | Outpainting is easily accomplished in the Unified Canvas |
|
||||
|Image |ImageCompositeMasked | Paste Image |
|
||||
|Image | Upscale Image | Resize Image |
|
||||
|Image | Upscale Image By | Upscale Image |
|
||||
|Image | Upscale Image (using Model) | Upscale Image |
|
||||
|Image | ImageBlur | Blur Image |
|
||||
|Image | ImageQuantize | N/A |
|
||||
|Image | ImageSharpen | N/A |
|
||||
|Image | Canny | Canny Processor |
|
||||
|Mask |Load Image (as Mask) | Image |
|
||||
|Mask |Convert Mask to Image | Image|
|
||||
|Mask |Convert Image to Mask | Image |
|
||||
|Mask |SolidMask | N/A |
|
||||
|Mask |InvertMask |Invert Lerp Image |
|
||||
|Mask |CropMask | Crop Image |
|
||||
|Mask |MaskComposite | Combine Mask |
|
||||
|Mask |FeatherMask | Blur Image |
|
||||
|Advanced | Load CLIP | Main Model Loader _or_ SDXL Main Model Loader|
|
||||
|Advanced | UNETLoader | Main Model Loader _or_ SDXL Main Model Loader|
|
||||
|Advanced | DualCLIPLoader | Main Model Loader _or_ SDXL Main Model Loader|
|
||||
|Advanced | Load Checkpoint | Main Model Loader _or_ SDXL Main Model Loader |
|
||||
|Advanced | ConditioningZeroOut | N/A |
|
||||
|Advanced | ConditioningSetTimestepRange | N/A |
|
||||
|Advanced | CLIPTextEncodeSDXLRefiner | Compel (Prompt) or SDXL Compel (Prompt) |
|
||||
|Advanced | CLIPTextEncodeSDXL |Compel (Prompt) or SDXL Compel (Prompt) |
|
||||
|Advanced | ModelMergeSimple | Model Merging is available in the Model Manager |
|
||||
|Advanced | ModelMergeBlocks | Model Merging is available in the Model Manager|
|
||||
|Advanced | CheckpointSave | Model saving is available in the Model Manager|
|
||||
|Advanced | CLIPMergeSimple | N/A |
|
||||
|
||||
|
||||
@@ -1,17 +1,202 @@
|
||||
# Community Nodes
|
||||
|
||||
These are nodes that have been developed by the community for the community. If you're not sure what a node is, you can learn more about nodes [here](overview.md).
|
||||
These are nodes that have been developed by the community, for the community. If you're not sure what a node is, you can learn more about nodes [here](overview.md).
|
||||
|
||||
If you'd like to submit a node for the community, please refer to the [node creation overview](overview.md).
|
||||
If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md).
|
||||
|
||||
To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations/` folder in your Invoke AI install location. Along with the node, an example node graph should be provided to help you get started with the node.
|
||||
To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations` folder in your Invoke AI install location. Along with the node, an example node graph should be provided to help you get started with the node.
|
||||
|
||||
To use a community node graph, download the the `.json` node graph file and load it into Invoke AI via the **Load Nodes** button on the Node Editor.
|
||||
|
||||
## List of Nodes
|
||||
## Community Nodes
|
||||
|
||||
### FaceTools
|
||||
|
||||
**Description:** FaceTools is a collection of nodes created to manipulate faces as you would in Unified Canvas. It includes FaceMask, FaceOff, and FacePlace. FaceMask autodetects a face in the image using MediaPipe and creates a mask from it. FaceOff similarly detects a face, then takes the face off of the image by adding a square bounding box around it and cropping/scaling it. FacePlace puts the bounded face image from FaceOff back onto the original image. Using these nodes with other inpainting node(s), you can put new faces on existing things, put new things around existing faces, and work closer with a face as a bounded image. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control on FaceMask and FaceOff. See GitHub repository below for usage examples.
|
||||
|
||||
**Node Link:** https://github.com/ymgenesis/FaceTools/
|
||||
|
||||
**FaceMask Output Examples**
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
--------------------------------
|
||||
### Super Cool Node Template
|
||||
### Ideal Size
|
||||
|
||||
**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/ideal-size-node
|
||||
|
||||
--------------------------------
|
||||
### Film Grain
|
||||
|
||||
**Description:** This node adds a film grain effect to the input image based on the weights, seeds, and blur radii parameters. It works with RGB input images only.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/film-grain-node
|
||||
|
||||
--------------------------------
|
||||
### Image Picker
|
||||
|
||||
**Description:** This InvokeAI node takes in a collection of images and randomly chooses one. This can be useful when you have a number of poses to choose from for a ControlNet node, or a number of input images for another purpose.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/image-picker-node
|
||||
|
||||
--------------------------------
|
||||
### Retroize
|
||||
|
||||
**Description:** Retroize is a collection of nodes for InvokeAI to "Retroize" images. Any image can be given a fresh coat of retro paint with these nodes, either from your gallery or from within the graph itself. It includes nodes to pixelize, quantize, palettize, and ditherize images; as well as to retrieve palettes from existing images.
|
||||
|
||||
**Node Link:** https://github.com/Ar7ific1al/invokeai-retroizeinode/
|
||||
|
||||
**Retroize Output Examples**
|
||||
|
||||

|
||||
|
||||
--------------------------------
|
||||
### GPT2RandomPromptMaker
|
||||
|
||||
**Description:** A node for InvokeAI that utilizes the GPT-2 language model to generate random prompts based on a provided seed and context.
|
||||
|
||||
**Node Link:** https://github.com/mickr777/GPT2RandomPromptMaker
|
||||
|
||||
**Output Examples**
|
||||
|
||||
Generated Prompt: An enchanted weapon will be usable by any character regardless of their alignment.
|
||||
|
||||

|
||||
|
||||
--------------------------------
|
||||
### Load Video Frame
|
||||
|
||||
**Description:** This is a video frame image provider + indexer/video creation nodes for hooking up to iterators and ranges and ControlNets and such for invokeAI node experimentation. Think animation + ControlNet outputs.
|
||||
|
||||
**Node Link:** https://github.com/helix4u/load_video_frame
|
||||
|
||||
**Example Node Graph:** https://github.com/helix4u/load_video_frame/blob/main/Example_Workflow.json
|
||||
|
||||
**Output Example:**
|
||||
|
||||

|
||||
[Full mp4 of Example Output test.mp4](https://github.com/helix4u/load_video_frame/blob/main/test.mp4)
|
||||
|
||||
--------------------------------
|
||||
|
||||
### Oobabooga
|
||||
|
||||
**Description:** Asks a local LLM running in Oobabooga's Text-Generation-Webui to write a prompt based on the user input.
|
||||
|
||||
**Link:** https://github.com/sammyf/oobabooga-node
|
||||
|
||||
|
||||
**Example:**
|
||||
|
||||
"describe a new mystical creature in its natural environment"
|
||||
|
||||
*can return*
|
||||
|
||||
"The mystical creature I am describing to you is called the "Glimmerwing". It is a majestic, iridescent being that inhabits the depths of the most enchanted forests and glimmering lakes. Its body is covered in shimmering scales that reflect every color of the rainbow, and it has delicate, translucent wings that sparkle like diamonds in the sunlight. The Glimmerwing's home is a crystal-clear lake, surrounded by towering trees with leaves that shimmer like jewels. In this serene environment, the Glimmerwing spends its days swimming gracefully through the water, chasing schools of glittering fish and playing with the gentle ripples of the lake's surface.
|
||||
As the sun sets, the Glimmerwing perches on a branch of one of the trees, spreading its wings to catch the last rays of light. The creature's scales glow softly, casting a rainbow of colors across the forest floor. The Glimmerwing sings a haunting melody, its voice echoing through the stillness of the night air. Its song is said to have the power to heal the sick and bring peace to troubled souls. Those who are lucky enough to hear the Glimmerwing's song are forever changed by its beauty and grace."
|
||||
|
||||

|
||||
|
||||
**Requirement**
|
||||
|
||||
A Text-Generation-Webui instance (it might work remotely too, but this is untested) and obviously InvokeAI 3.x
|
||||
|
||||
**Note**
|
||||
|
||||
This node works best with SDXL models, especially as the style can be described independently of the LLM's output.
|
||||
|
||||
--------------------------------
|
||||
### Depth Map from Wavefront OBJ
|
||||
|
||||
**Description:** Render depth maps from Wavefront .obj files (triangulated) using this simple 3D renderer utilizing numpy and matplotlib to compute and color the scene. There are simple parameters to change the FOV, camera position, and model orientation.
|
||||
|
||||
To be imported, an .obj must use triangulated meshes, so make sure to enable that option if exporting from a 3D modeling program. This renderer makes each triangle a solid color based on its average depth, so it will cause anomalies if your .obj has large triangles. In Blender, the Remesh modifier can be helpful to subdivide a mesh into small pieces that work well given these limitations.
|
||||
|
||||
**Node Link:** https://github.com/dwringer/depth-from-obj-node
|
||||
|
||||
**Example Usage:**
|
||||

|
||||
|
||||
--------------------------------
|
||||
### Enhance Image (simple adjustments)
|
||||
|
||||
**Description:** Boost or reduce color saturation, contrast, brightness, sharpness, or invert colors of any image at any stage with this simple wrapper for pillow [PIL]'s ImageEnhance module.
|
||||
|
||||
Color inversion is toggled with a simple switch, while each of the four enhancer modes are activated by entering a value other than 1 in each corresponding input field. Values less than 1 will reduce the corresponding property, while values greater than 1 will enhance it.
|
||||
|
||||
**Node Link:** https://github.com/dwringer/image-enhance-node
|
||||
|
||||
**Example Usage:**
|
||||

|
||||
|
||||
--------------------------------
|
||||
### Generative Grammar-Based Prompt Nodes
|
||||
|
||||
**Description:** This set of 3 nodes generates prompts from simple user-defined grammar rules (loaded from custom files - examples provided below). The prompts are made by recursively expanding a special template string, replacing nonterminal "parts-of-speech" until no more nonterminal terms remain in the string.
|
||||
|
||||
This includes 3 Nodes:
|
||||
- *Lookup Table from File* - loads a YAML file's "prompt" section (or those of a whole folder of YAMLs) into a JSON-ified dictionary (Lookups output)
|
||||
- *Lookups Entry from Prompt* - places a single entry in a new Lookups output under the specified heading
|
||||
- *Prompt from Lookup Table* - uses a Collection of Lookups as grammar rules from which to randomly generate prompts.
|
||||
|
||||
**Node Link:** https://github.com/dwringer/generative-grammar-prompt-nodes
|
||||
|
||||
**Example Usage:**
|
||||

|
||||
|
||||
--------------------------------
|
||||
### Image and Mask Composition Pack
|
||||
|
||||
**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate images centered on different parts of the seamless tiling.
|
||||
|
||||
This includes 4 Nodes:
|
||||
- *Text Mask (simple 2D)* - create and position a white on black (or black on white) line of text using any font locally available to Invoke.
|
||||
- *Image Compositor* - Take a subject from an image with a flat backdrop and layer it on another image using a chroma key or flood select background removal.
|
||||
- *Offset Latents* - Offset a latents tensor in the vertical and/or horizontal dimensions, wrapping it around.
|
||||
- *Offset Image* - Offset an image in the vertical and/or horizontal dimensions, wrapping it around.
|
||||
|
||||
**Node Link:** https://github.com/dwringer/composition-nodes
|
||||
|
||||
**Example Usage:**
|
||||

|
||||
|
||||
--------------------------------
|
||||
### Size Stepper Nodes
|
||||
|
||||
**Description:** This is a set of nodes for calculating the necessary size increments for doing upscaling workflows. Use the *Final Size & Orientation* node to enter your full size dimensions and orientation (portrait/landscape/random), then plug that and your initial generation dimensions into the *Ideal Size Stepper* and get 1, 2, or 3 intermediate pairs of dimensions for upscaling. Note this does not output the initial size or full size dimensions: the 1, 2, or 3 outputs of this node are only the intermediate sizes.
|
||||
|
||||
A third node is included, *Random Switch (Integers)*, which is just a generic version of Final Size with no orientation selection.
|
||||
|
||||
**Node Link:** https://github.com/dwringer/size-stepper-nodes
|
||||
|
||||
**Example Usage:**
|
||||

|
||||
|
||||
--------------------------------
|
||||
|
||||
### Text font to Image
|
||||
|
||||
**Description:** A text-font-to-image node for InvokeAI. It downloads a font to use (or, if the font is already in the cache, uses it from there). The text is always resized to the image size, but this can be controlled with padding; an optional second line of text is supported.
|
||||
|
||||
**Node Link:** https://github.com/mickr777/textfontimage
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
|
||||
Results after using the depth controlnet
|
||||
|
||||

|
||||

|
||||

|
||||
|
||||
--------------------------------
|
||||
|
||||
### Example Node Template
|
||||
|
||||
**Description:** This node allows you to do super cool things with InvokeAI.
|
||||
|
||||
@@ -21,8 +206,14 @@ To use a community node graph, download the the `.json` node graph file and load
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
![Invoke AI](https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png){: style="height:115px;width:240px"}
|
||||
|
||||
|
||||
## Disclaimer
|
||||
|
||||
The nodes linked have been developed and contributed by members of the Invoke AI community. While we strive to ensure the quality and safety of these contributions, we do not guarantee the reliability or security of the nodes. If you have issues or concerns with any of the nodes below, please raise it on GitHub or in the Discord.
|
||||
|
||||
|
||||
## Help
|
||||
If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
|
||||
|
||||
27
docs/nodes/contributingNodes.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# Contributing Nodes
|
||||
|
||||
To learn about the specifics of creating a new node, please visit our [Node creation documentation](../contributing/INVOCATIONS.md).
|
||||
|
||||
Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:
|
||||
|
||||
- Make sure the node is contained in a new Python (.py) file
|
||||
- Submit a pull request with a link to your node in GitHub against the `nodes` branch to add the node to the [Community Nodes](Community Nodes) list
|
||||
- Make sure you are following the template below and have provided all relevant details about the node and what it does.
|
||||
- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you might be asked for permission to include it in the core project.

### Community Node Template

```markdown
--------------------------------
### Super Cool Node Template

**Description:** This node allows you to do super cool things with InvokeAI.

**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py

**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json

**Output Examples**


```

`docs/nodes/defaultNodes.md` (new file, 97 lines)
@@ -0,0 +1,97 @@

# List of Default Nodes

The table below contains a list of the default nodes shipped with InvokeAI and their descriptions.

| Node <img width=160 align="right"> | Function |
| :---------------------------------- | :-------------------------------------------------------------------------------------- |
|Add Integers | Adds two numbers|
|Boolean Primitive Collection | A collection of boolean primitive values|
|Boolean Primitive | A boolean primitive value|
|Canny Processor | Canny edge detection for ControlNet|
|CLIP Skip | Skip layers in clip text_encoder model.|
|Collect | Collects values into a collection|
|Color Correct | Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image.|
|Color Primitive | A color primitive value|
|Compel Prompt | Parse prompt using compel package to conditioning.|
|Conditioning Primitive Collection | A collection of conditioning tensor primitive values|
|Conditioning Primitive | A conditioning tensor primitive value|
|Content Shuffle Processor | Applies content shuffle processing to image|
|ControlNet | Collects ControlNet info to pass to other nodes|
|OpenCV Inpaint | Simple inpaint using opencv.|
|Denoise Latents | Denoises noisy latents to decodable images|
|Divide Integers | Divides two numbers|
|Dynamic Prompt | Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator|
|Upscale (RealESRGAN) | Upscales an image using RealESRGAN.|
|Float Primitive Collection | A collection of float primitive values|
|Float Primitive | A float primitive value|
|Float Range | Creates a range|
|HED (softedge) Processor | Applies HED edge detection to image|
|Blur Image | Blurs an image|
|Extract Image Channel | Gets a channel from an image.|
|Image Primitive Collection | A collection of image primitive values|
|Convert Image Mode | Converts an image to a different mode.|
|Crop Image | Crops an image to a specified box. The box can be outside of the image.|
|Image Hue Adjustment | Adjusts the Hue of an image.|
|Inverse Lerp Image | Inverse linear interpolation of all pixels of an image|
|Image Primitive | An image primitive value|
|Lerp Image | Linear interpolation of all pixels of an image|
|Offset Image Channel | Add to or subtract from an image color channel by a uniform value.|
|Multiply Image Channel | Multiply or Invert an image color channel by a scalar value.|
|Multiply Images | Multiplies two images together using `PIL.ImageChops.multiply()`.|
|Blur NSFW Image | Add blur to NSFW-flagged images|
|Paste Image | Pastes an image into another image.|
|ImageProcessor | Base class for invocations that preprocess images for ControlNet|
|Resize Image | Resizes an image to specific dimensions|
|Scale Image | Scales an image by a factor|
|Image to Latents | Encodes an image into latents.|
|Add Invisible Watermark | Add an invisible watermark to an image|
|Solid Color Infill | Infills transparent areas of an image with a solid color|
|PatchMatch Infill | Infills transparent areas of an image using the PatchMatch algorithm|
|Tile Infill | Infills transparent areas of an image with tiles of the image|
|Integer Primitive Collection | A collection of integer primitive values|
|Integer Primitive | An integer primitive value|
|Iterate | Iterates over a list of items|
|Latents Primitive Collection | A collection of latents tensor primitive values|
|Latents Primitive | A latents tensor primitive value|
|Latents to Image | Generates an image from latents.|
|Leres (Depth) Processor | Applies leres processing to image|
|Lineart Anime Processor | Applies line art anime processing to image|
|Lineart Processor | Applies line art processing to image|
|LoRA Loader | Apply selected lora to unet and text_encoder.|
|Main Model Loader | Loads a main model, outputting its submodels.|
|Combine Mask | Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.|
|Mask Edge | Applies an edge mask to an image|
|Mask from Alpha | Extracts the alpha channel of an image as a mask.|
|Mediapipe Face Processor | Applies mediapipe face processing to image|
|Midas (Depth) Processor | Applies Midas depth processing to image|
|MLSD Processor | Applies MLSD processing to image|
|Multiply Integers | Multiplies two numbers|
|Noise | Generates latent noise.|
|Normal BAE Processor | Applies NormalBae processing to image|
|ONNX Latents to Image | Generates an image from latents.|
|ONNX Prompt (Raw) | A node to process inputs and produce outputs. May use dependency injection in __init__ to receive providers.|
|ONNX Text to Latents | Generates latents from conditionings.|
|ONNX Model Loader | Loads a main model, outputting its submodels.|
|Openpose Processor | Applies Openpose processing to image|
|PIDI Processor | Applies PIDI processing to image|
|Prompts from File | Loads prompts from a text file|
|Random Integer | Outputs a single random integer.|
|Random Range | Creates a collection of random numbers|
|Integer Range | Creates a range of numbers from start to stop with step|
|Integer Range of Size | Creates a range from start to start + size with step|
|Resize Latents | Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8.|
|SDXL Compel Prompt | Parse prompt using compel package to conditioning.|
|SDXL LoRA Loader | Apply selected lora to unet and text_encoder.|
|SDXL Main Model Loader | Loads an sdxl base model, outputting its submodels.|
|SDXL Refiner Compel Prompt | Parse prompt using compel package to conditioning.|
|SDXL Refiner Model Loader | Loads an sdxl refiner model, outputting its submodels.|
|Scale Latents | Scales latents by a given factor.|
|Segment Anything Processor | Applies segment anything processing to image|
|Show Image | Displays a provided image, and passes it forward in the pipeline.|
|Step Param Easing | Experimental per-step parameter easing for denoising steps|
|String Primitive Collection | A collection of string primitive values|
|String Primitive | A string primitive value|
|Subtract Integers | Subtracts two numbers|
|Tile Resample Processor | Tile resampler processor|
|VAE Loader | Loads a VAE model, outputting a VaeLoaderOutput|
|Zoe (Depth) Processor | Applies Zoe depth processing to image|
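
Several of the image nodes above lean directly on Pillow. For instance, the behavior of the *Multiply Images* node can be reproduced with `PIL.ImageChops.multiply()`; a minimal sketch, with placeholder file names:

```python
from PIL import Image, ImageChops

# Channel-wise multiply of two RGB images, as the Multiply Images node does.
# ImageChops.multiply requires matching modes and sizes, hence the resize.
a = Image.open("a.png").convert("RGB")
b = Image.open("b.png").convert("RGB").resize(a.size)
ImageChops.multiply(a, b).save("multiplied.png")
```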

`docs/nodes/exampleWorkflows.md` (new file, 15 lines)
@@ -0,0 +1,15 @@

# Example Workflows

TODO: Will update once uploading workflows is available.

## Text2Image

## Image2Image

## ControlNet

## Upscaling

## Inpainting / Outpainting

## LoRAs

@@ -1,41 +1,26 @@

# Nodes

## What are Nodes?
A node is a single operation that takes in inputs and returns
outputs. Multiple nodes can be linked together to create more
complex functionality. All InvokeAI features are added through nodes.

With nodes, you can easily extend the image generation capabilities of InvokeAI and build workflows that suit your needs.

You can read more about nodes and the node editor [here](../nodes/NODES.md).

To get started with nodes, take a look at some of our examples for [common workflows](../nodes/exampleWorkflows.md).

### Anatomy of a Node

Individual nodes are made up of the following:

- Inputs: Edge points on the left side of the node window where you connect outputs from other nodes.
- Outputs: Edge points on the right side of the node window where you connect to inputs on other nodes.
- Options: Various options which are either manually configured, or overridden by connecting an output from another node to the input.

## Downloading New Nodes
To download a new node, visit our list of [Community Nodes](../nodes/communityNodes.md). These are nodes that have been created by the community, for the community.

## Contributing Nodes

To learn about creating a new node, please visit our [Node creation documentation](../contributing/INVOCATIONS.md).

Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:
- Make sure the node is contained in a new Python (.py) file
- Submit a pull request with a link to your node in GitHub against the `nodes` branch to add the node to the [Community Nodes](communityNodes.md) list
- Make sure you are following the template below and have provided all relevant details about the node and what it does.
- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you might be asked for permission to include it in the core project.

### Community Node Template

```markdown
--------------------------------
### Super Cool Node Template

**Description:** This node allows you to do super cool things with InvokeAI.

**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py

**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json

**Output Examples**


```

@@ -17,67 +17,267 @@ We thank them for all of their time and hard work.

* @lstein (Lincoln Stein) - Co-maintainer
* @blessedcoolant - Co-maintainer
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
* @psychedelicious (Spencer Mabrito) - Web Team Leader
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @damian0815 - Attention Systems and Compel Maintainer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
* @keturn (Kevin Turner) - Diffusers
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Controlnet support
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
* @ryanjdick (Ryan Dick) - Machine Learning & Training
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
* @gogurt enjoyer - Discord moderator and end user support
* @whosawhatsis - Discord moderator and end user support
* @dwinrger - Discord moderator and end user support
* @526christian - Discord moderator and end user support

## **Full List of Contributors by Commit Name**

- [Sean McLellan](https://github.com/Oceanswave)
- [Kevin Gibbons](https://github.com/bakkot)
- [Tesseract Cat](https://github.com/TesseractCat)
- [blessedcoolant](https://github.com/blessedcoolant)
- [David Ford](https://github.com/david-ford)
- [yunsaki](https://github.com/yunsaki)
- [James Reynolds](https://github.com/magnusviri)
- [David Wager](https://github.com/maddavid123)
- [Jason Toffaletti](https://github.com/toffaletti)
- [tildebyte](https://github.com/tildebyte)
- [Cragin Godley](https://github.com/cgodley)
- [BlueAmulet](https://github.com/BlueAmulet)
- [Benjamin Warner](https://github.com/warner-benjamin)
- [Cora Johnson-Roberson](https://github.com/corajr)
- [veprogames](https://github.com/veprogames)
- [JigenD](https://github.com/JigenD)
- [Niek van der Maas](https://github.com/Niek)
- [Henry van Megen](https://github.com/hvanmegen)
- [Håvard Gulldahl](https://github.com/havardgulldahl)
- [greentext2](https://github.com/greentext2)
- [Simon Vans-Colina](https://github.com/simonvc)
- [Gabriel Rotbart](https://github.com/gabrielrotbart)
- [Eric Khun](https://github.com/erickhun)
- [Brent Ozar](https://github.com/BrentOzar)
- [nderscore](https://github.com/nderscore)
- [Mikhail Tishin](https://github.com/tishin)
- [Tom Elovi Spruce](https://github.com/ilovecomputers)
- [spezialspezial](https://github.com/spezialspezial)
- [Yosuke Shinya](https://github.com/shinya7y)
- [Andy Pilate](https://github.com/Cubox)
- [Muhammad Usama](https://github.com/SMUsamaShah)
- [Arturo Mendivil](https://github.com/artmen1516)
- [Paul Sajna](https://github.com/sajattack)
- [Samuel Husso](https://github.com/shusso)
- [nicolai256](https://github.com/nicolai256)
- [Mihai](https://github.com/mh-dm)
- [Any Winter](https://github.com/any-winter-4079)
- [Doggettx](https://github.com/doggettx)
- [Matthias Wild](https://github.com/mauwii)
- [Kyle Schouviller](https://github.com/kyle0654)
- [rabidcopy](https://github.com/rabidcopy)
- [Dominic Letz](https://github.com/dominicletz)
- [Dmitry T.](https://github.com/ArDiouscuros)
- [Kent Keirsey](https://github.com/hipsterusername)
- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
- [Eugene Brodsky](https://github.com/ebr)
- AbdBarho
- ablattmann
- AdamOStark
- Adam Rice
- Airton Silva
- Alexander Eichhorn
- Alexandre D. Roberge
- Andreas Rozek
- Andre LaBranche
- Andy Bearman
- Andy Luhrs
- Andy Pilate
- Any-Winter-4079
- apolinario
- ArDiouscuros
- Armando C. Santisbon
- Arthur Holstvoogd
- artmen1516
- Artur
- Arturo Mendivil
- Ben Alkov
- Benjamin Warner
- Bernard Maltais
- blessedcoolant
- blhook
- BlueAmulet
- Bouncyknighter
- Brandon Rising
- Brent Ozar
- Brian Racer
- bsilvereagle
- c67e708d
- CapableWeb
- Carson Katri
- Chloe
- Chris Dawson
- Chris Hayes
- Chris Jones
- chromaticist
- Claus F. Strasburger
- cmdr2
- cody
- Conor Reid
- Cora Johnson-Roberson
- coreco
- cosmii02
- cpacker
- Cragin Godley
- creachec
- Damian Stewart
- Daniel Manzke
- Danny Beer
- Dan Sully
- David Burnett
- David Ford
- David Regla
- David Wager
- Daya Adianto
- db3000
- Denis Olshin
- Dennis
- Dominic Letz
- DrGunnarMallon
- Edward Johan
- elliotsayes
- Elrik
- ElrikUnderlake
- Eric Khun
- Eric Wolf
- Eugene Brodsky
- ExperimentalCyborg
- Fabian Bahl
- Fabio 'MrWHO' Torchetti
- fattire
- Felipe Nogueira
- Félix Sanz
- figgefigge
- Gabriel Mackievicz Telles
- gabrielrotbart
- gallegonovato
- Gérald LONLAS
- GitHub Actions Bot
- gogurtenjoyer
- greentext2
- Gregg Helt
- H4rk
- Håvard Gulldahl
- henry
- Henry van Megen
- hipsterusername
- hj
- Hosted Weblate
- Iman Karim
- ismail ihsan bülbül
- Ivan Efimov
- jakehl
- Jakub Kolčář
- JamDon2
- James Reynolds
- Jan Skurovec
- Jari Vetoniemi
- Jason Toffaletti
- Jaulustus
- Jeff Mahoney
- jeremy
- Jeremy Clark
- JigenD
- Jim Hays
- Johan Roxendal
- Johnathon Selstad
- Jonathan
- Joseph Dries III
- JPPhoto
- jspraul
- Justin Wong
- Juuso V
- Kaspar Emanuel
- Katsuyuki-Karasawa
- Kent Keirsey
- Kevin Coakley
- Kevin Gibbons
- Kevin Schaul
- Kevin Turner
- krummrey
- Kyle Lacy
- Kyle Schouviller
- Lawrence Norton
- LemonDouble
- Leo Pasanen
- Lincoln Stein
- LoganPederson
- Lynne Whitehorn
- majick
- Marco Labarile
- Martin Kristiansen
- Mary Hipp Rogers
- mastercaster9000
- Matthias Wild
- michaelk71
- mickr777
- Mihai
- Mihail Dumitrescu
- Mikhail Tishin
- Millun Atluri
- Minjune Song
- mitien
- mofuzz
- Muhammad Usama
- Name
- _nderscore
- Netzer R
- Nicholas Koh
- Nicholas Körfer
- nicolai256
- Niek van der Maas
- noodlebox
- Nuno Coração
- ofirkris
- Olivier Louvignes
- owenvincent
- Patrick Esser
- Patrick Tien
- Patrick von Platen
- Paul Sajna
- pejotr
- Peter Baylies
- Peter Lin
- plucked
- prixt
- psychedelicious
- Rainer Bernhardt
- Riccardo Giovanetti
- Rich Jones
- rmagur1203
- Rob Baines
- Robert Bolender
- Robin Rombach
- Rohan Barar
- rpagliuca
- rromb
- Rupesh Sreeraman
- Ryan Cao
- Saifeddine
- Saifeddine ALOUI
- SammCheese
- Sammy
- sammyf
- Samuel Husso
- Scott Lahteine
- Sean McLellan
- Sebastian Aigner
- Sergey Borisov
- Sergey Krashevich
- Shapor Naghibzadeh
- Shawn Zhong
- Simon Vans-Colina
- skunkworxdark
- slashtechno
- spezialspezial
- ssantos
- StAlKeR7779
- Stephan Koglin-Fischer
- SteveCaruso
- Steve Martinelli
- Steven Frank
- System X - Files
- Taylor Kems
- techicode
- techybrain-dev
- tesseractcat
- thealanle
- Thomas
- tildebyte
- Tim Cabbage
- Tom
- Tom Elovi Spruce
- Tom Gouville
- tomosuto
- Travco
- Travis Palmer
- tyler
- unknown
- user1
- Vedant Madane
- veprogames
- wa.code
- wfng92
- whosawhatsis
- Will
- William Becher
- William Chong
- xra
- Yeung Yiu Hung
- ymgenesis
- Yorzaren
- Yosuke Shinya
- yun saki
- Zadagu
- zeptofine
- 冯不游
- 唐澤 克幸

## **Original CompVis Authors**

`flake.lock` (generated, 25 lines)
@@ -0,0 +1,25 @@

{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1690630721,
        "narHash": "sha256-Y04onHyBQT4Erfr2fc82dbJTfXGYrf4V0ysLUYnPOP8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "d2b52322f35597c62abf56de91b0236746b2a03d",
        "type": "github"
      },
      "original": {
        "id": "nixpkgs",
        "type": "indirect"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
`flake.nix` (new file, 91 lines)
@@ -0,0 +1,91 @@

# Important note: this flake does not attempt to create a fully isolated, 'pure'
# Python environment for InvokeAI. Instead, it depends on local invocations of
# virtualenv/pip to install the required (binary) packages, most importantly the
# prebuilt binary pytorch packages with CUDA support.
# ML Python packages with CUDA support, like pytorch, are notoriously expensive
# to compile, so that is purposely not what this flake does.

{
  description = "An (impure) flake to develop on InvokeAI.";

  outputs = { self, nixpkgs }:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs {
        inherit system;
        config.allowUnfree = true;
      };

      python = pkgs.python310;

      mkShell = { dir, install }:
        let
          setupScript = pkgs.writeScript "setup-invokai" ''
            # This must be sourced using 'source', not executed.
            ${python}/bin/python -m venv ${dir}
            ${dir}/bin/python -m pip install ${install}
            # ${dir}/bin/python -c 'import torch; assert(torch.cuda.is_available())'
            source ${dir}/bin/activate
          '';
        in
        pkgs.mkShell rec {
          buildInputs = with pkgs; [
            # Backend: graphics, CUDA.
            cudaPackages.cudnn
            cudaPackages.cuda_nvrtc
            cudatoolkit
            pkgconfig
            libconfig
            cmake
            blas
            freeglut
            glib
            gperf
            procps
            libGL
            libGLU
            linuxPackages.nvidia_x11
            python
            (opencv4.override {
              enableGtk3 = true;
              enableFfmpeg = true;
              enableCuda = true;
              enableUnfree = true;
            })
            stdenv.cc
            stdenv.cc.cc.lib
            xorg.libX11
            xorg.libXext
            xorg.libXi
            xorg.libXmu
            xorg.libXrandr
            xorg.libXv
            zlib

            # Pre-commit hooks.
            black

            # Frontend.
            yarn
            nodejs
          ];
          LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs;
          CUDA_PATH = pkgs.cudatoolkit;
          EXTRA_LDFLAGS = "-L${pkgs.linuxPackages.nvidia_x11}/lib";
          shellHook = ''
            if [[ -f "${dir}/bin/activate" ]]; then
              source "${dir}/bin/activate"
              echo "Using Python: $(which python)"
            else
              echo "Use 'source ${setupScript}' to set up the environment."
            fi
          '';
        };
    in
    {
      devShells.${system} = rec {
        develop = mkShell { dir = "venv"; install = "-e '.[xformers]' --extra-index-url https://download.pytorch.org/whl/cu118"; };
        default = develop;
      };
    };
}

@@ -46,6 +46,7 @@ if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build"
    pip install --user build
fi

rm -r ../build
python -m build --wheel --outdir dist/ ../.

# ----------------------
@@ -9,16 +9,20 @@ cd $scriptdir
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

MINIMUM_PYTHON_VERSION=3.9.0
MAXIMUM_PYTHON_VERSION=3.11.100
PYTHON=""
for candidate in python3.11 python3.10 python3.9 python3 python ; do
    if ppath=`which $candidate`; then
        # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
        # we check that this found executable can actually run
        if [ $($candidate --version &>/dev/null; echo ${PIPESTATUS}) -gt 0 ]; then continue; fi

        python_version=$($ppath -V | awk '{ print $2 }')
        if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
            if [ $(version $python_version) -le $(version "$MAXIMUM_PYTHON_VERSION") ]; then
                PYTHON=$ppath
                break
            fi
        fi
    fi
done
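
For reference, the `version` helper above flattens a dotted version string into a fixed-width integer so that plain numeric comparison works. A rough Python equivalent, for illustration only:

```python
def version_key(v: str) -> int:
    # Mirror the awk printf("%d%03d%03d%03d"): first component as-is,
    # the next three zero-padded to three digits (missing parts become 0).
    parts = (v.split(".") + ["0", "0", "0"])[:4]
    return int(parts[0] + "".join(p.zfill(3) for p in parts[1:]))

assert version_key("3.9.0") <= version_key("3.10.6") <= version_key("3.11.100")
```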

@@ -13,7 +13,7 @@ from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union

SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100"
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"
@@ -141,15 +141,16 @@ class Installer:

        # upgrade pip in Python 3.9 environments
        if int(platform.python_version_tuple()[1]) == 9:
            from plumbum import FG, local

            pip = local[get_pip_from_venv(venv_dir)]
            pip["install", "--upgrade", "pip"] & FG

        return venv_dir

    def install(
        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
    ) -> None:
        """
        Install the InvokeAI application into the given runtime path

@@ -167,7 +168,8 @@ class Installer:

        messages.welcome()

        default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
        self.dest = default_path if yes_to_all else messages.dest_path(root)

        # create the venv for the app
        self.venv = self.app_venv()
@@ -175,7 +177,7 @@ class Installer:
        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)

        # install dependencies and the InvokeAI application
        (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
        self.instance.install(
            extra_index_url,
            optional_modules,
@@ -188,6 +190,7 @@ class Installer:
        # run through the configuration flow
        self.instance.configure()


class InvokeAiInstance:
    """
    Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
@@ -196,7 +199,6 @@ class InvokeAiInstance:
    """

    def __init__(self, runtime: Path, venv: Path, version: str) -> None:
        self.runtime = runtime
        self.venv = venv
        self.pip = get_pip_from_venv(venv)
@@ -247,6 +249,9 @@ class InvokeAiInstance:
        pip[
            "install",
            "--require-virtualenv",
            "numpy~=1.24.0",  # choose versions that won't be uninstalled during phase 2
            "urllib3~=1.26.0",
            "requests~=2.28.0",
            "torch~=2.0.0",
            "torchmetrics==0.11.4",
            "torchvision>=0.14.1",
@@ -312,7 +317,7 @@ class InvokeAiInstance:
            "install",
            "--require-virtualenv",
            "--use-pep517",
            str(src) + (optional_modules if optional_modules else ""),
            "--find-links" if find_links is not None else None,
            find_links,
            "--extra-index-url" if extra_index_url is not None else None,
@@ -329,21 +334,21 @@ class InvokeAiInstance:

        # set sys.argv to a consistent state
        new_argv = [sys.argv[0]]
        for i in range(1, len(sys.argv)):
            el = sys.argv[i]
            if el in ["-r", "--root"]:
                new_argv.append(el)
                new_argv.append(sys.argv[i + 1])
            elif el in ["-y", "--yes", "--yes-to-all"]:
                new_argv.append(el)
        sys.argv = new_argv

        import requests  # to catch download exceptions
        from messages import introduction

        introduction()

        from invokeai.frontend.install.invokeai_configure import invokeai_configure

        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
@@ -353,16 +358,16 @@ class InvokeAiInstance:
            invokeai_configure()
            succeeded = True
        except requests.exceptions.ConnectionError as e:
            print(f"\nA network error was encountered during configuration and download: {str(e)}")
        except OSError as e:
            print(f"\nAn OS error was encountered during configuration and download: {str(e)}")
        except Exception as e:
            print(f"\nA problem was encountered during the configuration and download steps: {str(e)}")
        finally:
            if not succeeded:
                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
                print("and choose option 7 to fix a broken install, optionally followed by option 5 to install models.")
                print("Alternatively you can relaunch the installer.")

    def install_user_scripts(self):
        """
@@ -371,11 +376,11 @@ class InvokeAiInstance:

        ext = "bat" if OS == "Windows" else "sh"

        # scripts = ['invoke', 'update']
        scripts = ["invoke"]

        for script in scripts:
            src = Path(__file__).parent / ".." / "templates" / f"{script}.{ext}.in"
            dest = self.runtime / f"{script}.{ext}"
            shutil.copy(src, dest)
            os.chmod(dest, 0o0755)
@@ -402,7 +407,7 @@ def get_pip_from_venv(venv_path: Path) -> str:
    :rtype: str
    """

    pip = "Scripts\\pip.exe" if OS == "Windows" else "bin/pip"
    return str(venv_path.expanduser().resolve() / pip)

@@ -420,11 +425,7 @@ def set_sys_path(venv_path: Path) -> None:
    # filter out any paths in sys.path that may be system- or user-wide
    # but leave the temporary bootstrap virtualenv as it contains packages we
    # temporarily need at install time
    sys.path = list(filter(lambda p: not p.endswith("-packages") or p.find(BOOTSTRAP_VENV_PREFIX) != -1, sys.path))

    # determine site-packages/lib directory location for the venv
    lib = "Lib" if OS == "Windows" else f"lib/python{sys.version_info.major}.{sys.version_info.minor}"
@@ -433,7 +434,7 @@ def set_sys_path(venv_path: Path) -> None:
    sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))


def get_torch_source() -> (Union[str, None], str):
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
@@ -454,16 +455,19 @@ def get_torch_source() -> (Union[str, None], str):
    device = graphical_accelerator()

    url = None
    optional_modules = "[onnx]"
    if OS == "Linux":
        if device == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.4.2"
        elif device == "cpu":
            url = "https://download.pytorch.org/whl/cpu"

    if device == "cuda":
        url = "https://download.pytorch.org/whl/cu118"
        optional_modules = "[xformers,onnx-cuda]"
    if device == "cuda_and_dml":
        url = "https://download.pytorch.org/whl/cu118"
        optional_modules = "[xformers,onnx-directml]"

    # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13

@@ -3,6 +3,7 @@ InvokeAI Installer
"""

import argparse
import os
from pathlib import Path

from installer import Installer
@@ -15,7 +16,7 @@ if __name__ == "__main__":
        dest="root",
        type=str,
        help="Destination path for installation",
        default=os.environ.get("INVOKEAI_ROOT") or "~/invokeai",
    )
    parser.add_argument(
        "-y",
@@ -41,14 +42,14 @@ if __name__ == "__main__":
        type=Path,
        default=None,
    )

    args = parser.parse_args()

    inst = Installer()

    try:
        inst.install(**args.__dict__)
    except KeyboardInterrupt:
        print("\n")
        print("Ctrl-C pressed. Aborting.")
        print("Come back soon!")
@@ -36,13 +36,15 @@ else:


def welcome():
    @group()
    def text():
        if (platform_specific := _platform_specific_help()) != "":
            yield platform_specific
            yield ""
        yield Text.from_markup(
            "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
            justify="center",
        )

    console.rule()
    print(
@@ -58,6 +60,7 @@ def welcome():
    )
    console.line()


def confirm_install(dest: Path) -> bool:
    if dest.exists():
        print(f":exclamation: Directory {dest} already exists :exclamation:")
@@ -67,7 +70,7 @@ def confirm_install(dest: Path) -> bool:
        )
    else:
        print(f"InvokeAI will be installed in {dest}")
        dest_confirmed = not Confirm.ask("Would you like to pick a different location?", default=False)
    console.line()

    return dest_confirmed
@@ -87,12 +90,11 @@ def dest_path(dest=None) -> Path:
        dest = Path(dest).expanduser().resolve()
    else:
        dest = Path.cwd().expanduser().resolve()
    prev_dest = init_path = dest

    dest_confirmed = confirm_install(dest)

    while not dest_confirmed:
        # if the given destination already exists, the starting point for browsing is its parent directory.
        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
@@ -107,9 +109,9 @@ def dest_path(dest=None) -> Path:
        )

        console.line()
        console.print(f"[orange3]Please select the destination directory for the installation:[/] \\[{browse_start}]: ")
        selected = prompt(
            ">>> ",
            complete_in_thread=True,
            completer=path_completer,
            default=str(browse_start) + os.sep,
@@ -132,14 +134,14 @@ def dest_path(dest=None) -> Path:
    try:
        dest.mkdir(exist_ok=True, parents=True)
        return dest
    except PermissionError:
        console.print(
            f"Failed to create directory {dest} due to insufficient permissions",
            style=Style(color="red"),
            highlight=True,
        )
    except OSError:
        console.print_exception()

    if Confirm.ask("Would you like to try again?"):
        dest_path(init_path)
@@ -165,6 +167,10 @@ def graphical_accelerator():
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
        "cuda",
    )
    nvidia_with_dml = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
        "cuda_and_dml",
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
        "rocm",
@@ -179,7 +185,7 @@ def graphical_accelerator():
    )

    if OS == "Windows":
        options = [nvidia, nvidia_with_dml, cpu]
    if OS == "Linux":
        options = [nvidia, amd, cpu]
    elif OS == "Darwin":
@@ -300,15 +306,20 @@ def introduction() -> None:
    )
    console.line(2)


def _platform_specific_help() -> str:
    if OS == "Darwin":
        text = Text.from_markup(
            """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
        )
    elif OS == "Windows":
        text = Text.from_markup(
            """[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
  1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
     enable long path support on your system.
  2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
     [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
        )
    else:
        text = ""
    return text
@@ -8,16 +8,13 @@ Preparations:
   to work. Instructions are given here:
   https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/

   Before you start the installer, please open up your system's command
   line window (Terminal or Command) and type the commands:

   python --version

   If all is well, it will print "Python 3.X.X", where the version number
   is at least 3.9.*, and not higher than 3.11.*.

   If this works, check the version of the Python package manager, pip:
@@ -41,7 +41,7 @@ IF /I "%choice%" == "1" (
    python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
) ELSE IF /I "%choice%" == "7" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight
) ELSE IF /I "%choice%" == "8" (
    echo Developer Console
    echo Python command is:

@@ -82,7 +82,7 @@ do_choice() {
    7)
        clear
        printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n"
        invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights
        ;;
    8)
        clear
@@ -1,7 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from logging import Logger
import os
from invokeai.app.services.board_image_record_storage import (
    SqliteBoardImageRecordStorage,
)
@@ -29,6 +28,7 @@ from ..services.invoker import Invoker
from ..services.processor import DefaultInvocationProcessor
from ..services.sqlite import SqliteItemStorage
from ..services.model_manager_service import ModelManagerService
from ..services.invocation_stats import InvocationStatsService
from .events import FastAPIEventService

@@ -44,7 +44,7 @@ def check_internet() -> bool:
    try:
        urllib.request.urlopen(host, timeout=1)
        return True
    except Exception:
        return False

@@ -54,11 +54,12 @@ logger = InvokeAILogger.getLogger()
class ApiDependencies:
    """Contains and initializes all dependencies for the API"""

    invoker: Invoker

    @staticmethod
    def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
        logger.info(f"InvokeAI version {__version__}")
        logger.info(f"Root directory = {str(config.root_path)}")
        logger.debug(f"Internet connectivity is {config.internet_available}")

        events = FastAPIEventService(event_handler_id)
@@ -66,8 +67,9 @@ class ApiDependencies:
        output_folder = config.output_path

        # TODO: build a file/path manager?
        db_path = config.db_path
        db_path.parent.mkdir(parents=True, exist_ok=True)
        db_location = str(db_path)

        graph_execution_manager = SqliteItemStorage[GraphExecutionState](
            filename=db_location, table_name="graph_executions"
@@ -77,9 +79,7 @@ class ApiDependencies:
        image_record_storage = SqliteImageRecordStorage(db_location)
        image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
        names = SimpleNameService()
        latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents"))

        board_record_storage = SqliteBoardRecordStorage(db_location)
        board_image_record_storage = SqliteBoardImageRecordStorage(db_location)
@@ -124,12 +124,11 @@ class ApiDependencies:
            boards=boards,
            board_images=board_images,
            queue=MemoryInvocationQueue(),
            graph_library=SqliteItemStorage[LibraryGraph](filename=db_location, table_name="graphs"),
            graph_execution_manager=graph_execution_manager,
            processor=DefaultInvocationProcessor(),
            configuration=config,
            performance_statistics=InvocationStatsService(graph_execution_manager),
            logger=logger,
        )

@@ -1,13 +1,20 @@
import typing
from enum import Enum
from pathlib import Path

from fastapi import Body
from fastapi.routing import APIRouter
from pydantic import BaseModel, Field

from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.patchmatch import PatchMatch
from invokeai.backend.image_util.safety_checker import SafetyChecker
from invokeai.backend.util.logging import logging
from invokeai.version import __version__

from ..dependencies import ApiDependencies


class LogLevel(int, Enum):
    NotSet = logging.NOTSET
@@ -16,7 +23,13 @@ class LogLevel(int, Enum):
    Warning = logging.WARNING
    Error = logging.ERROR
    Critical = logging.CRITICAL


class Upscaler(BaseModel):
    upscaling_method: str = Field(description="Name of upscaling method")
    upscaling_models: list[str] = Field(description="List of upscaling models for this method")


app_router = APIRouter(prefix="/v1/app", tags=["app"])

@@ -30,43 +43,62 @@ class AppConfig(BaseModel):
    """App Config Response"""

    infill_methods: list[str] = Field(description="List of available infill methods")
    upscaling_methods: list[Upscaler] = Field(description="List of upscaling methods")
    nsfw_methods: list[str] = Field(description="List of NSFW checking methods")
    watermarking_methods: list[str] = Field(description="List of invisible watermark methods")


@app_router.get("/version", operation_id="app_version", status_code=200, response_model=AppVersion)
async def get_version() -> AppVersion:
    return AppVersion(version=__version__)


@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
async def get_config() -> AppConfig:
    infill_methods = ["tile", "lama", "cv2"]
    if PatchMatch.patchmatch_available():
        infill_methods.append("patchmatch")

    upscaling_models = []
    for model in typing.get_args(ESRGAN_MODELS):
        upscaling_models.append(str(Path(model).stem))
    upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)

    nsfw_methods = []
    if SafetyChecker.safety_checker_available():
        nsfw_methods.append("nsfw_checker")

    watermarking_methods = []
    if InvisibleWatermark.invisible_watermark_available():
        watermarking_methods.append("invisible_watermark")

    return AppConfig(
        infill_methods=infill_methods,
        upscaling_methods=[upscaler],
        nsfw_methods=nsfw_methods,
        watermarking_methods=watermarking_methods,
    )


@app_router.get(
    "/logging",
    operation_id="get_log_level",
    responses={200: {"description": "The operation was successful"}},
    response_model=LogLevel,
)
async def get_log_level() -> LogLevel:
    """Returns the log level"""
    return LogLevel(ApiDependencies.invoker.services.logger.level)


@app_router.post(
    "/logging",
    operation_id="set_log_level",
    responses={200: {"description": "The operation was successful"}},
    response_model=LogLevel,
)
async def set_log_level(
    level: LogLevel = Body(description="New log verbosity level"),
) -> LogLevel:
    """Sets the log verbosity level"""
    ApiDependencies.invoker.services.logger.setLevel(level)

@@ -1,24 +1,30 @@
from fastapi import Body, HTTPException
from fastapi.routing import APIRouter
from invokeai.app.services.board_record_storage import BoardRecord, BoardChanges
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
from invokeai.app.services.models.board_record import BoardDTO
from invokeai.app.services.models.image_record import ImageDTO
from pydantic import BaseModel, Field

from ..dependencies import ApiDependencies

board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"])


class AddImagesToBoardResult(BaseModel):
    board_id: str = Field(description="The id of the board the images were added to")
    added_image_names: list[str] = Field(description="The image names that were added to the board")


class RemoveImagesFromBoardResult(BaseModel):
    removed_image_names: list[str] = Field(description="The image names that were removed from their board")


@board_images_router.post(
    "/",
    operation_id="add_image_to_board",
    responses={
        201: {"description": "The image was added to a board successfully"},
    },
    status_code=201,
)
async def add_image_to_board(
    board_id: str = Body(description="The id of the board to add to"),
    image_name: str = Body(description="The name of the image to add"),
):
@@ -28,28 +34,79 @@ async def create_board_image(
            board_id=board_id, image_name=image_name
        )
        return result
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to add image to board")


@board_images_router.delete(
    "/",
    operation_id="remove_image_from_board",
    responses={
        201: {"description": "The image was removed from the board successfully"},
    },
    status_code=201,
)
async def remove_image_from_board(
    image_name: str = Body(description="The name of the image to remove", embed=True),
):
    """Removes an image from its board, if it had one"""
    try:
        result = ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
        return result
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to remove image from board")


@board_images_router.post(
    "/batch",
    operation_id="add_images_to_board",
    responses={
        201: {"description": "Images were added to board successfully"},
    },
    status_code=201,
    response_model=AddImagesToBoardResult,
)
async def add_images_to_board(
    board_id: str = Body(description="The id of the board to add to"),
    image_names: list[str] = Body(description="The names of the images to add", embed=True),
) -> AddImagesToBoardResult:
    """Adds a list of images to a board"""
    try:
        added_image_names: list[str] = []
        for image_name in image_names:
            try:
                ApiDependencies.invoker.services.board_images.add_image_to_board(
                    board_id=board_id, image_name=image_name
                )
                added_image_names.append(image_name)
            except Exception:
                pass
        return AddImagesToBoardResult(board_id=board_id, added_image_names=added_image_names)
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to add images to board")


@board_images_router.post(
    "/batch/delete",
    operation_id="remove_images_from_board",
    responses={
        201: {"description": "Images were removed from board successfully"},
    },
    status_code=201,
    response_model=RemoveImagesFromBoardResult,
)
async def remove_images_from_board(
    image_names: list[str] = Body(description="The names of the images to remove", embed=True),
) -> RemoveImagesFromBoardResult:
    """Removes a list of images from their board, if they had one"""
    try:
        removed_image_names: list[str] = []
        for image_name in image_names:
            try:
                ApiDependencies.invoker.services.board_images.remove_image_from_board(image_name=image_name)
                removed_image_names.append(image_name)
            except Exception:
                pass
        return RemoveImagesFromBoardResult(removed_image_names=removed_image_names)
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to remove images from board")
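
For illustration, the new batch endpoint can be exercised with a plain HTTP call. This is a sketch only: it assumes a local InvokeAI server on the default port with the routers mounted under `/api`, and the board id and image names are placeholders:

```python
import requests

# Hypothetical values: a real board_id and real image names come from the API/UI.
resp = requests.post(
    "http://127.0.0.1:9090/api/v1/board_images/batch",
    json={"board_id": "some-board-id", "image_names": ["a.png", "b.png"]},
    timeout=10,
)
# On success the response body should carry the AddImagesToBoardResult fields.
print(resp.status_code, resp.json())
```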
@@ -18,9 +18,7 @@ class DeleteBoardResult(BaseModel):
|
||||
deleted_board_images: list[str] = Field(
|
||||
description="The image names of the board-images relationships that were deleted."
|
||||
)
|
||||
deleted_images: list[str] = Field(
|
||||
description="The names of the images that were deleted."
|
||||
)
|
||||
deleted_images: list[str] = Field(description="The names of the images that were deleted.")
|
||||
|
||||
|
||||
@boards_router.post(
|
||||
@@ -39,7 +37,7 @@ async def create_board(
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.boards.create(board_name=board_name)
|
||||
return result
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to create board")
|
||||
|
||||
|
||||
@@ -52,7 +50,7 @@ async def get_board(
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.boards.get_dto(board_id=board_id)
|
||||
return result
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404, detail="Board not found")
|
||||
|
||||
|
||||
@@ -73,22 +71,16 @@ async def update_board(
|
||||
) -> BoardDTO:
|
||||
"""Updates a board"""
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.boards.update(
|
||||
board_id=board_id, changes=changes
|
||||
)
|
||||
result = ApiDependencies.invoker.services.boards.update(board_id=board_id, changes=changes)
|
||||
return result
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to update board")
|
||||
|
||||
|
||||
@boards_router.delete(
|
||||
"/{board_id}", operation_id="delete_board", response_model=DeleteBoardResult
|
||||
)
|
||||
@boards_router.delete("/{board_id}", operation_id="delete_board", response_model=DeleteBoardResult)
|
||||
async def delete_board(
|
||||
board_id: str = Path(description="The id of board to delete"),
|
||||
include_images: Optional[bool] = Query(
|
||||
description="Permanently delete all images on the board", default=False
|
||||
),
|
||||
include_images: Optional[bool] = Query(description="Permanently delete all images on the board", default=False),
|
||||
) -> DeleteBoardResult:
|
||||
"""Deletes a board"""
|
||||
try:
|
||||
@@ -96,9 +88,7 @@ async def delete_board(
|
||||
deleted_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.images.delete_images_on_board(
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.images.delete_images_on_board(board_id=board_id)
|
||||
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
|
||||
return DeleteBoardResult(
|
||||
board_id=board_id,
|
||||
@@ -115,7 +105,7 @@ async def delete_board(
|
||||
deleted_board_images=deleted_board_images,
|
||||
deleted_images=[],
|
||||
)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to delete board")
|
||||
|
||||
|
||||
@@ -127,9 +117,7 @@ async def delete_board(
|
||||
async def list_boards(
|
||||
all: Optional[bool] = Query(default=None, description="Whether to list all boards"),
|
||||
offset: Optional[int] = Query(default=None, description="The page offset"),
|
||||
limit: Optional[int] = Query(
|
||||
default=None, description="The number of boards per page"
|
||||
),
|
||||
limit: Optional[int] = Query(default=None, description="The number of boards per page"),
|
||||
) -> Union[OffsetPaginatedResults[BoardDTO], list[BoardDTO]]:
|
||||
"""Gets a list of boards"""
|
||||
if all:
|
||||
|
@@ -1,31 +1,31 @@
import io
from typing import Optional

from PIL import Image
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
from pydantic import BaseModel, Field

from invokeai.app.invocations.metadata import ImageMetadata
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
from invokeai.app.services.item_storage import PaginatedResults
from invokeai.app.services.models.image_record import (
    ImageDTO,
    ImageRecordChanges,
    ImageUrlsDTO,
)

from ..dependencies import ApiDependencies

images_router = APIRouter(prefix="/v1/images", tags=["images"])


# images are immutable; set a high max-age
IMAGE_MAX_AGE = 31536000


@images_router.post(
    "/",
    "/upload",
    operation_id="upload_image",
    responses={
        201: {"description": "The image was uploaded successfully"},
@@ -40,9 +40,9 @@ async def upload_image(
    response: Response,
    image_category: ImageCategory = Query(description="The category of the image"),
    is_intermediate: bool = Query(description="Whether this is an intermediate image"),
    session_id: Optional[str] = Query(
        default=None, description="The session ID associated with this upload, if any"
    ),
    board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
    session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
    crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
) -> ImageDTO:
    """Uploads an image"""
    if not file.content_type.startswith("image"):
@@ -52,7 +52,10 @@ async def upload_image(

    try:
        pil_image = Image.open(io.BytesIO(contents))
    except:
        if crop_visible:
            bbox = pil_image.getbbox()
            pil_image = pil_image.crop(bbox)
    except Exception:
        # Error opening the image
        raise HTTPException(status_code=415, detail="Failed to read image")

@@ -62,6 +65,7 @@ async def upload_image(
            image_origin=ResourceOrigin.EXTERNAL,
            image_category=image_category,
            session_id=session_id,
            board_id=board_id,
            is_intermediate=is_intermediate,
        )

@@ -69,11 +73,11 @@ async def upload_image(
        response.headers["Location"] = image_dto.image_url

        return image_dto
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to create image")


@images_router.delete("/{image_name}", operation_id="delete_image")
@images_router.delete("/i/{image_name}", operation_id="delete_image")
async def delete_image(
    image_name: str = Path(description="The name of the image to delete"),
) -> None:
@@ -81,7 +85,7 @@ async def delete_image(

    try:
        ApiDependencies.invoker.services.images.delete(image_name)
    except Exception as e:
    except Exception:
        # TODO: Does this need any exception handling at all?
        pass

@@ -93,32 +97,30 @@ async def clear_intermediates() -> int:
    try:
        count_deleted = ApiDependencies.invoker.services.images.delete_intermediates()
        return count_deleted
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to clear intermediates")
        pass


@images_router.patch(
    "/{image_name}",
    "/i/{image_name}",
    operation_id="update_image",
    response_model=ImageDTO,
)
async def update_image(
    image_name: str = Path(description="The name of the image to update"),
    image_changes: ImageRecordChanges = Body(
        description="The changes to apply to the image"
    ),
    image_changes: ImageRecordChanges = Body(description="The changes to apply to the image"),
) -> ImageDTO:
    """Updates an image"""

    try:
        return ApiDependencies.invoker.services.images.update(image_name, image_changes)
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=400, detail="Failed to update image")


@images_router.get(
    "/{image_name}",
    "/i/{image_name}",
    operation_id="get_image_dto",
    response_model=ImageDTO,
)
@@ -129,12 +131,12 @@ async def get_image_dto(

    try:
        return ApiDependencies.invoker.services.images.get_dto(image_name)
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=404)


@images_router.get(
    "/{image_name}/metadata",
    "/i/{image_name}/metadata",
    operation_id="get_image_metadata",
    response_model=ImageMetadata,
)
@@ -145,12 +147,13 @@ async def get_image_metadata(

    try:
        return ApiDependencies.invoker.services.images.get_metadata(image_name)
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=404)


@images_router.get(
    "/{image_name}/full",
@images_router.api_route(
    "/i/{image_name}/full",
    methods=["GET", "HEAD"],
    operation_id="get_image_full",
    response_class=Response,
    responses={
@@ -180,12 +183,12 @@ async def get_image_full(
        )
        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
        return response
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=404)


@images_router.get(
    "/{image_name}/thumbnail",
    "/i/{image_name}/thumbnail",
    operation_id="get_image_thumbnail",
    response_class=Response,
    responses={
@@ -202,23 +205,19 @@ async def get_image_thumbnail(
    """Gets a thumbnail image file"""

    try:
        path = ApiDependencies.invoker.services.images.get_path(
            image_name, thumbnail=True
        )
        path = ApiDependencies.invoker.services.images.get_path(image_name, thumbnail=True)
        if not ApiDependencies.invoker.services.images.validate_path(path):
            raise HTTPException(status_code=404)

        response = FileResponse(
            path, media_type="image/webp", content_disposition_type="inline"
        )
        response = FileResponse(path, media_type="image/webp", content_disposition_type="inline")
        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
        return response
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=404)


@images_router.get(
    "/{image_name}/urls",
    "/i/{image_name}/urls",
    operation_id="get_image_urls",
    response_model=ImageUrlsDTO,
)
@@ -229,15 +228,13 @@ async def get_image_urls(

    try:
        image_url = ApiDependencies.invoker.services.images.get_url(image_name)
        thumbnail_url = ApiDependencies.invoker.services.images.get_url(
            image_name, thumbnail=True
        )
        thumbnail_url = ApiDependencies.invoker.services.images.get_url(image_name, thumbnail=True)
        return ImageUrlsDTO(
            image_name=image_name,
            image_url=image_url,
            thumbnail_url=thumbnail_url,
        )
    except Exception as e:
    except Exception:
        raise HTTPException(status_code=404)


@@ -247,15 +244,9 @@ async def get_image_urls(
    response_model=OffsetPaginatedResults[ImageDTO],
)
async def list_image_dtos(
    image_origin: Optional[ResourceOrigin] = Query(
        default=None, description="The origin of images to list."
    ),
    categories: Optional[list[ImageCategory]] = Query(
        default=None, description="The categories of image to include."
    ),
    is_intermediate: Optional[bool] = Query(
        default=None, description="Whether to list intermediate images."
    ),
    image_origin: Optional[ResourceOrigin] = Query(default=None, description="The origin of images to list."),
    categories: Optional[list[ImageCategory]] = Query(default=None, description="The categories of image to include."),
    is_intermediate: Optional[bool] = Query(default=None, description="Whether to list intermediate images."),
    board_id: Optional[str] = Query(
        default=None,
        description="The board id to filter by. Use 'none' to find images without a board.",
@@ -275,3 +266,62 @@ async def list_image_dtos(
    )

    return image_dtos


class DeleteImagesFromListResult(BaseModel):
    deleted_images: list[str]


@images_router.post("/delete", operation_id="delete_images_from_list", response_model=DeleteImagesFromListResult)
async def delete_images_from_list(
    image_names: list[str] = Body(description="The list of names of images to delete", embed=True),
) -> DeleteImagesFromListResult:
    try:
        deleted_images: list[str] = []
        for image_name in image_names:
            try:
                ApiDependencies.invoker.services.images.delete(image_name)
                deleted_images.append(image_name)
            except Exception:
                pass
        return DeleteImagesFromListResult(deleted_images=deleted_images)
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to delete images")


class ImagesUpdatedFromListResult(BaseModel):
    updated_image_names: list[str] = Field(description="The image names that were updated")


@images_router.post("/star", operation_id="star_images_in_list", response_model=ImagesUpdatedFromListResult)
async def star_images_in_list(
    image_names: list[str] = Body(description="The list of names of images to star", embed=True),
) -> ImagesUpdatedFromListResult:
    try:
        updated_image_names: list[str] = []
        for image_name in image_names:
            try:
                ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=True))
                updated_image_names.append(image_name)
            except Exception:
                pass
        return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to star images")


@images_router.post("/unstar", operation_id="unstar_images_in_list", response_model=ImagesUpdatedFromListResult)
async def unstar_images_in_list(
    image_names: list[str] = Body(description="The list of names of images to unstar", embed=True),
) -> ImagesUpdatedFromListResult:
    try:
        updated_image_names: list[str] = []
        for image_name in image_names:
            try:
                ApiDependencies.invoker.services.images.update(image_name, changes=ImageRecordChanges(starred=False))
                updated_image_names.append(image_name)
            except Exception:
                pass
        return ImagesUpdatedFromListResult(updated_image_names=updated_image_names)
    except Exception:
        raise HTTPException(status_code=500, detail="Failed to unstar images")
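A hedged sketch of driving the three list endpoints above from a client, assuming the router's `/v1/images` prefix is mounted under `/api` as elsewhere in this changeset. With `embed=True`, the single body field must arrive wrapped in a JSON object.

```python
import requests

BASE = "http://127.0.0.1:9090/api/v1/images"  # assumed host/port

names = ["a.png", "b.png", "missing.png"]

# Star a batch; the response echoes only the names that were actually updated,
# so diffing against the request reveals the per-image failures.
starred = requests.post(f"{BASE}/star", json={"image_names": names}).json()
failed = set(names) - set(starred["updated_image_names"])
print("starred:", starred["updated_image_names"], "failed:", sorted(failed))

# Unstar and bulk-delete follow exactly the same request/response shape.
requests.post(f"{BASE}/unstar", json={"image_names": names})
deleted = requests.post(f"{BASE}/delete", json={"image_names": names}).json()
print("deleted:", deleted["deleted_images"])
```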
@@ -28,49 +28,52 @@ ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)]
MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)]
ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)]


class ModelsList(BaseModel):
    models: list[Union[tuple(OPENAPI_MODEL_CONFIGS)]]


@models_router.get(
    "/",
    operation_id="list_models",
    responses={200: {"model": ModelsList }},
    responses={200: {"model": ModelsList}},
)
async def list_models(
    base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
    model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
) -> ModelsList:
    """Gets a list of models"""
    if base_models and len(base_models)>0:
    if base_models and len(base_models) > 0:
        models_raw = list()
        for base_model in base_models:
            models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type))
    else:
        models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type)
    models = parse_obj_as(ModelsList, { "models": models_raw })
    models = parse_obj_as(ModelsList, {"models": models_raw})
    return models


@models_router.patch(
    "/{base_model}/{model_type}/{model_name}",
    operation_id="update_model",
    responses={200: {"description" : "The model was updated successfully"},
               400: {"description" : "Bad request"},
               404: {"description" : "The model could not be found"},
               409: {"description" : "There is already a model corresponding to the new name"},
    },
    status_code = 200,
    response_model = UpdateModelResponse,
    responses={
        200: {"description": "The model was updated successfully"},
        400: {"description": "Bad request"},
        404: {"description": "The model could not be found"},
        409: {"description": "There is already a model corresponding to the new name"},
    },
    status_code=200,
    response_model=UpdateModelResponse,
)
async def update_model(
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
    info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"),
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
    info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"),
) -> UpdateModelResponse:
    """ Update model contents with a new config. If the model name or base fields are changed, then the model is renamed. """
    """Update model contents with a new config. If the model name or base fields are changed, then the model is renamed."""
    logger = ApiDependencies.invoker.services.logger

    try:
        previous_info = ApiDependencies.invoker.services.model_manager.list_model(
            model_name=model_name,
@@ -81,13 +84,13 @@ async def update_model(
        # rename operation requested
        if info.model_name != model_name or info.base_model != base_model:
            ApiDependencies.invoker.services.model_manager.rename_model(
                base_model = base_model,
                model_type = model_type,
                model_name = model_name,
                new_name = info.model_name,
                new_base = info.base_model,
                base_model=base_model,
                model_type=model_type,
                model_name=model_name,
                new_name=info.model_name,
                new_base=info.base_model,
            )
            logger.info(f'Successfully renamed {base_model}/{model_name}=>{info.base_model}/{info.model_name}')
            logger.info(f"Successfully renamed {base_model.value}/{model_name}=>{info.base_model}/{info.model_name}")
            # update information to support an update of attributes
            model_name = info.model_name
            base_model = info.base_model
@@ -96,16 +99,19 @@ async def update_model(
            base_model=base_model,
            model_type=model_type,
        )
        if new_info.get('path') != previous_info.get('path'): # model manager moved model path during rename - don't overwrite it
            info.path = new_info.get('path')

        if new_info.get("path") != previous_info.get(
            "path"
        ):  # model manager moved model path during rename - don't overwrite it
            info.path = new_info.get("path")

        # replace empty string values with None/null to avoid phenomenon of vae: ''
        info_dict = info.dict()
        info_dict = {x: info_dict[x] if info_dict[x] else None for x in info_dict.keys()}

        ApiDependencies.invoker.services.model_manager.update_model(
            model_name=model_name,
            base_model=base_model,
            model_type=model_type,
            model_attributes=info.dict()
            model_name=model_name, base_model=base_model, model_type=model_type, model_attributes=info_dict
        )

        model_raw = ApiDependencies.invoker.services.model_manager.list_model(
            model_name=model_name,
            base_model=base_model,
@@ -123,49 +129,48 @@ async def update_model(

    return model_response


@models_router.post(
    "/import",
    operation_id="import_model",
    responses= {
        201: {"description" : "The model imported successfully"},
        404: {"description" : "The model could not be found"},
        415: {"description" : "Unrecognized file/folder format"},
        424: {"description" : "The model appeared to import successfully, but could not be found in the model manager"},
        409: {"description" : "There is already a model corresponding to this path or repo_id"},
    responses={
        201: {"description": "The model imported successfully"},
        404: {"description": "The model could not be found"},
        415: {"description": "Unrecognized file/folder format"},
        424: {"description": "The model appeared to import successfully, but could not be found in the model manager"},
        409: {"description": "There is already a model corresponding to this path or repo_id"},
    },
    status_code=201,
    response_model=ImportModelResponse
    response_model=ImportModelResponse,
)
async def import_model(
    location: str = Body(description="A model path, repo_id or URL to import"),
    prediction_type: Optional[Literal['v_prediction','epsilon','sample']] = \
        Body(description='Prediction type for SDv2 checkpoint files', default="v_prediction"),
    location: str = Body(description="A model path, repo_id or URL to import"),
    prediction_type: Optional[Literal["v_prediction", "epsilon", "sample"]] = Body(
        description="Prediction type for SDv2 checkpoint files", default="v_prediction"
    ),
) -> ImportModelResponse:
    """ Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically """

    """Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically"""

    items_to_import = {location}
    prediction_types = { x.value: x for x in SchedulerPredictionType }
    prediction_types = {x.value: x for x in SchedulerPredictionType}
    logger = ApiDependencies.invoker.services.logger

    try:
        installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import(
            items_to_import = items_to_import,
            prediction_type_helper = lambda x: prediction_types.get(prediction_type)
            items_to_import=items_to_import, prediction_type_helper=lambda x: prediction_types.get(prediction_type)
        )
        info = installed_models.get(location)

        if not info:
            logger.error("Import failed")
            raise HTTPException(status_code=415)

        logger.info(f'Successfully imported {location}, got {info}')

        logger.info(f"Successfully imported {location}, got {info}")
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(
            model_name=info.name,
            base_model=info.base_model,
            model_type=info.model_type
            model_name=info.name, base_model=info.base_model, model_type=info.model_type
        )
        return parse_obj_as(ImportModelResponse, model_raw)

    except ModelNotFoundException as e:
        logger.error(str(e))
        raise HTTPException(status_code=404, detail=str(e))
@@ -175,38 +180,34 @@ async def import_model(
    except ValueError as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))


@models_router.post(
    "/add",
    operation_id="add_model",
    responses= {
        201: {"description" : "The model added successfully"},
        404: {"description" : "The model could not be found"},
        424: {"description" : "The model appeared to add successfully, but could not be found in the model manager"},
        409: {"description" : "There is already a model corresponding to this path or repo_id"},
    responses={
        201: {"description": "The model added successfully"},
        404: {"description": "The model could not be found"},
        424: {"description": "The model appeared to add successfully, but could not be found in the model manager"},
        409: {"description": "There is already a model corresponding to this path or repo_id"},
    },
    status_code=201,
    response_model=ImportModelResponse
    response_model=ImportModelResponse,
)
async def add_model(
    info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"),
    info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"),
) -> ImportModelResponse:
    """ Add a model using the configuration information appropriate for its type. Only local models can be added by path"""

    """Add a model using the configuration information appropriate for its type. Only local models can be added by path"""

    logger = ApiDependencies.invoker.services.logger

    try:
        ApiDependencies.invoker.services.model_manager.add_model(
            info.model_name,
            info.base_model,
            info.model_type,
            model_attributes = info.dict()
            info.model_name, info.base_model, info.model_type, model_attributes=info.dict()
        )
        logger.info(f'Successfully added {info.model_name}')
        logger.info(f"Successfully added {info.model_name}")
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(
            model_name=info.model_name,
            base_model=info.base_model,
            model_type=info.model_type
            model_name=info.model_name, base_model=info.base_model, model_type=info.model_type
        )
        return parse_obj_as(ImportModelResponse, model_raw)
    except ModelNotFoundException as e:
@@ -216,66 +217,66 @@ async def add_model(
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))


@models_router.delete(
    "/{base_model}/{model_type}/{model_name}",
    operation_id="del_model",
    responses={
        204: { "description": "Model deleted successfully" },
        404: { "description": "Model not found" }
    },
    status_code = 204,
    response_model = None,
    responses={204: {"description": "Model deleted successfully"}, 404: {"description": "Model not found"}},
    status_code=204,
    response_model=None,
)
async def delete_model(
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
) -> Response:
    """Delete Model"""
    logger = ApiDependencies.invoker.services.logger

    try:
        ApiDependencies.invoker.services.model_manager.del_model(model_name,
                                                                 base_model = base_model,
                                                                 model_type = model_type
                                                                 )
        ApiDependencies.invoker.services.model_manager.del_model(
            model_name, base_model=base_model, model_type=model_type
        )
        logger.info(f"Deleted model: {model_name}")
        return Response(status_code=204)
    except ModelNotFoundException as e:
        logger.error(str(e))
        raise HTTPException(status_code=404, detail=str(e))


@models_router.put(
    "/convert/{base_model}/{model_type}/{model_name}",
    operation_id="convert_model",
    responses={
        200: { "description": "Model converted successfully" },
        400: {"description" : "Bad request" },
        404: { "description": "Model not found" },
        200: {"description": "Model converted successfully"},
        400: {"description": "Bad request"},
        404: {"description": "Model not found"},
    },
    status_code = 200,
    response_model = ConvertModelResponse,
    status_code=200,
    response_model=ConvertModelResponse,
)
async def convert_model(
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
    convert_dest_directory: Optional[str] = Query(default=None, description="Save the converted model to the designated directory"),
    base_model: BaseModelType = Path(description="Base model"),
    model_type: ModelType = Path(description="The type of model"),
    model_name: str = Path(description="model name"),
    convert_dest_directory: Optional[str] = Query(
        default=None, description="Save the converted model to the designated directory"
    ),
) -> ConvertModelResponse:
    """Convert a checkpoint model into a diffusers model, optionally saving to the indicated destination directory, or `models` if none."""
    logger = ApiDependencies.invoker.services.logger
    try:
        logger.info(f"Converting model: {model_name}")
        dest = pathlib.Path(convert_dest_directory) if convert_dest_directory else None
        ApiDependencies.invoker.services.model_manager.convert_model(model_name,
                                                                     base_model = base_model,
                                                                     model_type = model_type,
                                                                     convert_dest_directory = dest,
                                                                     )
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(model_name,
                                                                              base_model = base_model,
                                                                              model_type = model_type)
        ApiDependencies.invoker.services.model_manager.convert_model(
            model_name,
            base_model=base_model,
            model_type=model_type,
            convert_dest_directory=dest,
        )
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(
            model_name, base_model=base_model, model_type=model_type
        )
        response = parse_obj_as(ConvertModelResponse, model_raw)
    except ModelNotFoundException as e:
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}")
@@ -283,91 +284,101 @@ async def convert_model(
        raise HTTPException(status_code=400, detail=str(e))
    return response


@models_router.get(
    "/search",
    operation_id="search_for_models",
    responses={
        200: { "description": "Directory searched successfully" },
        404: { "description": "Invalid directory path" },
        200: {"description": "Directory searched successfully"},
        404: {"description": "Invalid directory path"},
    },
    status_code = 200,
    response_model = List[pathlib.Path]
    status_code=200,
    response_model=List[pathlib.Path],
)
async def search_for_models(
    search_path: pathlib.Path = Query(description="Directory path to search for models")
)->List[pathlib.Path]:
    search_path: pathlib.Path = Query(description="Directory path to search for models"),
) -> List[pathlib.Path]:
    if not search_path.is_dir():
        raise HTTPException(status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory")
    return ApiDependencies.invoker.services.model_manager.search_for_models([search_path])
        raise HTTPException(
            status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory"
        )
    return ApiDependencies.invoker.services.model_manager.search_for_models(search_path)


@models_router.get(
    "/ckpt_confs",
    operation_id="list_ckpt_configs",
    responses={
        200: { "description" : "paths retrieved successfully" },
        200: {"description": "paths retrieved successfully"},
    },
    status_code = 200,
    response_model = List[pathlib.Path]
    status_code=200,
    response_model=List[pathlib.Path],
)
async def list_ckpt_configs(
)->List[pathlib.Path]:
async def list_ckpt_configs() -> List[pathlib.Path]:
    """Return a list of the legacy checkpoint configuration files stored in `ROOT/configs/stable-diffusion`, relative to ROOT."""
    return ApiDependencies.invoker.services.model_manager.list_checkpoint_configs()


@models_router.post(
    "/sync",
    operation_id="sync_to_config",
    responses={
        201: { "description": "synchronization successful" },
        201: {"description": "synchronization successful"},
    },
    status_code = 201,
    response_model = bool
    status_code=201,
    response_model=bool,
)
async def sync_to_config(
)->bool:
async def sync_to_config() -> bool:
    """Call after making changes to models.yaml, autoimport directories or models directory to synchronize
    in-memory data structures with disk data structures."""
    ApiDependencies.invoker.services.model_manager.sync_to_config()
    return True


@models_router.put(
    "/merge/{base_model}",
    operation_id="merge_models",
    responses={
        200: { "description": "Model converted successfully" },
        400: { "description": "Incompatible models" },
        404: { "description": "One or more models not found" },
        200: {"description": "Model converted successfully"},
        400: {"description": "Incompatible models"},
        404: {"description": "One or more models not found"},
    },
    status_code = 200,
    response_model = MergeModelResponse,
    status_code=200,
    response_model=MergeModelResponse,
)
async def merge_models(
    base_model: BaseModelType = Path(description="Base model"),
    model_names: List[str] = Body(description="model name", min_items=2, max_items=3),
    merged_model_name: Optional[str] = Body(description="Name of destination model"),
    alpha: Optional[float] = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method"),
    force: Optional[bool] = Body(description="Force merging of models created with different versions of diffusers", default=False),
    merge_dest_directory: Optional[str] = Body(description="Save the merged model to the designated directory (with 'merged_model_name' appended)", default=None)
    base_model: BaseModelType = Path(description="Base model"),
    model_names: List[str] = Body(description="model name", min_items=2, max_items=3),
    merged_model_name: Optional[str] = Body(description="Name of destination model"),
    alpha: Optional[float] = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method"),
    force: Optional[bool] = Body(
        description="Force merging of models created with different versions of diffusers", default=False
    ),
    merge_dest_directory: Optional[str] = Body(
        description="Save the merged model to the designated directory (with 'merged_model_name' appended)",
        default=None,
    ),
) -> MergeModelResponse:
    """Convert a checkpoint model into a diffusers model"""
    logger = ApiDependencies.invoker.services.logger
    try:
        logger.info(f"Merging models: {model_names} into {merge_dest_directory or '<MODELS>'}/{merged_model_name}")
        dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None
        result = ApiDependencies.invoker.services.model_manager.merge_models(model_names,
                                                                             base_model,
                                                                             merged_model_name=merged_model_name or "+".join(model_names),
                                                                             alpha=alpha,
                                                                             interp=interp,
                                                                             force=force,
                                                                             merge_dest_directory = dest
                                                                             )
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(result.name,
                                                                              base_model = base_model,
                                                                              model_type = ModelType.Main,
                                                                              )
        result = ApiDependencies.invoker.services.model_manager.merge_models(
            model_names,
            base_model,
            merged_model_name=merged_model_name or "+".join(model_names),
            alpha=alpha,
            interp=interp,
            force=force,
            merge_dest_directory=dest,
        )
        model_raw = ApiDependencies.invoker.services.model_manager.list_model(
            result.name,
            base_model=base_model,
            model_type=ModelType.Main,
        )
        response = parse_obj_as(ConvertModelResponse, model_raw)
    except ModelNotFoundException:
        raise HTTPException(status_code=404, detail=f"One or more of the models '{model_names}' not found")
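A sketch of calling the import endpoint above, assuming the models router is mounted at `/api/v1/models` (its prefix is not visible in this diff). `location` and `prediction_type` are separate `Body()` parameters, so FastAPI expects one JSON object embedding both.

```python
import requests

BASE = "http://127.0.0.1:9090/api/v1/models"  # assumed host/port/prefix

resp = requests.post(
    f"{BASE}/import",
    json={
        "location": "runwayml/stable-diffusion-v1-5",  # a path, repo_id, or URL
        "prediction_type": "epsilon",  # one of v_prediction / epsilon / sample
    },
)
if resp.status_code == 201:
    # Success returns the probed configuration of the newly registered model.
    print(resp.json())
else:
    # 404, 409, 415, and 424 each carry a human-readable detail string.
    print(resp.status_code, resp.json().get("detail"))
```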
@@ -1,12 +1,13 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Annotated, List, Optional, Union
from typing import Annotated, Optional, Union

from fastapi import Body, HTTPException, Path, Query, Response
from fastapi.routing import APIRouter
from pydantic.fields import Field

from ...invocations import *
# Importing * is bad karma but needed here for node detection
from ...invocations import *  # noqa: F401 F403
from ...invocations.baseinvocation import BaseInvocation
from ...services.graph import (
    Edge,
@@ -30,9 +31,7 @@ session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"])
    },
)
async def create_session(
    graph: Optional[Graph] = Body(
        default=None, description="The graph to initialize the session with"
    )
    graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with")
) -> GraphExecutionState:
    """Creates a new session, optionally initializing it with an invocation graph"""
    session = ApiDependencies.invoker.create_execution_state(graph)
@@ -51,13 +50,9 @@ async def list_sessions(
) -> PaginatedResults[GraphExecutionState]:
    """Gets a list of sessions, optionally searching"""
    if query == "":
        result = ApiDependencies.invoker.services.graph_execution_manager.list(
            page, per_page
        )
        result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page)
    else:
        result = ApiDependencies.invoker.services.graph_execution_manager.search(
            query, page, per_page
        )
        result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page)
    return result


@@ -91,9 +86,9 @@ async def get_session(
)
async def add_node(
    session_id: str = Path(description="The id of the session"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The node to add"),
    node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body(  # type: ignore
        description="The node to add"
    ),
) -> str:
    """Adds a node to the graph"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
@@ -124,9 +119,9 @@ async def add_node(
async def update_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node in the graph"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The new node"),
    node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body(  # type: ignore
        description="The new node"
    ),
) -> GraphExecutionState:
    """Updates a node in the graph and removes all linked edges"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
@@ -230,7 +225,7 @@ async def delete_edge(
    try:
        edge = Edge(
            source=EdgeConnection(node_id=from_node_id, field=from_field),
            destination=EdgeConnection(node_id=to_node_id, field=to_field)
            destination=EdgeConnection(node_id=to_node_id, field=to_field),
        )
        session.delete_edge(edge)
        ApiDependencies.invoker.services.graph_execution_manager.set(
@@ -255,9 +250,7 @@ async def delete_edge(
)
async def invoke_session(
    session_id: str = Path(description="The id of the session to invoke"),
    all: bool = Query(
        default=False, description="Whether or not to invoke all remaining invocations"
    ),
    all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"),
) -> Response:
    """Invokes a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
@@ -274,9 +267,7 @@ async def invoke_session(
@session_router.delete(
    "/{session_id}/invoke",
    operation_id="cancel_session_invoke",
    responses={
        202: {"description": "The invocation is canceled"}
    },
    responses={202: {"description": "The invocation is canceled"}},
)
async def cancel_session_invoke(
    session_id: str = Path(description="The id of the session to cancel"),
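A rough client sketch of the session lifecycle these handlers expose. The create path, the `PUT` verb on `/{session_id}/invoke`, and the `id` field of the returned execution state are inferences from the handlers above, not facts shown in the hunks.

```python
import requests

BASE = "http://127.0.0.1:9090/api/v1/sessions"  # assumed host/port

# Create a session; a Graph could also be passed as the JSON body.
session = requests.post(f"{BASE}/", json=None).json()
session_id = session["id"]  # assumed field name on GraphExecutionState

# ... add nodes and edges via the node/edge routes shown above ...

# Schedule every remaining invocation in the graph.
requests.put(f"{BASE}/{session_id}/invoke", params={"all": "true"})

# A pending invocation can be canceled with the DELETE variant.
requests.delete(f"{BASE}/{session_id}/invoke")
```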
@@ -16,9 +16,7 @@ class SocketIO:
        self.__sio.on("subscribe", handler=self._handle_sub)
        self.__sio.on("unsubscribe", handler=self._handle_unsub)

        local_handler.register(
            event_name=EventServiceBase.session_event, _func=self._handle_session_event
        )
        local_handler.register(event_name=EventServiceBase.session_event, _func=self._handle_session_event)

    async def _handle_session_event(self, event: Event):
        await self.__sio.emit(
@@ -1,11 +1,11 @@
# Copyright (c) 2022-2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
import asyncio
import sys
import logging
import socket
from inspect import signature
from pathlib import Path

import uvicorn
import socket

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
@@ -13,22 +13,12 @@ from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from pathlib import Path
from pydantic.schema import schema

#This should come early so that modules can log their initialization properly
from .services.config import InvokeAIAppConfig
from ..backend.util.logging import InvokeAILogger
app_config = InvokeAIAppConfig.get_config()
app_config.parse_args()
logger = InvokeAILogger.getLogger(config=app_config)
from invokeai.version.invokeai_version import __version__

# we call this early so that the message appears before
# other invokeai initialization messages
if app_config.version:
    print(f'InvokeAI version {__version__}')
    sys.exit(0)
from invokeai.version.invokeai_version import __version__

import invokeai.frontend.web as web_dir
import mimetypes
@@ -36,18 +26,26 @@ import mimetypes
from .api.dependencies import ApiDependencies
from .api.routers import sessions, models, images, boards, board_images, app_info
from .api.sockets import SocketIO
from .invocations.baseinvocation import BaseInvocation

from .invocations.baseinvocation import BaseInvocation, _InputField, _OutputField, UIConfigBase

import torch
import invokeai.backend.util.hotfixes

# noinspection PyUnresolvedReferences
import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)

if torch.backends.mps.is_available():
    import invokeai.backend.util.mps_fixes
    # noinspection PyUnresolvedReferences
    import invokeai.backend.util.mps_fixes  # noqa: F401 (monkeypatching on import)


app_config = InvokeAIAppConfig.get_config()
app_config.parse_args()
logger = InvokeAILogger.getLogger(config=app_config)

# fix for windows mimetypes registry entries being borked
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
mimetypes.add_type('application/javascript', '.js')
mimetypes.add_type('text/css', '.css')
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")

# Create the app
# TODO: create this all in a method so configuration/etc. can be passed in?
@@ -57,14 +55,13 @@ app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None)
event_handler_id: int = id(app)
app.add_middleware(
    EventHandlerASGIMiddleware,
    handlers=[
        local_handler
    ],  # TODO: consider doing this in services to support different configurations
    handlers=[local_handler],  # TODO: consider doing this in services to support different configurations
    middleware_id=event_handler_id,
)

socket_io = SocketIO(app)


# Add startup event to load dependencies
@app.on_event("startup")
async def startup_event():
@@ -76,9 +73,7 @@ async def startup_event():
        allow_headers=app_config.allow_headers,
    )

    ApiDependencies.initialize(
        config=app_config, event_handler_id=event_handler_id, logger=logger
    )
    ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger)


# Shut down threads
@@ -103,7 +98,8 @@ app.include_router(boards.boards_router, prefix="/api")

app.include_router(board_images.board_images_router, prefix="/api")

app.include_router(app_info.app_router, prefix='/api')
app.include_router(app_info.app_router, prefix="/api")


# Build a custom OpenAPI to include all outputs
# TODO: can outputs be included on metadata of invocation schemas somehow?
@@ -127,12 +123,18 @@ def custom_openapi():

    output_schemas = schema(output_types, ref_prefix="#/components/schemas/")
    for schema_key, output_schema in output_schemas["definitions"].items():
        output_schema["class"] = "output"
        openapi_schema["components"]["schemas"][schema_key] = output_schema

        # TODO: note that we assume the schema_key here is the TYPE.__name__
        # This could break in some cases, figure out a better way to do it
        output_type_titles[schema_key] = output_schema["title"]

    # Add Node Editor UI helper schemas
    ui_config_schemas = schema([UIConfigBase, _InputField, _OutputField], ref_prefix="#/components/schemas/")
    for schema_key, ui_config_schema in ui_config_schemas["definitions"].items():
        openapi_schema["components"]["schemas"][schema_key] = ui_config_schema

    # Add a reference to the output type to additionalProperties of the invoker schema
    for invoker in all_invocations:
        invoker_name = invoker.__name__
@@ -140,10 +142,11 @@ def custom_openapi():
        output_type_title = output_type_titles[output_type.__name__]
        invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
        outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}

        invoker_schema["output"] = outputs_ref
        invoker_schema["class"] = "invocation"

    from invokeai.backend.model_management.models import get_model_config_enums

    for model_config_format_enum in set(get_model_config_enums()):
        name = model_config_format_enum.__qualname__

@@ -166,7 +169,8 @@ def custom_openapi():
app.openapi = custom_openapi

# Override API doc favicons
app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], 'static/dream_web')), name="static")
app.mount("/static", StaticFiles(directory=Path(web_dir.__path__[0], "static/dream_web")), name="static")


@app.get("/docs", include_in_schema=False)
def overridden_swagger():
@@ -187,11 +191,8 @@ def overridden_redoc():


# Must mount *after* the other routes else it borks em
app.mount("/",
          StaticFiles(directory=Path(web_dir.__path__[0],"dist"),
                      html=True
                      ), name="ui"
          )
app.mount("/", StaticFiles(directory=Path(web_dir.__path__[0], "dist"), html=True), name="ui")


def invoke_api():
    def find_port(port: int):
@@ -204,15 +205,48 @@ def invoke_api():
        else:
            return port

    from invokeai.backend.install.check_root import check_invokeai_root

    check_invokeai_root(app_config)  # note, may exit with an exception if root not set up

    if app_config.dev_reload:
        try:
            import jurigged
        except ImportError as e:
            logger.error(
                'Can\'t start `--dev_reload` because jurigged is not found; `pip install -e ".[dev]"` to include development dependencies.',
                exc_info=e,
            )
        else:
            jurigged.watch(logger=InvokeAILogger.getLogger(name="jurigged").info)

    port = find_port(app_config.port)
    if port != app_config.port:
        logger.warn(f"Port {app_config.port} in use, using port {port}")

    # Start our own event loop for eventing usage
    loop = asyncio.new_event_loop()
    config = uvicorn.Config(app=app, host=app_config.host, port=port, loop=loop)
    # Use access_log to turn off logging
    config = uvicorn.Config(
        app=app,
        host=app_config.host,
        port=port,
        loop=loop,
        log_level=app_config.log_level,
    )
    server = uvicorn.Server(config)

    # replace uvicorn's loggers with InvokeAI's for consistent appearance
    for logname in ["uvicorn.access", "uvicorn"]:
        log = logging.getLogger(logname)
        log.handlers.clear()
        for ch in logger.handlers:
            log.addHandler(ch)

    loop.run_until_complete(server.serve())


if __name__ == "__main__":
    invoke_api()
    if app_config.version:
        print(f"InvokeAI version {__version__}")
    else:
        invoke_api()
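The hunk above calls a `find_port` helper whose body is mostly elided; only the trailing `else: return port` survives. A plausible sketch of such a probe using the standard `socket` module, offered purely as an illustration of the pattern, not as the actual implementation:

```python
import socket


def find_port(port: int) -> int:
    """Return the first port at or above `port` that nothing is listening on
    (a sketch of the elided helper; the real one may differ)."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        if s.connect_ex(("localhost", port)) == 0:
            # connect succeeded, so the port is taken; try the next one up
            return find_port(port + 1)
        else:
            return port
```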
(binary image asset changed: 33 KiB before, 33 KiB after)
@@ -14,8 +14,14 @@ from ..services.graph import GraphExecutionState, LibraryGraph, Edge
from ..services.invoker import Invoker


def add_field_argument(command_parser, name: str, field, default_override = None):
    default = default_override if default_override is not None else field.default if field.default_factory is None else field.default_factory()
def add_field_argument(command_parser, name: str, field, default_override=None):
    default = (
        default_override
        if default_override is not None
        else field.default
        if field.default_factory is None
        else field.default_factory()
    )
    if get_origin(field.type_) == Literal:
        allowed_values = get_args(field.type_)
        allowed_types = set()
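The reformatted chained conditional in `add_field_argument` keeps the precedence of the original one-liner: an explicit override wins, then the field's declared default, and the factory is only called as a last resort. A tiny standalone illustration (hypothetical helper, not part of the diff):

```python
def resolve_default(default_override, field_default, default_factory):
    # Same shape as the expression above: override > declared default > factory.
    return (
        default_override
        if default_override is not None
        else field_default
        if default_factory is None
        else default_factory()
    )


assert resolve_default(None, 30, None) == 30  # declared default
assert resolve_default(7, 30, None) == 7  # override wins
assert resolve_default(None, None, list) == []  # factory as the fallback
```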
@@ -47,8 +53,8 @@ def add_parsers(
    commands: list[type],
    command_field: str = "type",
    exclude_fields: list[str] = ["id", "type"],
    add_arguments: Union[Callable[[argparse.ArgumentParser], None],None] = None
):
    add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None,
):
    """Adds parsers for each command to the subparsers"""

    # Create subparsers for each command
@@ -61,7 +67,7 @@ def add_parsers(
            add_arguments(command_parser)

        # Convert all fields to arguments
        fields = command.__fields__ # type: ignore
        fields = command.__fields__  # type: ignore
        for name, field in fields.items():
            if name in exclude_fields:
                continue
@@ -70,13 +76,11 @@ def add_parsers(


def add_graph_parsers(
    subparsers,
    graphs: list[LibraryGraph],
    add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None
    subparsers, graphs: list[LibraryGraph], add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None
):
    for graph in graphs:
        command_parser = subparsers.add_parser(graph.name, help=graph.description)

        if add_arguments is not None:
            add_arguments(command_parser)

@@ -128,6 +132,7 @@ class CliContext:

class ExitCli(Exception):
    """Exception to exit the CLI"""

    pass


@@ -155,7 +160,7 @@ class BaseCommand(ABC, BaseModel):
    @classmethod
    def get_commands_map(cls):
        # Get the type strings out of the literals and into a dictionary
        return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t),BaseCommand.get_all_subclasses()))
        return dict(map(lambda t: (get_args(get_type_hints(t)["type"])[0], t), BaseCommand.get_all_subclasses()))

    @abstractmethod
    def run(self, context: CliContext) -> None:
@@ -165,7 +170,8 @@ class BaseCommand(ABC, BaseModel):

class ExitCommand(BaseCommand):
    """Exits the CLI"""
    type: Literal['exit'] = 'exit'

    type: Literal["exit"] = "exit"

    def run(self, context: CliContext) -> None:
        raise ExitCli()
@@ -173,7 +179,8 @@ class ExitCommand(BaseCommand):

class HelpCommand(BaseCommand):
    """Shows help"""
    type: Literal['help'] = 'help'

    type: Literal["help"] = "help"

    def run(self, context: CliContext) -> None:
        context.parser.print_help()
@@ -183,11 +190,7 @@ def get_graph_execution_history(
    graph_execution_state: GraphExecutionState,
) -> Iterable[str]:
    """Gets the history of fully-executed invocations for a graph execution"""
    return (
        n
        for n in reversed(graph_execution_state.executed_history)
        if n in graph_execution_state.graph.nodes
    )
    return (n for n in reversed(graph_execution_state.executed_history) if n in graph_execution_state.graph.nodes)


def get_invocation_command(invocation) -> str:
@@ -218,7 +221,8 @@ def get_invocation_command(invocation) -> str:

class HistoryCommand(BaseCommand):
    """Shows the invocation history"""
    type: Literal['history'] = 'history'

    type: Literal["history"] = "history"

    # Inputs
    # fmt: off
@@ -235,7 +239,8 @@ class HistoryCommand(BaseCommand):

class SetDefaultCommand(BaseCommand):
    """Sets a default value for a field"""
    type: Literal['default'] = 'default'

    type: Literal["default"] = "default"

    # Inputs
    # fmt: off
@@ -253,7 +258,8 @@ class SetDefaultCommand(BaseCommand):

class DrawGraphCommand(BaseCommand):
    """Debugs a graph"""
    type: Literal['draw_graph'] = 'draw_graph'

    type: Literal["draw_graph"] = "draw_graph"

    def run(self, context: CliContext) -> None:
        session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
@@ -271,7 +277,8 @@ class DrawGraphCommand(BaseCommand):

class DrawExecutionGraphCommand(BaseCommand):
    """Debugs an execution graph"""
    type: Literal['draw_xgraph'] = 'draw_xgraph'

    type: Literal["draw_xgraph"] = "draw_xgraph"

    def run(self, context: CliContext) -> None:
        session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
@@ -286,6 +293,7 @@ class DrawExecutionGraphCommand(BaseCommand):
        plt.axis("off")
        plt.show()


class SortedHelpFormatter(argparse.HelpFormatter):
    def _iter_indented_subactions(self, action):
        try:
||||
@@ -19,8 +19,8 @@ from ..services.invocation_services import InvocationServices
|
||||
# singleton object, class variable
|
||||
completer = None
|
||||
|
||||
|
||||
class Completer(object):
|
||||
|
||||
def __init__(self, model_manager: ModelManager):
|
||||
self.commands = self.get_commands()
|
||||
self.matches = None
|
||||
@@ -43,7 +43,7 @@ class Completer(object):
|
||||
except IndexError:
|
||||
pass
|
||||
options = options or list(self.parse_commands().keys())
|
||||
|
||||
|
||||
if not text: # first time
|
||||
self.matches = options
|
||||
else:
|
||||
@@ -56,17 +56,17 @@ class Completer(object):
|
||||
return match

    @classmethod
    def get_commands(self)->List[object]:
    def get_commands(self) -> List[object]:
        """
        Return a list of all the client commands and invocations.
        """
        return BaseCommand.get_commands() + BaseInvocation.get_invocations()

    def get_current_command(self, buffer: str)->tuple[str, str]:
    def get_current_command(self, buffer: str) -> tuple[str, str]:
        """
        Parse the readline buffer to find the most recent command and its switch.
        """
        if len(buffer)==0:
        if len(buffer) == 0:
            return None, None
        tokens = shlex.split(buffer)
        command = None
@@ -78,11 +78,11 @@ class Completer(object):
            else:
                switch = t
        # don't try to autocomplete switches that are already complete
        if switch and buffer.endswith(' '):
            switch=None
        return command or '', switch or ''
        if switch and buffer.endswith(" "):
            switch = None
        return command or "", switch or ""
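
Reading the tokenizing logic above: the most recent `-`-prefixed token is taken as the switch, and a trailing space marks that switch as already complete. A hedged sketch of the resulting behavior:

    completer.get_current_command("history --count")   # -> ("history", "--count")
    completer.get_current_command("history --count ")  # -> ("history", "")  trailing space clears the switch
    completer.get_current_command("")                  # -> (None, None)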

    def parse_commands(self)->Dict[str, List[str]]:
    def parse_commands(self) -> Dict[str, List[str]]:
        """
        Return a dict in which the keys are the command name
        and the values are the parameters the command takes.
@@ -90,11 +90,11 @@
        result = dict()
        for command in self.commands:
            hints = get_type_hints(command)
            name = get_args(hints['type'])[0]
            result.update({name:hints})
            name = get_args(hints["type"])[0]
            result.update({name: hints})
        return result
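
The command name is recovered from the `Literal` annotation on each command's `type` field. A self-contained sketch of that mechanism (the `count` field is a hypothetical input, not taken from this diff):

    from typing import Literal, get_args, get_type_hints

    class HistoryCommand:
        type: Literal["history"] = "history"
        count: int = 5  # hypothetical input field

    hints = get_type_hints(HistoryCommand)  # {'type': Literal['history'], 'count': <class 'int'>}
    name = get_args(hints["type"])[0]       # -> "history"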

    def get_command_options(self, command: str, switch: str)->List[str]:
    def get_command_options(self, command: str, switch: str) -> List[str]:
        """
        Return all the parameters that can be passed to the command as
        command-line switches. Returns None if the command is unrecognized.
@@ -102,49 +102,53 @@
        parsed_commands = self.parse_commands()
        if command not in parsed_commands:
            return None

        # handle switches in the format "-foo=bar"
        argument = None
        if switch and '=' in switch:
            switch, argument = switch.split('=')

        parameter = switch.strip('-')
        if switch and "=" in switch:
            switch, argument = switch.split("=")

        parameter = switch.strip("-")
        if parameter in parsed_commands[command]:
            if argument is None:
                return self.get_parameter_options(parameter, parsed_commands[command][parameter])
            else:
                return [f"--{parameter}={x}" for x in self.get_parameter_options(parameter, parsed_commands[command][parameter])]
                return [
                    f"--{parameter}={x}"
                    for x in self.get_parameter_options(parameter, parsed_commands[command][parameter])
                ]
        else:
            return [f"--{x}" for x in parsed_commands[command].keys()]
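
Depending on whether the switch already carries an `=`, completions are either the full set of switches or fully-formed `--param=value` strings. A sketch (the `--count` option name is illustrative; the `--type=` case follows directly from the `Literal` handling below):

    completer.get_command_options("history", "")         # -> ["--type", "--count", ...]  every switch
    completer.get_command_options("history", "--type=")  # -> ["--type=history"]  Literal values filled in
    completer.get_command_options("bogus", "")           # -> None  unrecognized command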

    def get_parameter_options(self, parameter: str, typehint)->List[str]:
    def get_parameter_options(self, parameter: str, typehint) -> List[str]:
        """
        Given a parameter type (such as Literal), offers autocompletions.
        """
        if get_origin(typehint) == Literal:
            return get_args(typehint)
        if parameter == 'model':
        if parameter == "model":
            return self.manager.model_names()

    def _pre_input_hook(self):
        if self.linebuffer:
            readline.insert_text(self.linebuffer)
            readline.redisplay()
            self.linebuffer = None


def set_autocompleter(services: InvocationServices) -> Completer:
    global completer

    if completer:
        return completer

    completer = Completer(services.model_manager)

    readline.set_completer(completer.complete)
    # pyreadline3 does not have a set_auto_history() method
    try:
        readline.set_auto_history(True)
    except:
    except AttributeError:
        # pyreadline3 does not have a set_auto_history() method
        pass
    readline.set_pre_input_hook(completer._pre_input_hook)
    readline.set_completer_delims(" ")
@@ -162,8 +166,6 @@ def set_autocompleter(services: InvocationServices) -> Completer:
        pass
    except OSError:  # file likely corrupted
        newname = f"{histfile}.old"
        logger.error(
            f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
        )
        logger.error(f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}")
        histfile.replace(Path(newname))
    atexit.register(readline.write_history_file, histfile)

@@ -13,15 +13,8 @@ from pydantic.fields import Field
# This should come early so that the logger can pick up its configuration options
from .services.config import InvokeAIAppConfig
from invokeai.backend.util.logging import InvokeAILogger

config = InvokeAIAppConfig.get_config()
config.parse_args()
logger = InvokeAILogger().getLogger(config=config)

from invokeai.version.invokeai_version import __version__

# we call this early so that the message appears before other invokeai initialization messages
if config.version:
    print(f'InvokeAI version {__version__}')
    sys.exit(0)

from invokeai.app.services.board_image_record_storage import (
    SqliteBoardImageRecordStorage,
@@ -36,18 +29,22 @@ from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
from invokeai.app.services.images import ImageService, ImageServiceDependencies
from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.urls import LocalUrlService
from .services.default_graphs import (default_text_to_image_graph_id,
                                      create_system_graphs)
from invokeai.app.services.invocation_stats import InvocationStatsService
from .services.default_graphs import default_text_to_image_graph_id, create_system_graphs
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage

from .cli.commands import (BaseCommand, CliContext, ExitCli,
                           SortedHelpFormatter, add_graph_parsers, add_parsers)
from .cli.commands import BaseCommand, CliContext, ExitCli, SortedHelpFormatter, add_graph_parsers, add_parsers
from .cli.completer import set_autocompleter
from .invocations.baseinvocation import BaseInvocation
from .services.events import EventServiceBase
from .services.graph import (Edge, EdgeConnection, GraphExecutionState,
                             GraphInvocation, LibraryGraph,
                             are_connection_types_compatible)
from .services.graph import (
    Edge,
    EdgeConnection,
    GraphExecutionState,
    GraphInvocation,
    LibraryGraph,
    are_connection_types_compatible,
)
from .services.image_file_storage import DiskImageFileStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
@@ -57,9 +54,15 @@ from .services.processor import DefaultInvocationProcessor
from .services.sqlite import SqliteItemStorage

import torch
import invokeai.backend.util.hotfixes
import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)

if torch.backends.mps.is_available():
    import invokeai.backend.util.mps_fixes
    import invokeai.backend.util.mps_fixes  # noqa: F401 (monkeypatching on import)


config = InvokeAIAppConfig.get_config()
config.parse_args()
logger = InvokeAILogger().getLogger(config=config)


class CliCommand(BaseModel):
@@ -69,6 +72,7 @@ class CliCommand(BaseModel):

class InvalidArgs(Exception):
    pass


def add_invocation_args(command_parser):
    # Add linking capability
    command_parser.add_argument(
@@ -113,7 +117,7 @@ def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser:
    return parser


class NodeField():
class NodeField:
    alias: str
    node_path: str
    field: str
@@ -126,15 +130,20 @@ class NodeField():
        self.field_type = field_type


def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str,NodeField]:
    return {k:NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()}
def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str, NodeField]:
    return {k: NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()}


def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
    """Gets the node field for the specified field alias"""
    exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias)
    node_type = type(graph.graph.get_node(exposed_input.node_path))
    return NodeField(alias=exposed_input.alias, node_path=f'{node_id}.{exposed_input.node_path}', field=exposed_input.field, field_type=get_type_hints(node_type)[exposed_input.field])
    return NodeField(
        alias=exposed_input.alias,
        node_path=f"{node_id}.{exposed_input.node_path}",
        field=exposed_input.field,
        field_type=get_type_hints(node_type)[exposed_input.field],
    )


def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
@@ -142,7 +151,12 @@ def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -
    exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias)
    node_type = type(graph.graph.get_node(exposed_output.node_path))
    node_output_type = node_type.get_output_type()
    return NodeField(alias=exposed_output.alias, node_path=f'{node_id}.{exposed_output.node_path}', field=exposed_output.field, field_type=get_type_hints(node_output_type)[exposed_output.field])
    return NodeField(
        alias=exposed_output.alias,
        node_path=f"{node_id}.{exposed_output.node_path}",
        field=exposed_output.field,
        field_type=get_type_hints(node_output_type)[exposed_output.field],
    )

def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
@@ -165,9 +179,7 @@ def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[st
    return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs}


def generate_matching_edges(
    a: BaseInvocation, b: BaseInvocation, context: CliContext
) -> list[Edge]:
def generate_matching_edges(a: BaseInvocation, b: BaseInvocation, context: CliContext) -> list[Edge]:
    """Generates all possible edges between two invocations"""
    afields = get_node_outputs(a, context)
    bfields = get_node_inputs(b, context)
@@ -179,12 +191,14 @@ def generate_matching_edges(
    matching_fields = matching_fields.difference(invalid_fields)

    # Validate types
    matching_fields = [f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type)]
    matching_fields = [
        f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type)
    ]

    edges = [
        Edge(
            source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field),
            destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field)
            destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field),
        )
        for alias in matching_fields
    ]
@@ -193,6 +207,7 @@ def generate_matching_edges(

class SessionError(Exception):
    """Raised when a session error has occurred"""

    pass


@@ -209,22 +224,23 @@ def invoke_all(context: CliContext):
            context.invoker.services.logger.error(
                f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
            )

        raise SessionError()


def invoke_cli():
    logger.info(f'InvokeAI version {__version__}')
    logger.info(f"InvokeAI version {__version__}")
    # get the optional list of invocations to execute on the command line
    parser = config.get_parser()
    parser.add_argument('commands',nargs='*')
    parser.add_argument("commands", nargs="*")
    invocation_commands = parser.parse_args().commands

    # get the optional file to read commands from.
    # Simplest is to use it for STDIN
    if infile := config.from_file:
        sys.stdin = open(infile,"r")

    model_manager = ModelManagerService(config,logger)
        sys.stdin = open(infile, "r")

    model_manager = ModelManagerService(config, logger)

    events = EventServiceBase()
    output_folder = config.output_path
@@ -234,13 +250,13 @@ def invoke_cli():
        db_location = ":memory:"
    else:
        db_location = config.db_path
        db_location.parent.mkdir(parents=True,exist_ok=True)
        db_location.parent.mkdir(parents=True, exist_ok=True)

    logger.info(f'InvokeAI database location is "{db_location}"')

    graph_execution_manager = SqliteItemStorage[GraphExecutionState](
            filename=db_location, table_name="graph_executions"
        )
        filename=db_location, table_name="graph_executions"
    )

    urls = LocalUrlService()
    image_record_storage = SqliteImageRecordStorage(db_location)
@@ -281,24 +297,22 @@ def invoke_cli():
            graph_execution_manager=graph_execution_manager,
        )
    )

    services = InvocationServices(
        model_manager=model_manager,
        events=events,
        latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
        latents=ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents")),
        images=images,
        boards=boards,
        board_images=board_images,
        queue=MemoryInvocationQueue(),
        graph_library=SqliteItemStorage[LibraryGraph](
            filename=db_location, table_name="graphs"
        ),
        graph_library=SqliteItemStorage[LibraryGraph](filename=db_location, table_name="graphs"),
        graph_execution_manager=graph_execution_manager,
        processor=DefaultInvocationProcessor(),
        performance_statistics=InvocationStatsService(graph_execution_manager),
        logger=logger,
        configuration=config,
    )

    system_graphs = create_system_graphs(services.graph_library)
    system_graph_names = set([g.name for g in system_graphs])
@@ -308,7 +322,7 @@ def invoke_cli():
    session: GraphExecutionState = invoker.create_execution_state()
    parser = get_command_parser(services)

    re_negid = re.compile('^-[0-9]+$')
    re_negid = re.compile("^-[0-9]+$")

    # Uncomment to print out previous sessions at startup
    # print(services.session_manager.list())
@@ -318,7 +332,7 @@ def invoke_cli():

    command_line_args_exist = len(invocation_commands) > 0
    done = False

    while not done:
        try:
            if command_line_args_exist:
@@ -332,7 +346,7 @@ def invoke_cli():

        try:
            # Refresh the state of the session
            #history = list(get_graph_execution_history(context.session))
            # history = list(get_graph_execution_history(context.session))
            history = list(reversed(context.nodes_added))

            # Split the command for piping
@@ -353,17 +367,17 @@ def invoke_cli():
                        args[field_name] = field_default

                # Parse invocation
                command: CliCommand = None # type:ignore
                command: CliCommand = None  # type:ignore
                system_graph: Optional[LibraryGraph] = None
                if args['type'] in system_graph_names:
                    system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
                if args["type"] in system_graph_names:
                    system_graph = next(filter(lambda g: g.name == args["type"], system_graphs))
                    invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
                    for exposed_input in system_graph.exposed_inputs:
                        if exposed_input.alias in args:
                            node = invocation.graph.get_node(exposed_input.node_path)
                            field = exposed_input.field
                            setattr(node, field, args[exposed_input.alias])
                    command = CliCommand(command = invocation)
                    command = CliCommand(command=invocation)
                    context.graph_nodes[invocation.id] = system_graph.id
                else:
                    args["id"] = current_id
@@ -385,17 +399,13 @@ def invoke_cli():
                # Pipe previous command output (if there was a previous command)
                edges: list[Edge] = list()
                if len(history) > 0 or current_id != start_id:
                    from_id = (
                        history[0] if current_id == start_id else str(current_id - 1)
                    )
                    from_id = history[0] if current_id == start_id else str(current_id - 1)
                    from_node = (
                        next(filter(lambda n: n[0].id == from_id, new_invocations))[0]
                        if current_id != start_id
                        else context.session.graph.get_node(from_id)
                    )
                    matching_edges = generate_matching_edges(
                        from_node, command.command, context
                    )
                    matching_edges = generate_matching_edges(from_node, command.command, context)
                    edges.extend(matching_edges)

                # Parse provided links
@@ -406,16 +416,18 @@ def invoke_cli():
                            node_id = str(current_id + int(node_id))

                        link_node = context.session.graph.get_node(node_id)
                        matching_edges = generate_matching_edges(
                            link_node, command.command, context
                        )
                        matching_edges = generate_matching_edges(link_node, command.command, context)
                        matching_destinations = [e.destination for e in matching_edges]
                        edges = [e for e in edges if e.destination not in matching_destinations]
                        edges.extend(matching_edges)

                if "link" in args and args["link"]:
                    for link in args["link"]:
                        edges = [e for e in edges if e.destination.node_id != command.command.id or e.destination.field != link[2]]
                        edges = [
                            e
                            for e in edges
                            if e.destination.node_id != command.command.id or e.destination.field != link[2]
                        ]

                        node_id = link[0]
                        if re_negid.match(node_id):
@@ -428,7 +440,7 @@ def invoke_cli():
                        edges.append(
                            Edge(
                                source=EdgeConnection(node_id=node_output.node_path, field=node_output.field),
                                destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field)
                                destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field),
                            )
                        )

@@ -467,4 +479,7 @@ def invoke_cli():


if __name__ == "__main__":
    invoke_cli()
    if config.version:
        print(f"InvokeAI version {__version__}")
    else:
        invoke_cli()
@@ -4,9 +4,5 @@ __all__ = []

dirname = os.path.dirname(os.path.abspath(__file__))
for f in os.listdir(dirname):
    if (
        f != "__init__.py"
        and os.path.isfile("%s/%s" % (dirname, f))
        and f[-3:] == ".py"
    ):
    if f != "__init__.py" and os.path.isfile("%s/%s" % (dirname, f)) and f[-3:] == ".py":
        __all__.append(f[:-3])
@@ -2,17 +2,415 @@

from __future__ import annotations

import json
from abc import ABC, abstractmethod
from enum import Enum
from inspect import signature
from typing import (TYPE_CHECKING, Dict, List, Literal, TypedDict, get_args,
                    get_type_hints)
import re
from typing import (
    TYPE_CHECKING,
    AbstractSet,
    Any,
    Callable,
    ClassVar,
    Literal,
    Mapping,
    Optional,
    Type,
    TypeVar,
    Union,
    get_args,
    get_type_hints,
)

from pydantic import BaseConfig, BaseModel, Field
from pydantic import BaseModel, Field, validator
from pydantic.fields import Undefined, ModelField
from pydantic.typing import NoArgAnyCallable
import semver

if TYPE_CHECKING:
    from ..services.invocation_services import InvocationServices


class InvalidVersionError(ValueError):
    pass


class FieldDescriptions:
    denoising_start = "When to start denoising, expressed as a percentage of total steps"
    denoising_end = "When to stop denoising, expressed as a percentage of total steps"
    cfg_scale = "Classifier-Free Guidance scale"
    scheduler = "Scheduler to use during inference"
    positive_cond = "Positive conditioning tensor"
    negative_cond = "Negative conditioning tensor"
    noise = "Noise tensor"
    clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
    unet = "UNet (scheduler, LoRAs)"
    vae = "VAE"
    cond = "Conditioning tensor"
    controlnet_model = "ControlNet model to load"
    vae_model = "VAE model to load"
    lora_model = "LoRA model to load"
    main_model = "Main model (UNet, VAE, CLIP) to load"
    sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
    sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
    onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
    lora_weight = "The weight at which the LoRA is applied to each model"
    compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
    raw_prompt = "Raw prompt text (no parsing)"
    sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
    skipped_layers = "Number of layers to skip in text encoder"
    seed = "Seed for random number generation"
    steps = "Number of steps to run"
    width = "Width of output (px)"
    height = "Height of output (px)"
    control = "ControlNet(s) to apply"
    denoised_latents = "Denoised latents tensor"
    latents = "Latents tensor"
    strength = "Strength of denoising (proportional to steps)"
    core_metadata = "Optional core metadata to be written to image"
    interp_mode = "Interpolation mode"
    torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
    fp32 = "Whether or not to use full float32 precision"
    precision = "Precision to use"
    tiled = "Processing using overlapping tiles (reduce memory consumption)"
    detect_res = "Pixel resolution for detection"
    image_res = "Pixel resolution for output image"
    safe_mode = "Whether or not to use safe mode"
    scribble_mode = "Whether or not to use scribble mode"
    scale_factor = "The factor by which to scale"
    blend_alpha = (
        "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
    )
    num_1 = "The first number"
    num_2 = "The second number"
    mask = "The mask to use for the operation"


class Input(str, Enum):
    """
    The type of input a field accepts.
    - `Input.Direct`: The field must have its value provided directly, when the invocation and field \
    are instantiated.
    - `Input.Connection`: The field must have its value provided by a connection.
    - `Input.Any`: The field may have its value provided either directly or by a connection.
    """

    Connection = "connection"
    Direct = "direct"
    Any = "any"
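
To make the three modes concrete, here is a hedged sketch of how a node author selects them through the `InputField` wrapper defined further down (the field names and types are illustrative, not taken from this diff):

    noise: LatentsField = InputField(input=Input.Connection)  # value must arrive over a graph edge
    seed: int = InputField(default=0, input=Input.Direct)     # value must be set when the node is built
    width: int = InputField(default=512, input=Input.Any)     # either source is acceptable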

class UIType(str, Enum):
    """
    Type hints for the UI.
    If a field should be provided a data type that does not exactly match the python type of the field, \
    use this to provide the type that should be used instead. See the node development docs for detail \
    on adding a new field type, which involves client-side changes.
    """

    # region Primitives
    Boolean = "boolean"
    Color = "ColorField"
    Conditioning = "ConditioningField"
    Control = "ControlField"
    Float = "float"
    Image = "ImageField"
    Integer = "integer"
    Latents = "LatentsField"
    String = "string"
    # endregion

    # region Collection Primitives
    BooleanCollection = "BooleanCollection"
    ColorCollection = "ColorCollection"
    ConditioningCollection = "ConditioningCollection"
    ControlCollection = "ControlCollection"
    FloatCollection = "FloatCollection"
    ImageCollection = "ImageCollection"
    IntegerCollection = "IntegerCollection"
    LatentsCollection = "LatentsCollection"
    StringCollection = "StringCollection"
    # endregion

    # region Polymorphic Primitives
    BooleanPolymorphic = "BooleanPolymorphic"
    ColorPolymorphic = "ColorPolymorphic"
    ConditioningPolymorphic = "ConditioningPolymorphic"
    ControlPolymorphic = "ControlPolymorphic"
    FloatPolymorphic = "FloatPolymorphic"
    ImagePolymorphic = "ImagePolymorphic"
    IntegerPolymorphic = "IntegerPolymorphic"
    LatentsPolymorphic = "LatentsPolymorphic"
    StringPolymorphic = "StringPolymorphic"
    # endregion

    # region Models
    MainModel = "MainModelField"
    SDXLMainModel = "SDXLMainModelField"
    SDXLRefinerModel = "SDXLRefinerModelField"
    ONNXModel = "ONNXModelField"
    VaeModel = "VaeModelField"
    LoRAModel = "LoRAModelField"
    ControlNetModel = "ControlNetModelField"
    UNet = "UNetField"
    Vae = "VaeField"
    CLIP = "ClipField"
    # endregion

    # region Iterate/Collect
    Collection = "Collection"
    CollectionItem = "CollectionItem"
    # endregion

    # region Misc
    Enum = "enum"
    Scheduler = "Scheduler"
    WorkflowField = "WorkflowField"
    IsIntermediate = "IsIntermediate"
    MetadataField = "MetadataField"
    # endregion


class UIComponent(str, Enum):
    """
    The type of UI component to use for a field, used to override the default components, which are \
    inferred from the field type.
    """

    None_ = "none"
    Textarea = "textarea"
    Slider = "slider"
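
The Compel prompt node later in this diff shows the typical override: a plain `str` field upgraded from the default single-line input to a multi-line editor:

    prompt: str = InputField(
        default="",
        description=FieldDescriptions.compel_prompt,
        ui_component=UIComponent.Textarea,
    )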

class _InputField(BaseModel):
    """
    *DO NOT USE*
    This helper class is used to tell the client about our custom field attributes via OpenAPI
    schema generation, and Typescript type generation from that schema. It serves no functional
    purpose in the backend.
    """

    input: Input
    ui_hidden: bool
    ui_type: Optional[UIType]
    ui_component: Optional[UIComponent]
    ui_order: Optional[int]
    item_default: Optional[Any]


class _OutputField(BaseModel):
    """
    *DO NOT USE*
    This helper class is used to tell the client about our custom field attributes via OpenAPI
    schema generation, and Typescript type generation from that schema. It serves no functional
    purpose in the backend.
    """

    ui_hidden: bool
    ui_type: Optional[UIType]
    ui_order: Optional[int]


def InputField(
    *args: Any,
    default: Any = Undefined,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None,
    include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None,
    const: Optional[bool] = None,
    gt: Optional[float] = None,
    ge: Optional[float] = None,
    lt: Optional[float] = None,
    le: Optional[float] = None,
    multiple_of: Optional[float] = None,
    allow_inf_nan: Optional[bool] = None,
    max_digits: Optional[int] = None,
    decimal_places: Optional[int] = None,
    min_items: Optional[int] = None,
    max_items: Optional[int] = None,
    unique_items: Optional[bool] = None,
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    allow_mutation: bool = True,
    regex: Optional[str] = None,
    discriminator: Optional[str] = None,
    repr: bool = True,
    input: Input = Input.Any,
    ui_type: Optional[UIType] = None,
    ui_component: Optional[UIComponent] = None,
    ui_hidden: bool = False,
    ui_order: Optional[int] = None,
    item_default: Optional[Any] = None,
    **kwargs: Any,
) -> Any:
    """
    Creates an input field for an invocation.

    This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \
    that adds a few extra parameters to support graph execution and the node editor UI.

    :param Input input: [Input.Any] The kind of input this field requires. \
    `Input.Direct` means a value must be provided on instantiation. \
    `Input.Connection` means the value must be provided by a connection. \
    `Input.Any` means either will do.

    :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
    In some situations, the field's type is not enough to infer the correct UI type. \
    For example, model selection fields should render a dropdown UI component to select a model. \
    Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
    `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
    `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.

    :param UIComponent ui_component: [None] Optionally specifies a specific component to use in the UI. \
    The UI will always render a suitable component, but sometimes you want something different than the default. \
    For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \
    For this case, you could provide `UIComponent.Textarea`.

    :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.

    :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI.

    :param Any item_default: [None] Specifies the default item value, if this is a collection input. \
    Ignored for non-collection fields.
    """
    return Field(
        *args,
        default=default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        exclude=exclude,
        include=include,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        allow_inf_nan=allow_inf_nan,
        max_digits=max_digits,
        decimal_places=decimal_places,
        min_items=min_items,
        max_items=max_items,
        unique_items=unique_items,
        min_length=min_length,
        max_length=max_length,
        allow_mutation=allow_mutation,
        regex=regex,
        discriminator=discriminator,
        repr=repr,
        input=input,
        ui_type=ui_type,
        ui_component=ui_component,
        ui_hidden=ui_hidden,
        ui_order=ui_order,
        item_default=item_default,
        **kwargs,
    )
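
Usage, as the nodes later in this diff demonstrate, ranges from simple defaults to connection-only fields:

    start: int = InputField(default=0, description="The start of the range")
    clip: ClipField = InputField(title="CLIP", description=FieldDescriptions.clip, input=Input.Connection)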

def OutputField(
    *args: Any,
    default: Any = Undefined,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    exclude: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None,
    include: Optional[Union[AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any]] = None,
    const: Optional[bool] = None,
    gt: Optional[float] = None,
    ge: Optional[float] = None,
    lt: Optional[float] = None,
    le: Optional[float] = None,
    multiple_of: Optional[float] = None,
    allow_inf_nan: Optional[bool] = None,
    max_digits: Optional[int] = None,
    decimal_places: Optional[int] = None,
    min_items: Optional[int] = None,
    max_items: Optional[int] = None,
    unique_items: Optional[bool] = None,
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    allow_mutation: bool = True,
    regex: Optional[str] = None,
    discriminator: Optional[str] = None,
    repr: bool = True,
    ui_type: Optional[UIType] = None,
    ui_hidden: bool = False,
    ui_order: Optional[int] = None,
    **kwargs: Any,
) -> Any:
    """
    Creates an output field for an invocation output.

    This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \
    that adds a few extra parameters to support graph execution and the node editor UI.

    :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \
    In some situations, the field's type is not enough to infer the correct UI type. \
    For example, model selection fields should render a dropdown UI component to select a model. \
    Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \
    `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
    `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.

    :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.

    :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI.
    """
    return Field(
        *args,
        default=default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        exclude=exclude,
        include=include,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        allow_inf_nan=allow_inf_nan,
        max_digits=max_digits,
        decimal_places=decimal_places,
        min_items=min_items,
        max_items=max_items,
        unique_items=unique_items,
        min_length=min_length,
        max_length=max_length,
        allow_mutation=allow_mutation,
        regex=regex,
        discriminator=discriminator,
        repr=repr,
        ui_type=ui_type,
        ui_hidden=ui_hidden,
        ui_order=ui_order,
        **kwargs,
    )


class UIConfigBase(BaseModel):
    """
    Provides additional node configuration to the UI.
    This is used internally by the @invocation decorator logic. Do not use this directly.
    """

    tags: Optional[list[str]] = Field(default_factory=None, description="The node's tags")
    title: Optional[str] = Field(default=None, description="The node's display name")
    category: Optional[str] = Field(default=None, description="The node's category")
    version: Optional[str] = Field(
        default=None, description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".'
    )


class InvocationContext:
    services: InvocationServices
    graph_execution_state_id: str
@@ -23,10 +421,11 @@ class InvocationContext:


class BaseInvocationOutput(BaseModel):
    """Base class for all invocation outputs"""
    """
    Base class for all invocation outputs.

    # All outputs must include a type name like this:
    # type: Literal['your_output_name']
    All invocation outputs must use the `@invocation_output` decorator to provide their unique type.
    """

    @classmethod
    def get_all_subclasses_tuple(cls):
@@ -39,14 +438,35 @@ class BaseInvocationOutput(BaseModel):
            toprocess.extend(next_subclasses)
        return tuple(subclasses)

    class Config:
        @staticmethod
        def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
            if "required" not in schema or not isinstance(schema["required"], list):
                schema["required"] = list()
            schema["required"].extend(["type"])


class RequiredConnectionException(Exception):
    """Raised when a field which requires a connection did not receive a value."""

    def __init__(self, node_id: str, field_name: str):
        super().__init__(f"Node {node_id} missing connections for field {field_name}")


class MissingInputException(Exception):
    """Raised when a field which requires some input did not receive a value."""

    def __init__(self, node_id: str, field_name: str):
        super().__init__(f"Node {node_id} missing value or connection for field {field_name}")


class BaseInvocation(ABC, BaseModel):
    """A node to process inputs and produce outputs.
    May use dependency injection in __init__ to receive providers.
    """
    A node to process inputs and produce outputs.
    May use dependency injection in __init__ to receive providers.

    # All invocations must include a type name like this:
    # type: Literal['your_output_name']
    All invocations must use the `@invocation` decorator to provide their unique type.
    """

    @classmethod
    def get_all_subclasses(cls):
@@ -77,70 +497,175 @@ class BaseInvocation(ABC, BaseModel):
    def get_output_type(cls):
        return signature(cls.invoke).return_annotation

    class Config:
        @staticmethod
        def schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
            uiconfig = getattr(model_class, "UIConfig", None)
            if uiconfig and hasattr(uiconfig, "title"):
                schema["title"] = uiconfig.title
            if uiconfig and hasattr(uiconfig, "tags"):
                schema["tags"] = uiconfig.tags
            if uiconfig and hasattr(uiconfig, "category"):
                schema["category"] = uiconfig.category
            if uiconfig and hasattr(uiconfig, "version"):
                schema["version"] = uiconfig.version
            if "required" not in schema or not isinstance(schema["required"], list):
                schema["required"] = list()
            schema["required"].extend(["type", "id"])

    @abstractmethod
    def invoke(self, context: InvocationContext) -> BaseInvocationOutput:
        """Invoke with provided context and return outputs."""
        pass

    # fmt: off
    id: str = Field(description="The id of this node. Must be unique among all nodes.")
    is_intermediate: bool = Field(default=False, description="Whether or not this node is an intermediate node.")
    # fmt: on
    def __init__(self, **data):
        # nodes may have required fields, that can accept input from connections
        # on instantiation of the model, we need to exclude these from validation
        restore = dict()
        try:
            field_names = list(self.__fields__.keys())
            for field_name in field_names:
                # if the field is required and may get its value from a connection, exclude it from validation
                field = self.__fields__[field_name]
                _input = field.field_info.extra.get("input", None)
                if _input in [Input.Connection, Input.Any] and field.required:
                    if field_name not in data:
                        restore[field_name] = self.__fields__.pop(field_name)
            # instantiate the node, which will validate the data
            super().__init__(**data)
        finally:
            # restore the removed fields
            for field_name, field in restore.items():
                self.__fields__[field_name] = field

    def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput:
        for field_name, field in self.__fields__.items():
            _input = field.field_info.extra.get("input", None)
            if field.required and not hasattr(self, field_name):
                if _input == Input.Connection:
                    raise RequiredConnectionException(self.__fields__["type"].default, field_name)
                elif _input == Input.Any:
                    raise MissingInputException(self.__fields__["type"].default, field_name)
        return self.invoke(context)
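
A hedged sketch of the lifecycle these two methods implement, using `CompelInvocation` from later in this diff (its `clip` field is required but declared with `input=Input.Connection`):

    node = CompelInvocation(id="1", prompt="a cat")  # parses: __init__ skips validating `clip`
    node.invoke_internal(context)  # raises RequiredConnectionException if no edge ever supplied `clip`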

    id: str = Field(
        description="The id of this instance of an invocation. Must be unique among all instances of invocations."
    )
    is_intermediate: bool = InputField(
        default=False, description="Whether or not this is an intermediate invocation.", ui_type=UIType.IsIntermediate
    )
    workflow: Optional[str] = InputField(
        default=None,
        description="The workflow to save with the image",
        ui_type=UIType.WorkflowField,
    )

    @validator("workflow", pre=True)
    def validate_workflow_is_json(cls, v):
        if v is None:
            return None
        try:
            json.loads(v)
        except json.decoder.JSONDecodeError:
            raise ValueError("Workflow must be valid JSON")
        return v

    UIConfig: ClassVar[Type[UIConfigBase]]


# TODO: figure out a better way to provide these hints
# TODO: when we can upgrade to python 3.11, we can use the `NotRequired` type instead of `total=False`
class UIConfig(TypedDict, total=False):
    type_hints: Dict[
        str,
        Literal[
            "integer",
            "float",
            "boolean",
            "string",
            "enum",
            "image",
            "latents",
            "model",
            "control",
            "image_collection",
            "vae_model",
            "lora_model",
        ],
    ]
    tags: List[str]
    title: str
GenericBaseInvocation = TypeVar("GenericBaseInvocation", bound=BaseInvocation)


class CustomisedSchemaExtra(TypedDict):
    ui: UIConfig
def invocation(
    invocation_type: str,
    title: Optional[str] = None,
    tags: Optional[list[str]] = None,
    category: Optional[str] = None,
    version: Optional[str] = None,
) -> Callable[[Type[GenericBaseInvocation]], Type[GenericBaseInvocation]]:
    """
    Adds metadata to an invocation.


class InvocationConfig(BaseConfig):
    """Customizes pydantic's BaseModel.Config class for use by Invocations.

    Provide `schema_extra` a `ui` dict to add hints for generated UIs.

    `tags`
    - A list of strings, used to categorise invocations.

    `type_hints`
    - A dict of field types which override the types in the invocation definition.
    - Each key should be the name of one of the invocation's fields.
    - Each value should be one of the valid types:
      - `integer`, `float`, `boolean`, `string`, `enum`, `image`, `latents`, `model`

    ```python
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["stable-diffusion", "image"],
                "type_hints": {
                    "initial_image": "image",
                },
            },
        }
    ```
    :param str invocation_type: The type of the invocation. Must be unique among all invocations.
    :param Optional[str] title: Adds a title to the invocation. Use if the auto-generated title isn't quite right. Defaults to None.
    :param Optional[list[str]] tags: Adds tags to the invocation. Invocations may be searched for by their tags. Defaults to None.
    :param Optional[str] category: Adds a category to the invocation. Used to group the invocations in the UI. Defaults to None.
    """

    schema_extra: CustomisedSchemaExtra
    def wrapper(cls: Type[GenericBaseInvocation]) -> Type[GenericBaseInvocation]:
        # Validate invocation types on creation of invocation classes
        # TODO: ensure unique?
        if re.compile(r"^\S+$").match(invocation_type) is None:
            raise ValueError(f'"invocation_type" must consist of non-whitespace characters, got "{invocation_type}"')

        # Add OpenAPI schema extras
        uiconf_name = cls.__qualname__ + ".UIConfig"
        if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name:
            cls.UIConfig = type(uiconf_name, (UIConfigBase,), dict())
        if title is not None:
            cls.UIConfig.title = title
        if tags is not None:
            cls.UIConfig.tags = tags
        if category is not None:
            cls.UIConfig.category = category
        if version is not None:
            try:
                semver.Version.parse(version)
            except ValueError as e:
                raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
            cls.UIConfig.version = version

        # Add the invocation type to the pydantic model of the invocation
        invocation_type_annotation = Literal[invocation_type]  # type: ignore
        invocation_type_field = ModelField.infer(
            name="type",
            value=invocation_type,
            annotation=invocation_type_annotation,
            class_validators=None,
            config=cls.__config__,
        )
        cls.__fields__.update({"type": invocation_type_field})
        # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html
        if annotations := cls.__dict__.get("__annotations__", None):
            annotations.update({"type": invocation_type_annotation})
        return cls

    return wrapper
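
Usage, as the collections nodes later in this diff show:

    @invocation("range", title="Integer Range", tags=["collection", "integer", "range"], category="collections", version="1.0.0")
    class RangeInvocation(BaseInvocation):
        """Creates a range of numbers from start to stop with step"""
        ...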

GenericBaseInvocationOutput = TypeVar("GenericBaseInvocationOutput", bound=BaseInvocationOutput)


def invocation_output(
    output_type: str,
) -> Callable[[Type[GenericBaseInvocationOutput]], Type[GenericBaseInvocationOutput]]:
    """
    Adds metadata to an invocation output.

    :param str output_type: The type of the invocation output. Must be unique among all invocation outputs.
    """

    def wrapper(cls: Type[GenericBaseInvocationOutput]) -> Type[GenericBaseInvocationOutput]:
        # Validate output types on creation of invocation output classes
        # TODO: ensure unique?
        if re.compile(r"^\S+$").match(output_type) is None:
            raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"')

        # Add the output type to the pydantic model of the invocation output
        output_type_annotation = Literal[output_type]  # type: ignore
        output_type_field = ModelField.infer(
            name="type",
            value=output_type,
            annotation=output_type_annotation,
            class_validators=None,
            config=cls.__config__,
        )
        cls.__fields__.update({"type": output_type_field})

        # to support 3.9, 3.10 and 3.11, as described in https://docs.python.org/3/howto/annotations.html
        if annotations := cls.__dict__.get("__annotations__", None):
            annotations.update({"type": output_type_annotation})

        return cls

    return wrapper
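
A hedged usage sketch mirroring `@invocation` above (the output name and field here are illustrative, not taken from this diff):

    @invocation_output("my_integer_output")
    class MyIntegerOutput(BaseInvocationOutput):
        value: int = OutputField(description="The resulting integer")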

@@ -1,66 +1,24 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team

from typing import Literal

import numpy as np
from pydantic import Field, validator
from pydantic import validator

from invokeai.app.models.image import ImageField
from invokeai.app.invocations.primitives import IntegerCollectionOutput
from invokeai.app.util.misc import SEED_MAX, get_random_seed

from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
                             InvocationConfig, InvocationContext, UIConfig)


class IntCollectionOutput(BaseInvocationOutput):
    """A collection of integers"""

    type: Literal["int_collection"] = "int_collection"

    # Outputs
    collection: list[int] = Field(default=[], description="The int collection")


class FloatCollectionOutput(BaseInvocationOutput):
    """A collection of floats"""

    type: Literal["float_collection"] = "float_collection"

    # Outputs
    collection: list[float] = Field(
        default=[], description="The float collection")


class ImageCollectionOutput(BaseInvocationOutput):
    """A collection of images"""

    type: Literal["image_collection"] = "image_collection"

    # Outputs
    collection: list[ImageField] = Field(
        default=[], description="The output images")

    class Config:
        schema_extra = {"required": ["type", "collection"]}
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation


@invocation(
    "range", title="Integer Range", tags=["collection", "integer", "range"], category="collections", version="1.0.0"
)
class RangeInvocation(BaseInvocation):
    """Creates a range of numbers from start to stop with step"""

    type: Literal["range"] = "range"

    # Inputs
    start: int = Field(default=0, description="The start of the range")
    stop: int = Field(default=10, description="The stop of the range")
    step: int = Field(default=1, description="The step of the range")

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Range",
                "tags": ["range", "integer", "collection"]
            },
        }
    start: int = InputField(default=0, description="The start of the range")
    stop: int = InputField(default=10, description="The stop of the range")
    step: int = InputField(default=1, description="The step of the range")

    @validator("stop")
    def stop_gt_start(cls, v, values):
@@ -68,94 +26,48 @@ class RangeInvocation(BaseInvocation):
            raise ValueError("stop must be greater than start")
        return v

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        return IntCollectionOutput(
            collection=list(range(self.start, self.stop, self.step))
        )
    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
        return IntegerCollectionOutput(collection=list(range(self.start, self.stop, self.step)))


@invocation(
    "range_of_size",
    title="Integer Range of Size",
    tags=["collection", "integer", "size", "range"],
    category="collections",
    version="1.0.0",
)
class RangeOfSizeInvocation(BaseInvocation):
    """Creates a range from start to start + size with step"""

    type: Literal["range_of_size"] = "range_of_size"
    start: int = InputField(default=0, description="The start of the range")
    size: int = InputField(default=1, description="The number of values")
    step: int = InputField(default=1, description="The step of the range")

    # Inputs
    start: int = Field(default=0, description="The start of the range")
    size: int = Field(default=1, description="The number of values")
    step: int = Field(default=1, description="The step of the range")

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Sized Range",
                "tags": ["range", "integer", "size", "collection"]
            },
        }

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        return IntCollectionOutput(
            collection=list(
                range(
                    self.start, self.start + self.size,
                    self.step)))
    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
        return IntegerCollectionOutput(collection=list(range(self.start, self.start + self.size, self.step)))


@invocation(
    "random_range",
    title="Random Range",
    tags=["range", "integer", "random", "collection"],
    category="collections",
    version="1.0.0",
)
class RandomRangeInvocation(BaseInvocation):
    """Creates a collection of random numbers"""

    type: Literal["random_range"] = "random_range"

    # Inputs
    low: int = Field(default=0, description="The inclusive low value")
    high: int = Field(
        default=np.iinfo(np.int32).max, description="The exclusive high value"
    )
    size: int = Field(default=1, description="The number of values to generate")
    seed: int = Field(
    low: int = InputField(default=0, description="The inclusive low value")
    high: int = InputField(default=np.iinfo(np.int32).max, description="The exclusive high value")
    size: int = InputField(default=1, description="The number of values to generate")
    seed: int = InputField(
        ge=0,
        le=SEED_MAX,
        description="The seed for the RNG (omit for random)",
        default_factory=get_random_seed,
    )

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Random Range",
                "tags": ["range", "integer", "random", "collection"]
            },
        }

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
        rng = np.random.default_rng(self.seed)
        return IntCollectionOutput(
            collection=list(
                rng.integers(
                    low=self.low, high=self.high,
                    size=self.size)))


class ImageCollectionInvocation(BaseInvocation):
    """Load a collection of images and provide it as output."""

    # fmt: off
    type: Literal["image_collection"] = "image_collection"

    # Inputs
    images: list[ImageField] = Field(
        default=[], description="The image collection to load"
    )
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageCollectionOutput:
        return ImageCollectionOutput(collection=self.images)

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "type_hints": {
                    "title": "Image Collection",
                    "images": "image_collection",
                }
            },
        }
        return IntegerCollectionOutput(collection=list(rng.integers(low=self.low, high=self.high, size=self.size)))
@@ -1,152 +1,132 @@
|
||||
from typing import Literal, Optional, Union, List, Annotated
|
||||
from pydantic import BaseModel, Field
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Union
|
||||
|
||||
import torch
|
||||
from compel import Compel, ReturnedEmbeddingsType
|
||||
from compel.prompt_parser import (Blend, Conjunction,
|
||||
CrossAttentionControlSubstitute,
|
||||
FlattenedPrompt, Fragment)
|
||||
from ...backend.util.devices import torch_dtype
|
||||
from ...backend.model_management import ModelType
|
||||
from ...backend.model_management.models import ModelNotFoundException
|
||||
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
|
||||
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
|
||||
|
||||
from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import (
|
||||
BasicConditioningInfo,
|
||||
SDXLConditioningInfo,
|
||||
)
|
||||
|
||||
from ...backend.model_management.models import ModelType
|
||||
from ...backend.model_management.lora import ModelPatcher
|
||||
from ...backend.model_management.models import ModelNotFoundException
|
||||
from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
|
||||
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
|
||||
InvocationConfig, InvocationContext)
|
||||
from ...backend.util.devices import torch_dtype
|
||||
from .baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
OutputField,
|
||||
UIComponent,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from .model import ClipField
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
-class ConditioningField(BaseModel):
-    conditioning_name: Optional[str] = Field(
-        default=None, description="The name of conditioning data")
-
-    class Config:
-        schema_extra = {"required": ["conditioning_name"]}
-
-@dataclass
-class BasicConditioningInfo:
-    #type: Literal["basic_conditioning"] = "basic_conditioning"
-    embeds: torch.Tensor
-    extra_conditioning: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo]
-    # weight: float
-    # mode: ConditioningAlgo
-
-@dataclass
-class SDXLConditioningInfo(BasicConditioningInfo):
-    #type: Literal["sdxl_conditioning"] = "sdxl_conditioning"
-    pooled_embeds: torch.Tensor
-    add_time_ids: torch.Tensor
-
-ConditioningInfoType = Annotated[
-    Union[BasicConditioningInfo, SDXLConditioningInfo],
-    Field(discriminator="type")
-]

 @dataclass
 class ConditioningFieldData:
-    conditionings: List[Union[BasicConditioningInfo, SDXLConditioningInfo]]
-    #unconditioned: Optional[torch.Tensor]
+    conditionings: List[BasicConditioningInfo]
+    # unconditioned: Optional[torch.Tensor]

-#class ConditioningAlgo(str, Enum):
+
+# class ConditioningAlgo(str, Enum):
 #    Compose = "compose"
 #    ComposeEx = "compose_ex"
 #    PerpNeg = "perp_neg"

-class CompelOutput(BaseInvocationOutput):
-    """Compel parser output"""
-
-    #fmt: off
-    type: Literal["compel_output"] = "compel_output"
-
-    conditioning: ConditioningField = Field(default=None, description="Conditioning")
-    #fmt: on


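A note on the removed ConditioningInfoType alias: a pydantic discriminated union only resolves when every member declares the discriminator as a literal field, and the commented-out `type` attributes above never did. A minimal working sketch of the intended pattern (hypothetical class names, pydantic v1 style):

from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field


class BasicInfo(BaseModel):
    type: Literal["basic"] = "basic"  # the discriminator must be a declared Literal field


class SDXLInfo(BasicInfo):
    type: Literal["sdxl"] = "sdxl"


InfoType = Annotated[Union[BasicInfo, SDXLInfo], Field(discriminator="type")]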
@invocation("compel", title="Prompt", tags=["prompt", "compel"], category="conditioning", version="1.0.0")
|
||||
class CompelInvocation(BaseInvocation):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
|
||||
type: Literal["compel"] = "compel"
|
||||
|
||||
prompt: str = Field(default="", description="Prompt")
|
||||
clip: ClipField = Field(None, description="Clip to use")
|
||||
|
||||
# Schema customisation
|
||||
class Config(InvocationConfig):
|
||||
schema_extra = {
|
||||
"ui": {
|
||||
"title": "Prompt (Compel)",
|
||||
"tags": ["prompt", "compel"],
|
||||
"type_hints": {
|
||||
"model": "model"
|
||||
}
|
||||
},
|
||||
}
|
||||
prompt: str = InputField(
|
||||
default="",
|
||||
description=FieldDescriptions.compel_prompt,
|
||||
ui_component=UIComponent.Textarea,
|
||||
)
|
||||
clip: ClipField = InputField(
|
||||
title="CLIP",
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> CompelOutput:
|
||||
def invoke(self, context: InvocationContext) -> ConditioningOutput:
|
||||
tokenizer_info = context.services.model_manager.get_model(
|
||||
**self.clip.tokenizer.dict(), context=context,
|
||||
**self.clip.tokenizer.dict(),
|
||||
context=context,
|
||||
)
|
||||
text_encoder_info = context.services.model_manager.get_model(
|
||||
**self.clip.text_encoder.dict(), context=context,
|
||||
**self.clip.text_encoder.dict(),
|
||||
context=context,
|
||||
)
|
||||
|
||||
def _lora_loader():
|
||||
for lora in self.clip.loras:
|
||||
lora_info = context.services.model_manager.get_model(
|
||||
**lora.dict(exclude={"weight"}))
|
||||
lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context)
|
||||
yield (lora_info.context.model, lora.weight)
|
||||
del lora_info
|
||||
return
|
||||
|
||||
#loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
|
||||
# loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
|
||||
|
||||
ti_list = []
|
||||
for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", self.prompt):
|
||||
name = trigger[1:-1]
|
||||
try:
|
||||
ti_list.append(
|
||||
context.services.model_manager.get_model(
|
||||
model_name=name,
|
||||
base_model=self.clip.text_encoder.base_model,
|
||||
model_type=ModelType.TextualInversion,
|
||||
context=context,
|
||||
).context.model
|
||||
(
|
||||
name,
|
||||
context.services.model_manager.get_model(
|
||||
model_name=name,
|
||||
base_model=self.clip.text_encoder.base_model,
|
||||
model_type=ModelType.TextualInversion,
|
||||
context=context,
|
||||
).context.model,
|
||||
)
|
||||
)
|
||||
except ModelNotFoundException:
|
||||
# print(e)
|
||||
#import traceback
|
||||
#print(traceback.format_exc())
|
||||
print(f"Warn: trigger: \"{trigger}\" not found")
|
||||
|
||||
with ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),\
|
||||
ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (tokenizer, ti_manager),\
|
||||
ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers),\
|
||||
text_encoder_info as text_encoder:
|
||||
# import traceback
|
||||
# print(traceback.format_exc())
|
||||
print(f'Warn: trigger: "{trigger}" not found')
|
||||
|
||||
with ModelPatcher.apply_lora_text_encoder(
|
||||
text_encoder_info.context.model, _lora_loader()
|
||||
), ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (
|
||||
tokenizer,
|
||||
ti_manager,
|
||||
), ModelPatcher.apply_clip_skip(
|
||||
text_encoder_info.context.model, self.clip.skipped_layers
|
||||
), text_encoder_info as text_encoder:
|
||||
compel = Compel(
|
||||
tokenizer=tokenizer,
|
||||
text_encoder=text_encoder,
|
||||
textual_inversion_manager=ti_manager,
|
||||
dtype_for_device_getter=torch_dtype,
|
||||
truncate_long_prompts=True,
|
||||
truncate_long_prompts=False,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(self.prompt)
|
||||
prompt: Union[FlattenedPrompt, Blend] = conjunction.prompts[0]
|
||||
|
||||
if context.services.configuration.log_tokenization:
|
||||
log_tokenization_for_prompt_object(prompt, tokenizer)
|
||||
log_tokenization_for_conjunction(conjunction, tokenizer)
|
||||
|
||||
c, options = compel.build_conditioning_tensor_for_prompt_object(
|
||||
prompt)
|
||||
c, options = compel.build_conditioning_tensor_for_conjunction(conjunction)
|
||||
|
||||
ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(
|
||||
tokens_count_including_eos_bos=get_max_token_count(
|
||||
tokenizer, conjunction),
|
||||
cross_attention_control_args=options.get(
|
||||
"cross_attention_control", None),)
|
||||
tokens_count_including_eos_bos=get_max_token_count(tokenizer, conjunction),
|
||||
cross_attention_control_args=options.get("cross_attention_control", None),
|
||||
)
|
||||
|
||||
c = c.detach().to("cpu")
|
||||
|
||||
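The pattern above repeats throughout this refactor: node metadata moves out of the nested Config/schema_extra dict into the @invocation decorator, and pydantic Field inputs become InputField declarations. A minimal sketch of a node written in the new style (hypothetical node; assumes a StringOutput primitive with a `value` field, as in this repo's primitives module):

from invokeai.app.invocations.primitives import StringOutput  # assumed primitive output

from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation


@invocation("echo_prompt", title="Echo Prompt", tags=["prompt"], category="conditioning", version="1.0.0")
class EchoPromptInvocation(BaseInvocation):
    """Return the prompt unchanged (illustrative only)."""

    prompt: str = InputField(default="", description="The prompt to echo")

    def invoke(self, context: InvocationContext) -> StringOutput:
        return StringOutput(value=self.prompt)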
@@ -162,138 +142,101 @@ class CompelInvocation(BaseInvocation):
         conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
         context.services.latents.save(conditioning_name, conditioning_data)

-        return CompelOutput(
+        return ConditioningOutput(
             conditioning=ConditioningField(
                 conditioning_name=conditioning_name,
             ),
         )


 class SDXLPromptInvocationBase:
-    def run_clip_raw(self, context, clip_field, prompt, get_pooled):
+    def run_clip_compel(
+        self,
+        context: InvocationContext,
+        clip_field: ClipField,
+        prompt: str,
+        get_pooled: bool,
+        lora_prefix: str,
+        zero_on_empty: bool,
+    ):
         tokenizer_info = context.services.model_manager.get_model(
             **clip_field.tokenizer.dict(),
             context=context,
         )
         text_encoder_info = context.services.model_manager.get_model(
             **clip_field.text_encoder.dict(),
             context=context,
         )

-        def _lora_loader():
-            for lora in clip_field.loras:
-                lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
-                yield (lora_info.context.model, lora.weight)
-                del lora_info
-            return
-
-        #loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
-
-        ti_list = []
-        for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt):
-            name = trigger[1:-1]
-            try:
-                ti_list.append(
-                    context.services.model_manager.get_model(
-                        model_name=name,
-                        base_model=clip_field.text_encoder.base_model,
-                        model_type=ModelType.TextualInversion,
-                    ).context.model
-                )
-            except ModelNotFoundException:
-                # print(e)
-                #import traceback
-                #print(traceback.format_exc())
-                print(f"Warn: trigger: \"{trigger}\" not found")
-
-        with ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),\
-                ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (tokenizer, ti_manager),\
-                ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers),\
-                text_encoder_info as text_encoder:
-
-            text_inputs = tokenizer(
-                prompt,
-                padding="max_length",
-                max_length=tokenizer.model_max_length,
-                truncation=True,
-                return_tensors="pt",
-            )
-            text_input_ids = text_inputs.input_ids
-            prompt_embeds = text_encoder(
-                text_input_ids.to(text_encoder.device),
-                output_hidden_states=True,
-            )
+        # return zero on empty
+        if prompt == "" and zero_on_empty:
+            cpu_text_encoder = text_encoder_info.context.model
+            c = torch.zeros(
+                (1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size),
+                dtype=text_encoder_info.context.cache.precision,
+            )
             if get_pooled:
-                c_pooled = prompt_embeds[0]
+                c_pooled = torch.zeros(
+                    (1, cpu_text_encoder.config.hidden_size),
+                    dtype=c.dtype,
+                )
             else:
                 c_pooled = None
-            c = prompt_embeds.hidden_states[-2]
-
-        del tokenizer
-        del text_encoder
-        del tokenizer_info
-        del text_encoder_info
-
-        c = c.detach().to("cpu")
-        if c_pooled is not None:
-            c_pooled = c_pooled.detach().to("cpu")
-
-        return c, c_pooled, None
-
-    def run_clip_compel(self, context, clip_field, prompt, get_pooled):
-        tokenizer_info = context.services.model_manager.get_model(
-            **clip_field.tokenizer.dict(),
-        )
-        text_encoder_info = context.services.model_manager.get_model(
-            **clip_field.text_encoder.dict(),
-        )
+            return c, c_pooled, None

         def _lora_loader():
             for lora in clip_field.loras:
-                lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
+                lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context)
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
             return

-        #loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
+        # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]

         ti_list = []
         for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt):
             name = trigger[1:-1]
             try:
                 ti_list.append(
-                    context.services.model_manager.get_model(
-                        model_name=name,
-                        base_model=clip_field.text_encoder.base_model,
-                        model_type=ModelType.TextualInversion,
-                    ).context.model
+                    (
+                        name,
+                        context.services.model_manager.get_model(
+                            model_name=name,
+                            base_model=clip_field.text_encoder.base_model,
+                            model_type=ModelType.TextualInversion,
+                            context=context,
+                        ).context.model,
+                    )
                 )
             except ModelNotFoundException:
                 # print(e)
-                #import traceback
-                #print(traceback.format_exc())
-                print(f"Warn: trigger: \"{trigger}\" not found")
-
-        with ModelPatcher.apply_lora_text_encoder(text_encoder_info.context.model, _lora_loader()),\
-                ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (tokenizer, ti_manager),\
-                ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers),\
-                text_encoder_info as text_encoder:
+                # import traceback
+                # print(traceback.format_exc())
+                print(f'Warn: trigger: "{trigger}" not found')
+
+        with ModelPatcher.apply_lora(
+            text_encoder_info.context.model, _lora_loader(), lora_prefix
+        ), ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as (
+            tokenizer,
+            ti_manager,
+        ), ModelPatcher.apply_clip_skip(
+            text_encoder_info.context.model, clip_field.skipped_layers
+        ), text_encoder_info as text_encoder:
             compel = Compel(
                 tokenizer=tokenizer,
                 text_encoder=text_encoder,
                 textual_inversion_manager=ti_manager,
                 dtype_for_device_getter=torch_dtype,
-                truncate_long_prompts=True,  # TODO:
-                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,  # TODO: clip skip
-                requires_pooled=True,
+                truncate_long_prompts=False,  # TODO:
+                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,  # TODO: clip skip
+                requires_pooled=get_pooled,
             )

             conjunction = Compel.parse_prompt_string(prompt)

             if context.services.configuration.log_tokenization:
                 # TODO: better logging for and syntax
-                for prompt_obj in conjunction.prompts:
-                    log_tokenization_for_prompt_object(prompt_obj, tokenizer)
+                log_tokenization_for_conjunction(conjunction, tokenizer)

             # TODO: ask for optimizations? to not run text_encoder twice
             c, options = compel.build_conditioning_tensor_for_conjunction(conjunction)

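One behavioral note on the zero_on_empty path added above: an empty prompt short-circuits before any tokenization and returns all-zero embeddings sized from the text encoder config. A rough sketch of the shapes involved (dimensions assume a CLIP-L encoder and are illustrative only):

import torch

max_position_embeddings = 77  # assumed CLIP sequence length
hidden_size = 768             # assumed CLIP-L hidden width

c = torch.zeros((1, max_position_embeddings, hidden_size))  # per-token embeddings
c_pooled = torch.zeros((1, hidden_size))                    # pooled embedding (SDXL branches only)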
@@ -318,49 +261,70 @@ class SDXLPromptInvocationBase:

         return c, c_pooled, ec


+@invocation(
+    "sdxl_compel_prompt",
+    title="SDXL Prompt",
+    tags=["sdxl", "compel", "prompt"],
+    category="conditioning",
+    version="1.0.0",
+)
 class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

-    type: Literal["sdxl_compel_prompt"] = "sdxl_compel_prompt"
-
-    prompt: str = Field(default="", description="Prompt")
-    style: str = Field(default="", description="Style prompt")
-    original_width: int = Field(1024, description="")
-    original_height: int = Field(1024, description="")
-    crop_top: int = Field(0, description="")
-    crop_left: int = Field(0, description="")
-    target_width: int = Field(1024, description="")
-    target_height: int = Field(1024, description="")
-    clip: ClipField = Field(None, description="Clip to use")
-    clip2: ClipField = Field(None, description="Clip2 to use")
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "SDXL Prompt (Compel)",
-                "tags": ["prompt", "compel"],
-                "type_hints": {
-                    "model": "model"
-                }
-            },
-        }
+    prompt: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea)
+    style: str = InputField(default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea)
+    original_width: int = InputField(default=1024, description="")
+    original_height: int = InputField(default=1024, description="")
+    crop_top: int = InputField(default=0, description="")
+    crop_left: int = InputField(default=0, description="")
+    target_width: int = InputField(default=1024, description="")
+    target_height: int = InputField(default=1024, description="")
+    clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1")
+    clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2")

     @torch.no_grad()
-    def invoke(self, context: InvocationContext) -> CompelOutput:
-        c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip, self.prompt, False)
+    def invoke(self, context: InvocationContext) -> ConditioningOutput:
+        c1, c1_pooled, ec1 = self.run_clip_compel(
+            context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=True
+        )
         if self.style.strip() == "":
-            c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.prompt, True)
+            c2, c2_pooled, ec2 = self.run_clip_compel(
+                context, self.clip2, self.prompt, True, "lora_te2_", zero_on_empty=True
+            )
         else:
-            c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True)
+            c2, c2_pooled, ec2 = self.run_clip_compel(
+                context, self.clip2, self.style, True, "lora_te2_", zero_on_empty=True
+            )

         original_size = (self.original_height, self.original_width)
         crop_coords = (self.crop_top, self.crop_left)
         target_size = (self.target_height, self.target_width)

-        add_time_ids = torch.tensor([
-            original_size + crop_coords + target_size
-        ])
+        add_time_ids = torch.tensor([original_size + crop_coords + target_size])

+        # [1, 77, 768], [1, 154, 1280]
+        if c1.shape[1] < c2.shape[1]:
+            c1 = torch.cat(
+                [
+                    c1,
+                    torch.zeros(
+                        (c1.shape[0], c2.shape[1] - c1.shape[1], c1.shape[2]), device=c1.device, dtype=c1.dtype
+                    ),
+                ],
+                dim=1,
+            )
+
+        elif c1.shape[1] > c2.shape[1]:
+            c2 = torch.cat(
+                [
+                    c2,
+                    torch.zeros(
+                        (c2.shape[0], c1.shape[1] - c2.shape[1], c2.shape[2]), device=c2.device, dtype=c2.dtype
+                    ),
+                ],
+                dim=1,
+            )
+
         conditioning_data = ConditioningFieldData(
             conditionings=[
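The add_time_ids tensor built above is SDXL's micro-conditioning vector: original size, crop coordinates, and target size concatenated into a single row. A worked example using the field defaults shown above:

import torch

original_size = (1024, 1024)  # (height, width)
crop_coords = (0, 0)          # (top, left)
target_size = (1024, 1024)

add_time_ids = torch.tensor([original_size + crop_coords + target_size])
print(add_time_ids.shape)  # torch.Size([1, 6])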
@@ -376,47 +340,42 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
         conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
         context.services.latents.save(conditioning_name, conditioning_data)

-        return CompelOutput(
+        return ConditioningOutput(
             conditioning=ConditioningField(
                 conditioning_name=conditioning_name,
             ),
         )


+@invocation(
+    "sdxl_refiner_compel_prompt",
+    title="SDXL Refiner Prompt",
+    tags=["sdxl", "compel", "prompt"],
+    category="conditioning",
+    version="1.0.0",
+)
 class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
     """Parse prompt using compel package to conditioning."""

-    type: Literal["sdxl_refiner_compel_prompt"] = "sdxl_refiner_compel_prompt"
-
-    style: str = Field(default="", description="Style prompt")  # TODO: ?
-    original_width: int = Field(1024, description="")
-    original_height: int = Field(1024, description="")
-    crop_top: int = Field(0, description="")
-    crop_left: int = Field(0, description="")
-    aesthetic_score: float = Field(6.0, description="")
-    clip2: ClipField = Field(None, description="Clip to use")
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "SDXL Refiner Prompt (Compel)",
-                "tags": ["prompt", "compel"],
-                "type_hints": {
-                    "model": "model"
-                }
-            },
-        }
+    style: str = InputField(
+        default="", description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea
+    )  # TODO: ?
+    original_width: int = InputField(default=1024, description="")
+    original_height: int = InputField(default=1024, description="")
+    crop_top: int = InputField(default=0, description="")
+    crop_left: int = InputField(default=0, description="")
+    aesthetic_score: float = InputField(default=6.0, description=FieldDescriptions.sdxl_aesthetic)
+    clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection)

     @torch.no_grad()
-    def invoke(self, context: InvocationContext) -> CompelOutput:
-        c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True)
+    def invoke(self, context: InvocationContext) -> ConditioningOutput:
+        # TODO: if there will appear lora for refiner - write proper prefix
+        c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "<NONE>", zero_on_empty=False)

         original_size = (self.original_height, self.original_width)
         crop_coords = (self.crop_top, self.crop_left)

-        add_time_ids = torch.tensor([
-            original_size + crop_coords + (self.aesthetic_score,)
-        ])
+        add_time_ids = torch.tensor([original_size + crop_coords + (self.aesthetic_score,)])

         conditioning_data = ConditioningFieldData(
             conditionings=[
@@ -424,7 +383,7 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase
                 embeds=c2,
                 pooled_embeds=c2_pooled,
                 add_time_ids=add_time_ids,
-                extra_conditioning=ec2, # or None
+                extra_conditioning=ec2,  # or None
             )
         ]
     )
@@ -432,152 +391,26 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase
-        conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
-        context.services.latents.save(conditioning_name, conditioning_data)
-
-        return CompelOutput(
-            conditioning=ConditioningField(
-                conditioning_name=conditioning_name,
-            ),
-        )
-
-class SDXLRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
-    """Pass unmodified prompt to conditioning without compel processing."""
-
-    type: Literal["sdxl_raw_prompt"] = "sdxl_raw_prompt"
-
-    prompt: str = Field(default="", description="Prompt")
-    style: str = Field(default="", description="Style prompt")
-    original_width: int = Field(1024, description="")
-    original_height: int = Field(1024, description="")
-    crop_top: int = Field(0, description="")
-    crop_left: int = Field(0, description="")
-    target_width: int = Field(1024, description="")
-    target_height: int = Field(1024, description="")
-    clip: ClipField = Field(None, description="Clip to use")
-    clip2: ClipField = Field(None, description="Clip2 to use")
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "SDXL Prompt (Raw)",
-                "tags": ["prompt", "compel"],
-                "type_hints": {
-                    "model": "model"
-                }
-            },
-        }
-
-    @torch.no_grad()
-    def invoke(self, context: InvocationContext) -> CompelOutput:
-        c1, c1_pooled, ec1 = self.run_clip_raw(context, self.clip, self.prompt, False)
-        if self.style.strip() == "":
-            c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.prompt, True)
-        else:
-            c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.style, True)
-
-        original_size = (self.original_height, self.original_width)
-        crop_coords = (self.crop_top, self.crop_left)
-        target_size = (self.target_height, self.target_width)
-
-        add_time_ids = torch.tensor([
-            original_size + crop_coords + target_size
-        ])
-
-        conditioning_data = ConditioningFieldData(
-            conditionings=[
-                SDXLConditioningInfo(
-                    embeds=torch.cat([c1, c2], dim=-1),
-                    pooled_embeds=c2_pooled,
-                    add_time_ids=add_time_ids,
-                    extra_conditioning=ec1,
-                )
-            ]
-        )
-
-        conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
-        context.services.latents.save(conditioning_name, conditioning_data)
-
-        return CompelOutput(
-            conditioning=ConditioningField(
-                conditioning_name=conditioning_name,
-            ),
-        )
-
-class SDXLRefinerRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
-    """Parse prompt using compel package to conditioning."""
-
-    type: Literal["sdxl_refiner_raw_prompt"] = "sdxl_refiner_raw_prompt"
-
-    style: str = Field(default="", description="Style prompt")  # TODO: ?
-    original_width: int = Field(1024, description="")
-    original_height: int = Field(1024, description="")
-    crop_top: int = Field(0, description="")
-    crop_left: int = Field(0, description="")
-    aesthetic_score: float = Field(6.0, description="")
-    clip2: ClipField = Field(None, description="Clip to use")
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "SDXL Refiner Prompt (Raw)",
-                "tags": ["prompt", "compel"],
-                "type_hints": {
-                    "model": "model"
-                }
-            },
-        }
-
-    @torch.no_grad()
-    def invoke(self, context: InvocationContext) -> CompelOutput:
-        c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.style, True)
-
-        original_size = (self.original_height, self.original_width)
-        crop_coords = (self.crop_top, self.crop_left)
-
-        add_time_ids = torch.tensor([
-            original_size + crop_coords + (self.aesthetic_score,)
-        ])
-
-        conditioning_data = ConditioningFieldData(
-            conditionings=[
-                SDXLConditioningInfo(
-                    embeds=c2,
-                    pooled_embeds=c2_pooled,
-                    add_time_ids=add_time_ids,
-                    extra_conditioning=ec2,  # or None
-                )
-            ]
-        )
-
         conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
         context.services.latents.save(conditioning_name, conditioning_data)

-        return CompelOutput(
+        return ConditioningOutput(
             conditioning=ConditioningField(
                 conditioning_name=conditioning_name,
             ),
         )


+@invocation_output("clip_skip_output")
 class ClipSkipInvocationOutput(BaseInvocationOutput):
     """Clip skip node output"""
-    type: Literal["clip_skip_output"] = "clip_skip_output"
-    clip: ClipField = Field(None, description="Clip with skipped layers")

+    clip: ClipField = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")


+@invocation("clip_skip", title="CLIP Skip", tags=["clipskip", "clip", "skip"], category="conditioning", version="1.0.0")
 class ClipSkipInvocation(BaseInvocation):
     """Skip layers in clip text_encoder model."""
-    type: Literal["clip_skip"] = "clip_skip"
-
-    clip: ClipField = Field(None, description="Clip to use")
-    skipped_layers: int = Field(0, description="Number of layers to skip in text_encoder")
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "CLIP Skip",
-                "tags": ["clip", "skip"]
-            },
-        }
+    clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP")
+    skipped_layers: int = InputField(default=0, description=FieldDescriptions.skipped_layers)

     def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput:
         self.clip.skipped_layers += self.skipped_layers
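ClipSkipInvocation mutates its input in place: each pass adds to skipped_layers, so chained CLIP Skip nodes accumulate. A small illustration of that accumulation (hypothetical standalone model of the field):

from dataclasses import dataclass


@dataclass
class Clip:
    skipped_layers: int = 0


clip = Clip()
for skip in (1, 1):  # two chained CLIP Skip nodes, each skipping one layer
    clip.skipped_layers += skip
print(clip.skipped_layers)  # 2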
@@ -587,46 +420,26 @@ class ClipSkipInvocation(BaseInvocation):


 def get_max_token_count(
-    tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction],
-    truncate_if_too_long=False) -> int:
+    tokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], truncate_if_too_long=False
+) -> int:
     if type(prompt) is Blend:
         blend: Blend = prompt
-        return max(
-            [
-                get_max_token_count(tokenizer, p, truncate_if_too_long)
-                for p in blend.prompts
-            ]
-        )
+        return max([get_max_token_count(tokenizer, p, truncate_if_too_long) for p in blend.prompts])
     elif type(prompt) is Conjunction:
         conjunction: Conjunction = prompt
-        return sum(
-            [
-                get_max_token_count(tokenizer, p, truncate_if_too_long)
-                for p in conjunction.prompts
-            ]
-        )
+        return sum([get_max_token_count(tokenizer, p, truncate_if_too_long) for p in conjunction.prompts])
     else:
-        return len(
-            get_tokens_for_prompt_object(
-                tokenizer, prompt, truncate_if_too_long))
+        return len(get_tokens_for_prompt_object(tokenizer, prompt, truncate_if_too_long))


-def get_tokens_for_prompt_object(
-    tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True
-) -> List[str]:
+def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True) -> List[str]:
     if type(parsed_prompt) is Blend:
-        raise ValueError(
-            "Blend is not supported here - you need to get tokens for each of its .children"
-        )
+        raise ValueError("Blend is not supported here - you need to get tokens for each of its .children")

     text_fragments = [
         x.text
         if type(x) is Fragment
-        else (
-            " ".join([f.text for f in x.original])
-            if type(x) is CrossAttentionControlSubstitute
-            else str(x)
-        )
+        else (" ".join([f.text for f in x.original]) if type(x) is CrossAttentionControlSubstitute else str(x))
         for x in parsed_prompt.children
     ]
     text = " ".join(text_fragments)
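The counting rules above are worth spelling out: a Blend reports the longest of its sub-prompts, while a Conjunction sums its parts. A hedged usage sketch (assumes a CLIP tokenizer in scope; the `.and()` syntax is compel's conjunction form):

# Assuming `tokenizer` is a CLIP tokenizer; counts include BOS/EOS tokens.
conjunction = Compel.parse_prompt_string('("a castle", "oil painting").and()')
total = get_max_token_count(tokenizer, conjunction)  # summed over conjunction.prompts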
@@ -637,25 +450,17 @@ def get_tokens_for_prompt_object(
     return tokens


-def log_tokenization_for_conjunction(
-    c: Conjunction, tokenizer, display_label_prefix=None
-):
+def log_tokenization_for_conjunction(c: Conjunction, tokenizer, display_label_prefix=None):
     display_label_prefix = display_label_prefix or ""
     for i, p in enumerate(c.prompts):
         if len(c.prompts) > 1:
             this_display_label_prefix = f"{display_label_prefix}(conjunction part {i + 1}, weight={c.weights[i]})"
         else:
             this_display_label_prefix = display_label_prefix
-        log_tokenization_for_prompt_object(
-            p,
-            tokenizer,
-            display_label_prefix=this_display_label_prefix
-        )
+        log_tokenization_for_prompt_object(p, tokenizer, display_label_prefix=this_display_label_prefix)


-def log_tokenization_for_prompt_object(
-    p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None
-):
+def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None):
     display_label_prefix = display_label_prefix or ""
     if type(p) is Blend:
         blend: Blend = p
@@ -692,13 +497,10 @@ def log_tokenization_for_prompt_object(
         )
     else:
         text = " ".join([x.text for x in flattened_prompt.children])
-        log_tokenization_for_text(
-            text, tokenizer, display_label=display_label_prefix
-        )
+        log_tokenization_for_text(text, tokenizer, display_label=display_label_prefix)


-def log_tokenization_for_text(
-    text, tokenizer, display_label=None, truncate_if_too_long=False):
+def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_too_long=False):
     """shows how the prompt is tokenized
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '

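As the docstring notes, raw tokens carry a `</w>` end-of-word marker that the logger replaces with a space for readability. A rough sketch of that cleanup step on a hypothetical token list:

tokens = ["a</w>", "cast", "le</w>"]  # typical BPE output for "a castle"
readable = "".join(t.replace("</w>", " ") for t in tokens)
print(readable)  # "a cast le " reads as the reassembled words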
@@ -1,47 +1,21 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-from typing import Literal
-
 import cv2 as cv
 import numpy
 from PIL import Image, ImageOps
-from pydantic import BaseModel, Field
+from invokeai.app.invocations.primitives import ImageField, ImageOutput

-from invokeai.app.models.image import ImageCategory, ImageField, ResourceOrigin
-from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
-from .image import ImageOutput
+from invokeai.app.models.image import ImageCategory, ResourceOrigin
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation


-class CvInvocationConfig(BaseModel):
-    """Helper class to provide all OpenCV invocations with additional config"""
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "tags": ["cv", "image"],
-            },
-        }
-
-
-class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
+@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.0.0")
+class CvInpaintInvocation(BaseInvocation):
     """Simple inpaint using opencv."""

-    # fmt: off
-    type: Literal["cv_inpaint"] = "cv_inpaint"
-
-    # Inputs
-    image: ImageField = Field(default=None, description="The image to inpaint")
-    mask: ImageField = Field(default=None, description="The mask to use when inpainting")
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "OpenCV Inpaint",
-                "tags": ["opencv", "inpaint"]
-            },
-        }
+    image: ImageField = InputField(description="The image to inpaint")
+    mask: ImageField = InputField(description="The mask to use when inpainting")

     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)
@@ -66,6 +40,7 @@ class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
+            workflow=self.workflow,
         )

         return ImageOutput(

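For reference, the OpenCV primitive this node ultimately relies on looks roughly like the following. This is a hedged sketch with stand-in arrays, since the node itself routes images through invokeai's image services:

import cv2 as cv
import numpy

# Assumed convention: nonzero mask pixels mark the holes to fill.
image_np = numpy.zeros((64, 64, 3), dtype=numpy.uint8)  # stand-in 8-bit image
mask_np = numpy.zeros((64, 64), dtype=numpy.uint8)
mask_np[16:32, 16:32] = 255                             # region to inpaint

patched = cv.inpaint(image_np, mask_np, 3, cv.INPAINT_TELEA)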
@@ -1,254 +0,0 @@
-# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-
-from functools import partial
-from typing import Literal, Optional, get_args
-
-import torch
-from pydantic import Field
-
-from invokeai.app.models.image import (ColorField, ImageCategory, ImageField,
-                                       ResourceOrigin)
-from invokeai.app.util.misc import SEED_MAX, get_random_seed
-from invokeai.backend.generator.inpaint import infill_methods
-
-from ...backend.generator import Inpaint, InvokeAIGenerator
-from ...backend.stable_diffusion import PipelineIntermediateState
-from ..util.step_callback import stable_diffusion_step_callback
-from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext
-from .image import ImageOutput
-
-from ...backend.model_management.lora import ModelPatcher
-from ...backend.stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
-from .model import UNetField, VaeField
-from .compel import ConditioningField
-from contextlib import contextmanager, ExitStack, ContextDecorator
-
-SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]
-INFILL_METHODS = Literal[tuple(infill_methods())]
-DEFAULT_INFILL_METHOD = (
-    "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile"
-)
-
-
-from .latent import get_scheduler
-
-class OldModelContext(ContextDecorator):
-    model: StableDiffusionGeneratorPipeline
-
-    def __init__(self, model):
-        self.model = model
-
-    def __enter__(self):
-        return self.model
-
-    def __exit__(self, *exc):
-        return False
-
-class OldModelInfo:
-    name: str
-    hash: str
-    context: OldModelContext
-
-    def __init__(self, name: str, hash: str, model: StableDiffusionGeneratorPipeline):
-        self.name = name
-        self.hash = hash
-        self.context = OldModelContext(
-            model=model,
-        )
-
-
-class InpaintInvocation(BaseInvocation):
-    """Generates an image using inpaint."""
-
-    type: Literal["inpaint"] = "inpaint"
-
-    positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation")
-    negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation")
-    seed: int = Field(ge=0, le=SEED_MAX, description="The seed to use (omit for random)", default_factory=get_random_seed)
-    steps: int = Field(default=30, gt=0, description="The number of steps to use to generate the image")
-    width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting image", )
-    height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting image", )
-    cfg_scale: float = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
-    scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" )
-    unet: UNetField = Field(default=None, description="UNet model")
-    vae: VaeField = Field(default=None, description="Vae model")
-
-    # Inputs
-    image: Optional[ImageField] = Field(description="The input image")
-    strength: float = Field(
-        default=0.75, gt=0, le=1, description="The strength of the original image"
-    )
-    fit: bool = Field(
-        default=True,
-        description="Whether or not the result should be fit to the aspect ratio of the input image",
-    )
-
-    # Inputs
-    mask: Optional[ImageField] = Field(description="The mask")
-    seam_size: int = Field(default=96, ge=1, description="The seam inpaint size (px)")
-    seam_blur: int = Field(
-        default=16, ge=0, description="The seam inpaint blur radius (px)"
-    )
-    seam_strength: float = Field(
-        default=0.75, gt=0, le=1, description="The seam inpaint strength"
-    )
-    seam_steps: int = Field(
-        default=30, ge=1, description="The number of steps to use for seam inpaint"
-    )
-    tile_size: int = Field(
-        default=32, ge=1, description="The tile infill method size (px)"
-    )
-    infill_method: INFILL_METHODS = Field(
-        default=DEFAULT_INFILL_METHOD,
-        description="The method used to infill empty regions (px)",
-    )
-    inpaint_width: Optional[int] = Field(
-        default=None,
-        multiple_of=8,
-        gt=0,
-        description="The width of the inpaint region (px)",
-    )
-    inpaint_height: Optional[int] = Field(
-        default=None,
-        multiple_of=8,
-        gt=0,
-        description="The height of the inpaint region (px)",
-    )
-    inpaint_fill: Optional[ColorField] = Field(
-        default=ColorField(r=127, g=127, b=127, a=255),
-        description="The solid infill method color",
-    )
-    inpaint_replace: float = Field(
-        default=0.0,
-        ge=0.0,
-        le=1.0,
-        description="The amount by which to replace masked areas with latent noise",
-    )
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "tags": ["stable-diffusion", "image"],
-                "title": "Inpaint"
-            },
-        }
-
-    def dispatch_progress(
-        self,
-        context: InvocationContext,
-        source_node_id: str,
-        intermediate_state: PipelineIntermediateState,
-    ) -> None:
-        stable_diffusion_step_callback(
-            context=context,
-            intermediate_state=intermediate_state,
-            node=self.dict(),
-            source_node_id=source_node_id,
-        )
-
-    def get_conditioning(self, context, unet):
-        positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
-        c = positive_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype)
-        extra_conditioning_info = positive_cond_data.conditionings[0].extra_conditioning
-
-        negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name)
-        uc = negative_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype)
-
-        return (uc, c, extra_conditioning_info)
-
-    @contextmanager
-    def load_model_old_way(self, context, scheduler):
-        def _lora_loader():
-            for lora in self.unet.loras:
-                lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}), context=context,)
-                yield (lora_info.context.model, lora.weight)
-                del lora_info
-            return
-
-        unet_info = context.services.model_manager.get_model(**self.unet.unet.dict(), context=context,)
-        vae_info = context.services.model_manager.get_model(**self.vae.vae.dict(), context=context,)
-
-        with vae_info as vae,\
-                ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),\
-                unet_info as unet:
-
-            device = context.services.model_manager.mgr.cache.execution_device
-            dtype = context.services.model_manager.mgr.cache.precision
-
-            pipeline = StableDiffusionGeneratorPipeline(
-                vae=vae,
-                text_encoder=None,
-                tokenizer=None,
-                unet=unet,
-                scheduler=scheduler,
-                safety_checker=None,
-                feature_extractor=None,
-                requires_safety_checker=False,
-                precision="float16" if dtype == torch.float16 else "float32",
-                execution_device=device,
-            )
-
-            yield OldModelInfo(
-                name=self.unet.unet.model_name,
-                hash="<NO-HASH>",
-                model=pipeline,
-            )
-
-    def invoke(self, context: InvocationContext) -> ImageOutput:
-        image = (
-            None
-            if self.image is None
-            else context.services.images.get_pil_image(self.image.image_name)
-        )
-        mask = (
-            None
-            if self.mask is None
-            else context.services.images.get_pil_image(self.mask.image_name)
-        )
-
-        # Get the source node id (we are invoking the prepared node)
-        graph_execution_state = context.services.graph_execution_manager.get(
-            context.graph_execution_state_id
-        )
-        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
-
-        scheduler = get_scheduler(
-            context=context,
-            scheduler_info=self.unet.scheduler,
-            scheduler_name=self.scheduler,
-        )
-
-        with self.load_model_old_way(context, scheduler) as model:
-            conditioning = self.get_conditioning(context, model.context.model.unet)
-
-            outputs = Inpaint(model).generate(
-                conditioning=conditioning,
-                scheduler=scheduler,
-                init_image=image,
-                mask_image=mask,
-                step_callback=partial(self.dispatch_progress, context, source_node_id),
-                **self.dict(
-                    exclude={"positive_conditioning", "negative_conditioning", "scheduler", "image", "mask"}
-                ),  # Shorthand for passing all of the parameters above manually
-            )
-
-        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
-        # each time it is called. We only need the first one.
-        generator_output = next(outputs)
-
-        image_dto = context.services.images.create(
-            image=generator_output.image,
-            image_origin=ResourceOrigin.INTERNAL,
-            image_category=ImageCategory.GENERAL,
-            session_id=context.graph_execution_state_id,
-            node_id=self.id,
-            is_intermediate=self.is_intermediate,
-        )
-
-        return ImageOutput(
-            image=ImageField(image_name=image_dto.image_name),
-            width=image_dto.width,
-            height=image_dto.height,
-        )
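The deleted file's OldModelContext is worth a gloss: it wraps an already-built pipeline in a context manager so the legacy generator API could treat it like a managed model. The pattern, reduced to its essentials (hypothetical name):

from contextlib import ContextDecorator


class StaticModelContext(ContextDecorator):  # hypothetical name, for illustration
    def __init__(self, model):
        self.model = model

    def __enter__(self):
        return self.model  # hand back the prewrapped object unchanged

    def __exit__(self, *exc):
        return False       # never suppress exceptions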
@@ -1,38 +1,36 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team

+import math
 from typing import Literal, Optional, get_args

 import numpy as np
-import math
 from PIL import Image, ImageOps
 from pydantic import Field

-from invokeai.app.invocations.image import ImageOutput
+from invokeai.app.invocations.primitives import ColorField, ImageField, ImageOutput
 from invokeai.app.util.misc import SEED_MAX, get_random_seed
+from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint
+from invokeai.backend.image_util.lama import LaMA
 from invokeai.backend.image_util.patchmatch import PatchMatch

-from ..models.image import ColorField, ImageCategory, ImageField, ResourceOrigin
-from .baseinvocation import (
-    BaseInvocation,
-    InvocationConfig,
-    InvocationContext,
-)
+from ..models.image import ImageCategory, ResourceOrigin
+from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
+from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES


 def infill_methods() -> list[str]:
-    methods = [
-        "tile",
-        "solid",
-    ]
+    methods = ["tile", "solid", "lama", "cv2"]
     if PatchMatch.patchmatch_available():
         methods.insert(0, "patchmatch")
     return methods


 INFILL_METHODS = Literal[tuple(infill_methods())]
-DEFAULT_INFILL_METHOD = (
-    "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile"
-)
+DEFAULT_INFILL_METHOD = "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile"


+def infill_lama(im: Image.Image) -> Image.Image:
+    lama = LaMA()
+    return lama(im)
+
+
 def infill_patchmatch(im: Image.Image) -> Image.Image:
@@ -44,13 +42,15 @@ def infill_patchmatch(im: Image.Image) -> Image.Image:
         return im

     # Patchmatch (note, we may want to expose patch_size? Increasing it significantly impacts performance though)
-    im_patched_np = PatchMatch.inpaint(
-        im.convert("RGB"), ImageOps.invert(im.split()[-1]), patch_size=3
-    )
+    im_patched_np = PatchMatch.inpaint(im.convert("RGB"), ImageOps.invert(im.split()[-1]), patch_size=3)
     im_patched = Image.fromarray(im_patched_np, mode="RGB")
     return im_patched


+def infill_cv2(im: Image.Image) -> Image.Image:
+    return cv2_inpaint(im)
+
+
 def get_tile_images(image: np.ndarray, width=8, height=8):
     _nrows, _ncols, depth = image.shape
     _strides = image.strides
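infill_methods() doubles as a capability probe: "patchmatch" is prepended only when the native library imports, and DEFAULT_INFILL_METHOD falls back to "tile" otherwise. A short usage sketch:

methods = infill_methods()  # e.g. ["patchmatch", "tile", "solid", "lama", "cv2"]
method = "patchmatch" if "patchmatch" in methods else "tile"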
@@ -68,9 +68,7 @@ def get_tile_images(image: np.ndarray, width=8, height=8):
     )


-def tile_fill_missing(
-    im: Image.Image, tile_size: int = 16, seed: Optional[int] = None
-) -> Image.Image:
+def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int] = None) -> Image.Image:
     # Only fill if there's an alpha layer
     if im.mode != "RGBA":
         return im
@@ -101,11 +99,9 @@ def tile_fill_missing(
         return im

     # Find all invalid tiles and replace with a random valid tile
-    replace_count = (tiles_mask == False).sum()
+    replace_count = (tiles_mask == False).sum()  # noqa: E712
     rng = np.random.default_rng(seed=seed)
-    tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[
-        rng.choice(filtered_tiles.shape[0], replace_count), :, :, :
-    ]
+    tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count), :, :, :]

     # Convert back to an image
     tiles_all = tiles_all.reshape(tshape)
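get_tile_images carves the image into non-overlapping tiles through a strided view (note the `image.strides` access above), so tiles can be sampled without copying. A hedged numpy sketch of the same idea:

import numpy as np

image = np.arange(16 * 16 * 3, dtype=np.uint8).reshape(16, 16, 3)
h = w = 8  # tile size, as in the width/height parameters above
tiles = np.lib.stride_tricks.as_strided(
    image,
    shape=(16 // h, 16 // w, h, w, 3),  # a 2x2 grid of 8x8 RGB tiles
    strides=(image.strides[0] * h, image.strides[1] * w, *image.strides),
)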
@@ -122,26 +118,16 @@ def tile_fill_missing(
     return si


+@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
 class InfillColorInvocation(BaseInvocation):
     """Infills transparent areas of an image with a solid color"""

-    type: Literal["infill_rgba"] = "infill_rgba"
-    image: Optional[ImageField] = Field(
-        default=None, description="The image to infill"
-    )
-    color: ColorField = Field(
+    image: ImageField = InputField(description="The image to infill")
+    color: ColorField = InputField(
         default=ColorField(r=127, g=127, b=127, a=255),
         description="The color to use to infill",
     )

-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Color Infill",
-                "tags": ["image", "inpaint", "color", "infill"]
-            },
-        }
-
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)

@@ -157,6 +143,7 @@ class InfillColorInvocation(BaseInvocation):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
+            workflow=self.workflow,
         )

         return ImageOutput(
@@ -166,36 +153,23 @@ class InfillColorInvocation(BaseInvocation):
     )


+@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
 class InfillTileInvocation(BaseInvocation):
     """Infills transparent areas of an image with tiles of the image"""

-    type: Literal["infill_tile"] = "infill_tile"
-
-    image: Optional[ImageField] = Field(
-        default=None, description="The image to infill"
-    )
-    tile_size: int = Field(default=32, ge=1, description="The tile size (px)")
-    seed: int = Field(
+    image: ImageField = InputField(description="The image to infill")
+    tile_size: int = InputField(default=32, ge=1, description="The tile size (px)")
+    seed: int = InputField(
         ge=0,
         le=SEED_MAX,
         description="The seed to use for tile generation (omit for random)",
         default_factory=get_random_seed,
     )

-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Tile Infill",
-                "tags": ["image", "inpaint", "tile", "infill"]
-            },
-        }
-
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)

-        infilled = tile_fill_missing(
-            image.copy(), seed=self.seed, tile_size=self.tile_size
-        )
+        infilled = tile_fill_missing(image.copy(), seed=self.seed, tile_size=self.tile_size)
         infilled.paste(image, (0, 0), image.split()[-1])

         image_dto = context.services.images.create(
@@ -205,6 +179,7 @@ class InfillTileInvocation(BaseInvocation):
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
+            workflow=self.workflow,
         )

         return ImageOutput(
@@ -214,30 +189,96 @@ class InfillTileInvocation(BaseInvocation):
     )


+@invocation(
+    "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0"
+)
 class InfillPatchMatchInvocation(BaseInvocation):
     """Infills transparent areas of an image using the PatchMatch algorithm"""

-    type: Literal["infill_patchmatch"] = "infill_patchmatch"
+    image: ImageField = InputField(description="The image to infill")
+    downscale: float = InputField(default=2.0, gt=0, description="Run patchmatch on downscaled image to speedup infill")
+    resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")

-    image: Optional[ImageField] = Field(
-        default=None, description="The image to infill"
-    )
     def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")

-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Patch Match Infill",
-                "tags": ["image", "inpaint", "patchmatch", "infill"]
-            },
-        }
+        resample_mode = PIL_RESAMPLING_MAP[self.resample_mode]

+        infill_image = image.copy()
+        width = int(image.width / self.downscale)
+        height = int(image.height / self.downscale)
+        infill_image = infill_image.resize(
+            (width, height),
+            resample=resample_mode,
+        )
+
+        if PatchMatch.patchmatch_available():
+            infilled = infill_patchmatch(infill_image)
+        else:
+            raise ValueError("PatchMatch is not available on this system")
+
+        infilled = infilled.resize(
+            (image.width, image.height),
+            resample=resample_mode,
+        )
+
+        infilled.paste(image, (0, 0), mask=image.split()[-1])
+        # image.paste(infilled, (0, 0), mask=image.split()[-1])
+
         image_dto = context.services.images.create(
             image=infilled,
             image_origin=ResourceOrigin.INTERNAL,
             image_category=ImageCategory.GENERAL,
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
+            workflow=self.workflow,
         )

         return ImageOutput(
             image=ImageField(image_name=image_dto.image_name),
             width=image_dto.width,
             height=image_dto.height,
         )


+@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.0.0")
+class LaMaInfillInvocation(BaseInvocation):
+    """Infills transparent areas of an image using the LaMa model"""
+
+    image: ImageField = InputField(description="The image to infill")
+
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get_pil_image(self.image.image_name)

-        if PatchMatch.patchmatch_available():
-            infilled = infill_patchmatch(image.copy())
-        else:
-            raise ValueError("PatchMatch is not available on this system")
+        infilled = infill_lama(image.copy())

         image_dto = context.services.images.create(
             image=infilled,
             image_origin=ResourceOrigin.INTERNAL,
             image_category=ImageCategory.GENERAL,
             node_id=self.id,
             session_id=context.graph_execution_state_id,
             is_intermediate=self.is_intermediate,
         )

         return ImageOutput(
             image=ImageField(image_name=image_dto.image_name),
             width=image_dto.width,
             height=image_dto.height,
         )


+@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint")
+class CV2InfillInvocation(BaseInvocation):
+    """Infills transparent areas of an image using OpenCV Inpainting"""
+
+    image: ImageField = InputField(description="The image to infill")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.services.images.get_pil_image(self.image.image_name)
+
+        infilled = infill_cv2(image.copy())
+
+        image_dto = context.services.images.create(
+            image=infilled,

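The new PatchMatch node runs the expensive fill at 1/downscale resolution, resizes the result back up, and then re-pastes the original opaque pixels using the alpha channel as the mask, so only transparent regions keep the infill. A condensed sketch of that pattern with PIL:

from PIL import Image

image = Image.new("RGBA", (1024, 1024))
small = image.resize((512, 512), resample=Image.BICUBIC)  # downscale=2.0
filled = small                                            # stand-in for infill_patchmatch(small)
filled = filled.resize(image.size, resample=Image.BICUBIC)
filled.paste(image, (0, 0), mask=image.split()[-1])       # keep the original opaque pixels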
@@ -1,150 +1,62 @@
 # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

-from typing import Literal
-
-from pydantic import BaseModel, Field
 import numpy as np

-from .baseinvocation import (
-    BaseInvocation,
-    BaseInvocationOutput,
-    InvocationContext,
-    InvocationConfig,
-)
+from invokeai.app.invocations.primitives import IntegerOutput

+from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation


-class MathInvocationConfig(BaseModel):
-    """Helper class to provide all math invocations with additional config"""
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "tags": ["math"],
-            }
-        }
-
-
-class IntOutput(BaseInvocationOutput):
-    """An integer output"""
-
-    # fmt: off
-    type: Literal["int_output"] = "int_output"
-    a: int = Field(default=None, description="The output integer")
-    # fmt: on
-
-
-class FloatOutput(BaseInvocationOutput):
-    """A float output"""
-
-    # fmt: off
-    type: Literal["float_output"] = "float_output"
-    param: float = Field(default=None, description="The output float")
-    # fmt: on
-
-
-class AddInvocation(BaseInvocation, MathInvocationConfig):
+@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")
+class AddInvocation(BaseInvocation):
     """Adds two numbers"""

-    # fmt: off
-    type: Literal["add"] = "add"
-    a: int = Field(default=0, description="The first number")
-    b: int = Field(default=0, description="The second number")
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Add",
-                "tags": ["math", "add"]
-            },
-        }
+    a: int = InputField(default=0, description=FieldDescriptions.num_1)
+    b: int = InputField(default=0, description=FieldDescriptions.num_2)

-    def invoke(self, context: InvocationContext) -> IntOutput:
-        return IntOutput(a=self.a + self.b)
+    def invoke(self, context: InvocationContext) -> IntegerOutput:
+        return IntegerOutput(value=self.a + self.b)


-class SubtractInvocation(BaseInvocation, MathInvocationConfig):
+@invocation("sub", title="Subtract Integers", tags=["math", "subtract"], category="math", version="1.0.0")
+class SubtractInvocation(BaseInvocation):
     """Subtracts two numbers"""

-    # fmt: off
-    type: Literal["sub"] = "sub"
-    a: int = Field(default=0, description="The first number")
-    b: int = Field(default=0, description="The second number")
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Subtract",
-                "tags": ["math", "subtract"]
-            },
-        }
+    a: int = InputField(default=0, description=FieldDescriptions.num_1)
+    b: int = InputField(default=0, description=FieldDescriptions.num_2)

-    def invoke(self, context: InvocationContext) -> IntOutput:
-        return IntOutput(a=self.a - self.b)
+    def invoke(self, context: InvocationContext) -> IntegerOutput:
+        return IntegerOutput(value=self.a - self.b)


-class MultiplyInvocation(BaseInvocation, MathInvocationConfig):
+@invocation("mul", title="Multiply Integers", tags=["math", "multiply"], category="math", version="1.0.0")
+class MultiplyInvocation(BaseInvocation):
     """Multiplies two numbers"""

-    # fmt: off
-    type: Literal["mul"] = "mul"
-    a: int = Field(default=0, description="The first number")
-    b: int = Field(default=0, description="The second number")
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Multiply",
-                "tags": ["math", "multiply"]
-            },
-        }
+    a: int = InputField(default=0, description=FieldDescriptions.num_1)
+    b: int = InputField(default=0, description=FieldDescriptions.num_2)

-    def invoke(self, context: InvocationContext) -> IntOutput:
-        return IntOutput(a=self.a * self.b)
+    def invoke(self, context: InvocationContext) -> IntegerOutput:
+        return IntegerOutput(value=self.a * self.b)


-class DivideInvocation(BaseInvocation, MathInvocationConfig):
+@invocation("div", title="Divide Integers", tags=["math", "divide"], category="math", version="1.0.0")
+class DivideInvocation(BaseInvocation):
     """Divides two numbers"""

-    # fmt: off
-    type: Literal["div"] = "div"
-    a: int = Field(default=0, description="The first number")
-    b: int = Field(default=0, description="The second number")
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Divide",
-                "tags": ["math", "divide"]
-            },
-        }
+    a: int = InputField(default=0, description=FieldDescriptions.num_1)
+    b: int = InputField(default=0, description=FieldDescriptions.num_2)

-    def invoke(self, context: InvocationContext) -> IntOutput:
-        return IntOutput(a=int(self.a / self.b))
+    def invoke(self, context: InvocationContext) -> IntegerOutput:
+        return IntegerOutput(value=int(self.a / self.b))


+@invocation("rand_int", title="Random Integer", tags=["math", "random"], category="math", version="1.0.0")
 class RandomIntInvocation(BaseInvocation):
     """Outputs a single random integer."""

-    # fmt: off
-    type: Literal["rand_int"] = "rand_int"
-    low: int = Field(default=0, description="The inclusive low value")
-    high: int = Field(
-        default=np.iinfo(np.int32).max, description="The exclusive high value"
-    )
-    # fmt: on
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Random Integer",
-                "tags": ["math", "random", "integer"]
-            },
-        }
+    low: int = InputField(default=0, description="The inclusive low value")
+    high: int = InputField(default=np.iinfo(np.int32).max, description="The exclusive high value")

-    def invoke(self, context: InvocationContext) -> IntOutput:
-        return IntOutput(a=np.random.randint(self.low, self.high))
+    def invoke(self, context: InvocationContext) -> IntegerOutput:
+        return IntegerOutput(value=np.random.randint(self.low, self.high))
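The rewrite above is mechanical: the `type: Literal[...]` discriminator and the `Config`/`schema_extra` UI block collapse into a single `@invocation(...)` decorator, `Field` becomes `InputField`, and the per-file `IntOutput` is replaced by the shared `IntegerOutput` primitive. As a worked example, a hypothetical new node written directly in the post-migration style (the `mod` id, title, and descriptions are invented for illustration):

```python
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    InputField,
    InvocationContext,
    invocation,
)
from invokeai.app.invocations.primitives import IntegerOutput


@invocation("mod", title="Modulo Integers", tags=["math", "modulo"], category="math", version="1.0.0")
class ModuloInvocation(BaseInvocation):
    """Computes a modulo b (hypothetical example node)"""

    a: int = InputField(default=0, description="The dividend")
    b: int = InputField(default=1, description="The divisor")

    def invoke(self, context: InvocationContext) -> IntegerOutput:
        # The decorator supplies the node's type, title, and UI metadata
        # that the old Literal field and Config.schema_extra used to carry.
        return IntegerOutput(value=self.a % self.b)
```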
@@ -1,25 +1,38 @@
-from typing import Literal, Optional, Union
+from typing import Optional

-from pydantic import BaseModel, Field
+from pydantic import Field

-from invokeai.app.invocations.baseinvocation import (BaseInvocation,
-                                                     BaseInvocationOutput, InvocationConfig,
-                                                     InvocationContext)
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    InputField,
+    InvocationContext,
+    OutputField,
+    invocation,
+    invocation_output,
+)
 from invokeai.app.invocations.controlnet_image_processors import ControlField
-from invokeai.app.invocations.model import (LoRAModelField, MainModelField,
-                                            VAEModelField)
+from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
+from invokeai.app.util.model_exclude_null import BaseModelExcludeNull

 from ...version import __version__


-class LoRAMetadataField(BaseModel):
+class LoRAMetadataField(BaseModelExcludeNull):
     """LoRA metadata for an image generated in InvokeAI."""

     lora: LoRAModelField = Field(description="The LoRA model")
     weight: float = Field(description="The weight of the LoRA model")


-class CoreMetadata(BaseModel):
+class CoreMetadata(BaseModelExcludeNull):
     """Core generation metadata for an image generated in InvokeAI."""

-    generation_mode: str = Field(description="The generation mode that output this image",)
+    app_version: str = Field(default=__version__, description="The version of InvokeAI used to generate this image")
+    generation_mode: str = Field(
+        description="The generation mode that output this image",
+    )
     created_by: Optional[str] = Field(description="The name of the creator of the image")
     positive_prompt: str = Field(description="The positive prompt parameter")
     negative_prompt: str = Field(description="The negative prompt parameter")
     width: int = Field(description="The width parameter")
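`LoRAMetadataField` and the models below now derive from `BaseModelExcludeNull` instead of pydantic's `BaseModel`. The name suggests a base class that omits `None`-valued fields during serialization, so the many optional fields added in the next hunk are dropped from stored metadata rather than written as `null`. A plausible sketch under that assumption (pydantic v1 semantics; the real class in `invokeai.app.util.model_exclude_null` may differ):

```python
# Plausible sketch only; the actual BaseModelExcludeNull in
# invokeai.app.util.model_exclude_null may be implemented differently.
from typing import Any

from pydantic import BaseModel


class BaseModelExcludeNullSketch(BaseModel):
    def dict(self, **kwargs: Any) -> dict:
        # Force exclude_none so optional fields that were never set are
        # omitted from the serialized output instead of appearing as null.
        kwargs["exclude_none"] = True
        return super().dict(**kwargs)
```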
@@ -29,104 +42,140 @@ class CoreMetadata(BaseModel):
     cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
     steps: int = Field(description="The number of steps used for inference")
     scheduler: str = Field(description="The scheduler used for inference")
-    clip_skip: int = Field(description="The number of skipped CLIP layers",)
+    clip_skip: int = Field(
+        description="The number of skipped CLIP layers",
+    )
     model: MainModelField = Field(description="The main model used for inference")
-    controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
+    controlnets: list[ControlField] = Field(description="The ControlNets used for inference")
     loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
-    strength: Union[float, None] = Field(
-        default=None,
-        description="The strength used for latents-to-latents",
-    )
-    init_image: Union[str, None] = Field(
-        default=None, description="The name of the initial image"
-    )
-    vae: Union[VAEModelField, None] = Field(
+    vae: Optional[VAEModelField] = Field(
         default=None,
         description="The VAE used for decoding, if the main model's default was not used",
     )

+    # Latents-to-Latents
+    strength: Optional[float] = Field(
+        default=None,
+        description="The strength used for latents-to-latents",
+    )
+    init_image: Optional[str] = Field(default=None, description="The name of the initial image")

-class ImageMetadata(BaseModel):
+    # SDXL
+    positive_style_prompt: Optional[str] = Field(default=None, description="The positive style prompt parameter")
+    negative_style_prompt: Optional[str] = Field(default=None, description="The negative style prompt parameter")
+
+    # SDXL Refiner
+    refiner_model: Optional[MainModelField] = Field(default=None, description="The SDXL Refiner model used")
+    refiner_cfg_scale: Optional[float] = Field(
+        default=None,
+        description="The classifier-free guidance scale parameter used for the refiner",
+    )
+    refiner_steps: Optional[int] = Field(default=None, description="The number of steps used for the refiner")
+    refiner_scheduler: Optional[str] = Field(default=None, description="The scheduler used for the refiner")
+    refiner_positive_aesthetic_score: Optional[float] = Field(
+        default=None, description="The aesthetic score used for the refiner"
+    )
+    refiner_negative_aesthetic_score: Optional[float] = Field(
+        default=None, description="The aesthetic score used for the refiner"
+    )
+    refiner_start: Optional[float] = Field(default=None, description="The start value used for refiner denoising")
+
+
+class ImageMetadata(BaseModelExcludeNull):
     """An image's generation metadata"""

     metadata: Optional[dict] = Field(
         default=None,
         description="The image's core metadata, if it was created in the Linear or Canvas UI",
     )
-    graph: Optional[dict] = Field(
-        default=None, description="The graph that created the image"
-    )
+    graph: Optional[dict] = Field(default=None, description="The graph that created the image")


+@invocation_output("metadata_accumulator_output")
 class MetadataAccumulatorOutput(BaseInvocationOutput):
     """The output of the MetadataAccumulator node"""

-    type: Literal["metadata_accumulator_output"] = "metadata_accumulator_output"
-
-    metadata: CoreMetadata = Field(description="The core metadata for the image")
+    metadata: CoreMetadata = OutputField(description="The core metadata for the image")


+@invocation(
+    "metadata_accumulator", title="Metadata Accumulator", tags=["metadata"], category="metadata", version="1.0.0"
+)
 class MetadataAccumulatorInvocation(BaseInvocation):
     """Outputs a Core Metadata Object"""

-    type: Literal["metadata_accumulator"] = "metadata_accumulator"
-
-    generation_mode: str = Field(description="The generation mode that output this image",)
-    positive_prompt: str = Field(description="The positive prompt parameter")
-    negative_prompt: str = Field(description="The negative prompt parameter")
-    width: int = Field(description="The width parameter")
-    height: int = Field(description="The height parameter")
-    seed: int = Field(description="The seed used for noise generation")
-    rand_device: str = Field(description="The device used for random number generation")
-    cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
-    steps: int = Field(description="The number of steps used for inference")
-    scheduler: str = Field(description="The scheduler used for inference")
-    clip_skip: int = Field(description="The number of skipped CLIP layers",)
-    model: MainModelField = Field(description="The main model used for inference")
-    controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
-    loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
-    strength: Union[float, None] = Field(
+    generation_mode: str = InputField(
+        description="The generation mode that output this image",
+    )
+    positive_prompt: str = InputField(description="The positive prompt parameter")
+    negative_prompt: str = InputField(description="The negative prompt parameter")
+    width: int = InputField(description="The width parameter")
+    height: int = InputField(description="The height parameter")
+    seed: int = InputField(description="The seed used for noise generation")
+    rand_device: str = InputField(description="The device used for random number generation")
+    cfg_scale: float = InputField(description="The classifier-free guidance scale parameter")
+    steps: int = InputField(description="The number of steps used for inference")
+    scheduler: str = InputField(description="The scheduler used for inference")
+    clip_skip: int = InputField(
+        description="The number of skipped CLIP layers",
+    )
+    model: MainModelField = InputField(description="The main model used for inference")
+    controlnets: list[ControlField] = InputField(description="The ControlNets used for inference")
+    loras: list[LoRAMetadataField] = InputField(description="The LoRAs used for inference")
+    strength: Optional[float] = InputField(
         default=None,
         description="The strength used for latents-to-latents",
     )
-    init_image: Union[str, None] = Field(
-        default=None, description="The name of the initial image"
+    init_image: Optional[str] = InputField(
+        default=None,
+        description="The name of the initial image",
     )
-    vae: Union[VAEModelField, None] = Field(
+    vae: Optional[VAEModelField] = InputField(
         default=None,
         description="The VAE used for decoding, if the main model's default was not used",
     )

-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Metadata Accumulator",
-                "tags": ["image", "metadata", "generation"]
-            },
-        }
+    # SDXL
+    positive_style_prompt: Optional[str] = InputField(
+        default=None,
+        description="The positive style prompt parameter",
+    )
+    negative_style_prompt: Optional[str] = InputField(
+        default=None,
+        description="The negative style prompt parameter",
+    )
+
+    # SDXL Refiner
+    refiner_model: Optional[MainModelField] = InputField(
+        default=None,
+        description="The SDXL Refiner model used",
+    )
+    refiner_cfg_scale: Optional[float] = InputField(
+        default=None,
+        description="The classifier-free guidance scale parameter used for the refiner",
+    )
+    refiner_steps: Optional[int] = InputField(
+        default=None,
+        description="The number of steps used for the refiner",
+    )
+    refiner_scheduler: Optional[str] = InputField(
+        default=None,
+        description="The scheduler used for the refiner",
+    )
+    refiner_positive_aesthetic_score: Optional[float] = InputField(
+        default=None,
+        description="The aesthetic score used for the refiner",
+    )
+    refiner_negative_aesthetic_score: Optional[float] = InputField(
+        default=None,
+        description="The aesthetic score used for the refiner",
+    )
+    refiner_start: Optional[float] = InputField(
+        default=None,
+        description="The start value used for refiner denoising",
+    )

     def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput:
         """Collects and outputs a CoreMetadata object"""

-        return MetadataAccumulatorOutput(
-            metadata=CoreMetadata(
-                generation_mode=self.generation_mode,
-                positive_prompt=self.positive_prompt,
-                negative_prompt=self.negative_prompt,
-                width=self.width,
-                height=self.height,
-                seed=self.seed,
-                rand_device=self.rand_device,
-                cfg_scale=self.cfg_scale,
-                steps=self.steps,
-                scheduler=self.scheduler,
-                model=self.model,
-                strength=self.strength,
-                init_image=self.init_image,
-                vae=self.vae,
-                controlnets=self.controlnets,
-                loras=self.loras,
-                clip_skip=self.clip_skip,
-            )
-        )
+        return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict()))
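The rewritten `invoke` replaces the long field-by-field `CoreMetadata(...)` constructor with `CoreMetadata(**self.dict())`. This works because the accumulator's input fields carry exactly the same names as `CoreMetadata`'s fields, and pydantic v1 ignores unrecognized keyword arguments by default, so invocation-only attributes (such as the node's `id`) are silently dropped. A minimal sketch of the pattern with stand-in models:

```python
# Stand-in models (not the InvokeAI classes) showing why
# CoreMetadata(**self.dict()) works: the field names line up, and
# pydantic v1 ignores unrecognized keys passed to the constructor.
from pydantic import BaseModel


class Accumulator(BaseModel):
    width: int = 512
    height: int = 512
    id: str = "node-1"  # invocation-only field, absent from Metadata


class Metadata(BaseModel):
    width: int
    height: int


meta = Metadata(**Accumulator().dict())
print(meta)  # width=512 height=512; the extra "id" key was dropped
```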