Mirror of https://github.com/invoke-ai/InvokeAI.git
Synced 2026-01-15 10:48:12 -05:00
Compare commits
617 Commits
(Commit table omitted: only abbreviated SHA1 hashes survived the page capture; the author, date, and message columns are not recoverable.)
.dockerignore (modified)

```diff
@@ -3,21 +3,23 @@
 !invokeai
 !ldm
 !pyproject.toml
 !README.md

 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
 **/*.ckpt

 # ignore frontend but whitelist dist
-invokeai/frontend/**
-!invokeai/frontend/dist
+invokeai/frontend/
+!invokeai/frontend/dist/

 # ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets
-!invokeai/assets/web
+invokeai/assets/
+!invokeai/assets/web/

-# ignore python cache
-**/__pycache__
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
+**/*.py[cod]
+**/*.egg-info
+
+# Distribution / packaging
+*.egg-info/
+*.egg
```
.editorconfig (modified)

```diff
@@ -1,5 +1,8 @@
 root = true

+# All files
 [*]
+max_line_length = 80
 charset = utf-8
 end_of_line = lf
 indent_size = 2
@@ -10,3 +13,18 @@ trim_trailing_whitespace = true
 # Python
 [*.py]
 indent_size = 4
+max_line_length = 120
+
+# css
+[*.css]
+indent_size = 4
+
+# flake8
+[.flake8]
+indent_size = 4
+
+# Markdown MkDocs
+[docs/**/*.md]
+max_line_length = 80
+indent_size = 4
+indent_style = unset
```
.flake8 (new file, 37 lines)

```ini
[flake8]
max-line-length = 120
extend-ignore =
    # See https://github.com/PyCQA/pycodestyle/issues/373
    E203,
    # use Bugbear's B950 instead
    E501,
    # from black repo https://github.com/psf/black/blob/main/.flake8
    E266, W503, B907
extend-select =
    # Bugbear line length
    B950
extend-exclude =
    scripts/orig_scripts/*
    ldm/models/*
    ldm/modules/*
    ldm/data/*
    ldm/generate.py
    ldm/util.py
    ldm/simplet2i.py
per-file-ignores =
    # B950 line too long
    # W605 invalid escape sequence
    # F841 assigned to but never used
    # F401 imported but unused
    tests/test_prompt_parser.py: B950, W605, F401
    tests/test_textual_inversion.py: F841, B950
    # B023 Function definition does not bind loop variable
    scripts/legacy_api.py: F401, B950, B023, F841
    ldm/invoke/__init__.py: F401
    # B010 Do not call setattr with a constant attribute value
    ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double
```
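Since this configuration lives at the repository root, a local flake8 run picks it up automatically. A minimal sketch of such a run follows; the plugin package names (flake8-bugbear for B950, flake8-quotes for inline-quotes) are the standard PyPI ones and are assumed here, not pinned by this file:

```sh
# Install flake8 and the plugins the config above depends on.
# Package names are the usual PyPI ones (an assumption, not part of the repo's pins).
pip install flake8 flake8-bugbear flake8-quotes

# Run from the repository root so .flake8 is discovered; per-file-ignores
# and extend-exclude are applied automatically.
flake8 ldm/ scripts/ tests/
```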
.github/CODEOWNERS (59 changed lines)

```diff
@@ -1,18 +1,18 @@
 # continuous integration
-/.github/workflows/ @mauwii
+/.github/workflows/ @mauwii @lstein @blessedcoolant

 # documentation
-/docs/ @lstein @mauwii @tildebyte
-mkdocs.yml @lstein @mauwii
+/docs/ @lstein @mauwii @blessedcoolant
+mkdocs.yml @mauwii @lstein

 # installation and configuration
 /pyproject.toml @mauwii @lstein @ebr
 /docker/ @mauwii
-/scripts/ @ebr @lstein
-/installer/ @ebr @lstein @tildebyte
+/scripts/ @ebr @lstein @blessedcoolant
+/installer/ @ebr @lstein
-ldm/invoke/config @lstein @ebr
-invokeai/assets @lstein @ebr
-invokeai/configs @lstein @ebr
+invokeai/assets @lstein @blessedcoolant
+invokeai/configs @lstein @ebr @blessedcoolant
+/ldm/invoke/_version.py @lstein @blessedcoolant

 # web ui
@@ -20,31 +20,42 @@ invokeai/configs @lstein @ebr
 /invokeai/backend @blessedcoolant @psychedelicious

 # generation and model management
-/ldm/*.py @lstein
+/ldm/*.py @lstein @blessedcoolant
 /ldm/generate.py @lstein @keturn
 /ldm/invoke/args.py @lstein @blessedcoolant
-/ldm/invoke/ckpt* @lstein
-/ldm/invoke/ckpt_generator @lstein
-/ldm/invoke/CLI.py @lstein
-/ldm/invoke/config @lstein @ebr @mauwii
+/ldm/invoke/ckpt* @lstein @blessedcoolant
+/ldm/invoke/ckpt_generator @lstein @blessedcoolant
+/ldm/invoke/CLI.py @lstein @blessedcoolant
+/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
 /ldm/invoke/generator @keturn @damian0815
-/ldm/invoke/globals.py @lstein @blessedcoolant
-/ldm/invoke/merge_diffusers.py @lstein
+/ldm/invoke/globals.py @lstein @blessedcoolant
+/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
-/ldm/invoke/txt2mask.py @lstein
-/ldm/invoke/patchmatch.py @Kyle0654
+/ldm/invoke/txt2mask.py @lstein @blessedcoolant
+/ldm/invoke/patchmatch.py @Kyle0654 @lstein
 /ldm/invoke/restoration @lstein @blessedcoolant

 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn
-/ldm/modules @damian0815 @keturn
+/ldm/models @damian0815 @keturn @blessedcoolant
+/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
+/ldm/modules/attention.py @damian0815 @keturn
+/ldm/modules/diffusionmodules @damian0815 @keturn
+/ldm/modules/distributions @damian0815 @keturn
+/ldm/modules/ema.py @damian0815 @keturn
+/ldm/modules/embedding_manager.py @lstein
+/ldm/modules/encoders @damian0815 @keturn
+/ldm/modules/image_degradation @damian0815 @keturn
+/ldm/modules/losses @damian0815 @keturn
+/ldm/modules/x_transformer.py @damian0815 @keturn

 # Nodes
-apps/ @Kyle0654
+apps/ @Kyle0654 @jpphoto

 # legacy REST API
-# is CapableWeb still engaged?
-/ldm/invoke/pngwriter.py @CapableWeb
-/ldm/invoke/server_legacy.py @CapableWeb
-/scripts/legacy_api.py @CapableWeb
-/tests/legacy_tests.sh @CapableWeb
+# these are dead code
+#/ldm/invoke/pngwriter.py @CapableWeb
+#/ldm/invoke/server_legacy.py @CapableWeb
+#/scripts/legacy_api.py @CapableWeb
+#/tests/legacy_tests.sh @CapableWeb
```
.github/workflows/build-container.yml (65 changed lines)

```diff
@@ -3,9 +3,19 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/*'
+      - 'update/ci/docker/*'
       - 'update/docker/*'
+    paths:
+      - 'pyproject.toml'
+      - 'ldm/**'
+      - 'invokeai/backend/**'
+      - 'invokeai/configs/**'
+      - 'invokeai/frontend/dist/**'
+      - 'docker/Dockerfile'
     tags:
       - 'v*.*.*'
   workflow_dispatch:


 jobs:
   docker:
@@ -20,18 +30,15 @@
       include:
         - flavor: amd
           pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-          dockerfile: docker/Dockerfile
-          platforms: linux/amd64,linux/arm64
         - flavor: cuda
           pip-extra-index-url: ''
-          dockerfile: docker/Dockerfile
-          platforms: linux/amd64,linux/arm64
         - flavor: cpu
           pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
-          dockerfile: docker/Dockerfile
-          platforms: linux/amd64,linux/arm64
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
+    env:
+      PLATFORMS: 'linux/amd64,linux/arm64'
+      DOCKERFILE: 'docker/Dockerfile'
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -41,7 +48,9 @@
         uses: docker/metadata-action@v4
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          images: ghcr.io/${{ github.repository }}
+          images: |
+            ghcr.io/${{ github.repository }}
+            ${{ vars.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
@@ -52,13 +61,14 @@
           flavor: |
             latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
             suffix=-${{ matrix.flavor }},onlatest=false

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
         with:
-          platforms: ${{ matrix.platforms }}
+          platforms: ${{ env.PLATFORMS }}

       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
@@ -68,25 +78,34 @@
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Login to Docker Hub
+        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Build container
         id: docker_build
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ matrix.dockerfile }}
-          platforms: ${{ matrix.platforms }}
-          push: ${{ github.event_name != 'pull_request' }}
+          file: ${{ env.DOCKERFILE }}
+          platforms: ${{ env.PLATFORMS }}
+          push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: |
+            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+            type=gha,scope=main-${{ matrix.flavor }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}

       - name: Output image, digest and metadata to summary
         run: |
           {
             echo imageid: "${{ steps.docker_build.outputs.imageid }}"
             echo digest: "${{ steps.docker_build.outputs.digest }}"
             echo labels: "${{ steps.meta.outputs.labels }}"
             echo tags: "${{ steps.meta.outputs.tags }}"
             echo version: "${{ steps.meta.outputs.version }}"
           } >> "$GITHUB_STEP_SUMMARY"
+
+      - name: Docker Hub Description
+        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: peter-evans/dockerhub-description@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+          short-description: ${{ github.event.repository.description }}
```
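For reproducing one of these images outside CI, a local Buildx invocation that mirrors the workflow's inputs might look like the sketch below. The tag `invokeai:rocm` is an illustrative local name, and only `PIP_EXTRA_INDEX_URL` is an input the workflow actually passes:

```sh
# Local equivalent of the "amd" (ROCm) matrix flavor: same Dockerfile and
# build-arg as the workflow; the tag is a local convenience name, not one CI pushes.
docker buildx build \
  --file docker/Dockerfile \
  --build-arg PIP_EXTRA_INDEX_URL='https://download.pytorch.org/whl/rocm5.2' \
  --tag invokeai:rocm \
  --load .
```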
.github/workflows/mkdocs-material.yml (10 changed lines)

```diff
@@ -9,6 +9,10 @@ jobs:
   mkdocs-material:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
+    env:
+      REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
+      REPO_NAME: '${{ github.repository }}'
+      SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
     steps:
       - name: checkout sources
         uses: actions/checkout@v3
@@ -19,11 +23,15 @@
         uses: actions/setup-python@v4
         with:
           python-version: '3.10'
+          cache: pip
+          cache-dependency-path: pyproject.toml

       - name: install requirements
+        env:
+          PIP_USE_PEP517: 1
         run: |
-          python -m \
-            pip install -r docs/requirements-mkdocs.txt
+          pip install ".[docs]"

       - name: confirm buildability
         run: |
```
.github/workflows/pypi-release.yml (2 changed lines)

```diff
@@ -28,7 +28,7 @@
       run: twine check dist/*

     - name: check PyPI versions
-      if: github.ref == 'refs/heads/main'
+      if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
       run: |
         pip install --upgrade requests
         python -c "\
```
.github/workflows/test-invoke-pip-skip.yml (new file, 67 lines)

```yaml
name: Test invoke.py pip
on:
  pull_request:
    paths-ignore:
      - 'pyproject.toml'
      - 'ldm/**'
      - 'invokeai/backend/**'
      - 'invokeai/configs/**'
      - 'invokeai/frontend/dist/**'
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        python-version:
          # - '3.9'
          - '3.10'
        pytorch:
          # - linux-cuda-11_6
          - linux-cuda-11_7
          - linux-rocm-5_2
          - linux-cpu
          - macos-default
          - windows-cpu
          # - windows-cuda-11_6
          # - windows-cuda-11_7
        include:
          # - pytorch: linux-cuda-11_6
          #   os: ubuntu-22.04
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $GITHUB_ENV
          - pytorch: linux-cuda-11_7
            os: ubuntu-22.04
            github-env: $GITHUB_ENV
          - pytorch: linux-rocm-5_2
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            github-env: $GITHUB_ENV
          - pytorch: linux-cpu
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/cpu'
            github-env: $GITHUB_ENV
          - pytorch: macos-default
            os: macOS-12
            github-env: $GITHUB_ENV
          - pytorch: windows-cpu
            os: windows-2022
            github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_6
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_7
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu117'
          #   github-env: $env:GITHUB_ENV
    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    steps:
      - run: 'echo "No build required"'
```
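This workflow mirrors test-invoke-pip.yml below but fires on the inverse path filter and only echoes a no-op: the usual GitHub Actions pattern for keeping required status checks green on pull requests that do not touch the tested paths. The runs of the two workflows can be compared side by side with the GitHub CLI (illustrative commands, assuming `gh` is installed and authenticated):

```sh
# The two workflows produce identically named checks, so branch protection
# is satisfied whichever one runs for a given PR.
gh run list --workflow=test-invoke-pip.yml --limit 5
gh run list --workflow=test-invoke-pip-skip.yml --limit 5
```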
.github/workflows/test-invoke-pip.yml (13 changed lines)

```diff
@@ -3,11 +3,24 @@ on:
   push:
     branches:
      - 'main'
+    paths:
+      - 'pyproject.toml'
+      - 'ldm/**'
+      - 'invokeai/backend/**'
+      - 'invokeai/configs/**'
+      - 'invokeai/frontend/dist/**'
   pull_request:
+    paths:
+      - 'pyproject.toml'
+      - 'ldm/**'
+      - 'invokeai/backend/**'
+      - 'invokeai/configs/**'
+      - 'invokeai/frontend/dist/**'
     types:
       - 'ready_for_review'
       - 'opened'
       - 'synchronize'
+  merge_group:
   workflow_dispatch:

 concurrency:
```
.gitignore (3 changed lines)

```diff
@@ -1,4 +1,5 @@
 # ignore default image save location and model symbolic link
+.idea/
 embeddings/
 outputs/
 models/ldm/stable-diffusion-v1/model.ckpt
@@ -232,4 +233,4 @@ installer/update.bat
 installer/update.sh

 # no longer stored in source directory
-models
+models
```
.pre-commit-config.yaml (new file, 41 lines)

```yaml
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort

  - repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-black
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-simplify

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: 'v3.0.0-alpha.4'
    hooks:
      - id: prettier

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-toml
      - id: end-of-file-fixer
      - id: no-commit-to-branch
        args: ['--branch', 'main']
      - id: trailing-whitespace
```
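Once this file is committed, the hooks run on every `git commit` after a one-time setup; a typical sequence (pre-commit itself comes from PyPI):

```sh
# One-time setup: install the framework and register the git hook.
pip install pre-commit
pre-commit install

# Optionally run all hooks over the whole tree, as CI would.
pre-commit run --all-files
```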
.prettierignore (new file, 14 lines)

```text
invokeai/frontend/.husky
invokeai/frontend/patches

# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist

# Ignore all HTML files:
*.html

# Ignore deprecated docs
docs/installation/deprecated_documentation
```
.prettierrc.yaml (modified)

```diff
@@ -1,9 +1,9 @@
-endOfLine: lf
-tabWidth: 2
-useTabs: false
-singleQuote: true
-quoteProps: as-needed
-embeddedLanguageFormatting: auto
+endOfLine: lf
+singleQuote: true
+semi: true
+trailingComma: es5
+useTabs: false
 overrides:
   - files: '*.md'
     options:
@@ -11,3 +11,9 @@ overrides:
       printWidth: 80
       parser: markdown
       cursorOffset: -1
+  - files: docs/**/*.md
+    options:
+      tabWidth: 4
+  - files: 'invokeai/frontend/public/locales/*.json'
+    options:
+      tabWidth: 4
```
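The mirrors-prettier hook above applies these options automatically; the same result can be checked by hand with npx (an assumption that Node.js is available, since `npx` downloads prettier on demand):

```sh
# Report files that diverge from the config; prettier reads the root
# config file and .prettierignore on its own.
npx prettier --check .
```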
README.md (158 changed lines)

````diff
@@ -1,6 +1,6 @@
 <div align="center">

-[project logo image: not preserved in this capture]
+[project logo image: not preserved in this capture]

 # InvokeAI: A Stable Diffusion Toolkit

@@ -10,10 +10,10 @@
 [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]

-[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
+[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]

 [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
-[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
+[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
 [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
 [discord link]: https://discord.gg/ZmtBAhwWhy
 [github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@@ -28,12 +28,14 @@
 [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
 [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
 [latest release link]: https://github.com/invoke-ai/InvokeAI/releases
+[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
+[translation status link]: https://hosted.weblate.org/engage/invokeai/

 </div>

 InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.

-**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
+**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

 _Note: InvokeAI is rapidly evolving. Please use the
 [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
@@ -41,38 +43,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
 <div align="center">

-[canvas preview image: not preserved in this capture]
+[canvas preview image: not preserved in this capture]

 </div>

-# Getting Started with InvokeAI
+## Table of Contents
+
+1. [Quick Start](#getting-started-with-invokeai)
+2. [Installation](#detailed-installation-instructions)
+3. [Hardware Requirements](#hardware-requirements)
+4. [Features](#features)
+5. [Latest Changes](#latest-changes)
+6. [Troubleshooting](#troubleshooting)
+7. [Contributing](#contributing)
+8. [Contributors](#contributors)
+9. [Support](#support)
+10. [Further Reading](#further-reading)
+
+## Getting Started with InvokeAI

 For full installation and upgrade instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

 ### Automatic Installer (suggested for 1st time users)

 1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)

 2. Download the .zip file for your OS (Windows/macOS/Linux).

 3. Unzip the file.
-4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
-5. Wait a while, until it is done.
-6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
-7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
-8. Type `banana sushi` in the box on the top left and click `Invoke`
+
+4. If you are on Windows, double-click on the `install.bat` script. On
+macOS, open a Terminal window, drag the file `install.sh` from Finder
+into the Terminal, and press return. On Linux, run `install.sh`.
+
-## Table of Contents
+5. You'll be asked to confirm the location of the folder in which
+to install InvokeAI and its image generation model files. Pick a
+location with at least 15 GB of free disk space. More if you plan on
+installing lots of models.
+
-1. [Installation](#installation)
-2. [Hardware Requirements](#hardware-requirements)
-3. [Features](#features)
-4. [Latest Changes](#latest-changes)
-5. [Troubleshooting](#troubleshooting)
-6. [Contributing](#contributing)
-7. [Contributors](#contributors)
-8. [Support](#support)
-9. [Further Reading](#further-reading)
+6. Wait while the installer does its thing. After installing the software,
+the installer will launch a script that lets you configure InvokeAI and
+select a set of starting image generation models.

-## Installation
+7. Find the folder that InvokeAI was installed into (it is not the
+same as the unpacked zip file directory!) The default location of this
+folder (if you didn't change it in step 5) is `~/invokeai` on
+Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
+
+8. On Windows systems, double-click on the `invoke.bat` file. On
+macOS, open a Terminal window, drag `invoke.sh` from the folder into
+the Terminal, and press return. On Linux, run `invoke.sh`
+
+9. Press 2 to open the "browser-based UI", press enter/return, wait a
+minute or two for Stable Diffusion to start up, then open your browser
+and go to http://localhost:9090.
+
+10. Type `banana sushi` in the box on the top left and click `Invoke`
+
+### Command-Line Installation (for users familiar with Terminals)
+
+You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
+not supported.
+
+1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
+2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
+
+   ```terminal
+   mkdir invokeai
+   ```
+
+3. Create a virtual environment named `.venv` inside this directory and activate it:
+
+   ```terminal
+   cd invokeai
+   python -m venv .venv --prompt InvokeAI
+   ```
+
+4. Activate the virtual environment (do it every time you run InvokeAI)
+
+   _For Linux/Mac users:_
+
+   ```sh
+   source .venv/bin/activate
+   ```
+
+   _For Windows users:_
+
+   ```ps
+   .venv\Scripts\activate
+   ```
+
+5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
+
+   _For Windows/Linux with an NVIDIA GPU:_
+
+   ```terminal
+   pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+   ```
+
+   _For Linux with an AMD GPU:_
+
+   ```sh
+   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+   ```
+
+   _For Macintoshes, either Intel or M1/M2:_
+
+   ```sh
+   pip install InvokeAI --use-pep517
+   ```
+
+6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
+
+   ```terminal
+   invokeai-configure
+   ```
+
+7. Launch the web server (do it every time you run InvokeAI):
+
+   ```terminal
+   invokeai --web
+   ```
+
+8. Point your browser to http://localhost:9090 to bring up the web interface.
+9. Type `banana sushi` in the box on the top left and click `Invoke`.
+
+Be sure to activate the virtual environment each time before re-launching InvokeAI,
+using `source .venv/bin/activate` or `.venv\Scripts\activate`.
+
+### Detailed Installation Instructions

 This fork is supported across Linux, Windows and Macintosh. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
@@ -80,13 +180,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
 instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)

-### Hardware Requirements
+## Hardware Requirements

 InvokeAI is supported across Linux, Windows and macOS. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
 AMD card (using the ROCm driver).

-#### System
+### System

 You will need one of the following:

@@ -98,11 +198,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
 unable to run in half-precision mode and do not have sufficient VRAM
 to render 512x512 images.

-#### Memory
+### Memory

 - At least 12 GB Main Memory RAM.

-#### Disk
+### Disk

 - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

@@ -152,13 +252,15 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
 Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
 problems and other issues.

-# Contributing
+## Contributing

 Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
 cleanup, testing, or code reviews, is very much encouraged to do so.

 To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.

+If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
+
 If you are unfamiliar with how
 to contribute to GitHub projects, here is a
 [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
@@ -175,6 +277,8 @@ This fork is a combined effort of various people from across the world.
 [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
 their time, hard work and effort.

+Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.
+
 ### Support

 For support, please use this repository's GitHub Issues tracking service, or join the Discord.
````
Binary file not shown.
binary_installer/install.bat (deleted file, 164 lines)

```bat
@echo off

@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git, this step will be skipped.

@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.

@rem This enables a user to install this project without manually installing git or Python

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
    set "no_cache_dir="
)

echo ***** Installing InvokeAI.. *****

@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

set PACKAGES_TO_INSTALL=

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

@rem Cleanup
del /q .tmp1 .tmp2

@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
    @rem download micromamba
    echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****

    call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe

    @rem test the mamba binary
    echo ***** Micromamba version: *****
    call micromamba.exe --version

    @rem create the installer env
    if not exist "%INSTALL_ENV_DIR%" (
        call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
    )

    echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****

    call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

    if not exist "%INSTALL_ENV_DIR%" (
        echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
        pause
        exit /b
    )
)

del /q micromamba.exe

@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%

@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

del /q InvokeAI.tgz

set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..

@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix

echo ***** Unpacked InvokeAI source *****

@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit

set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit

del /q python.tgz

echo ***** Unpacked python-build-standalone *****

@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
if %errorlevel% neq 0 goto err_exit
call .venv\Scripts\activate.bat

echo ***** Created Python virtual environment *****

@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit

set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
if %errorlevel% neq 0 goto err_exit

echo ***** Updated pip and wheel *****

set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit

set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit

echo ***** Installed Python dependencies *****

set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit

copy binary_installer\invoke.bat.in .\invoke.bat
echo ***** Installed invoke launcher script ******

@rem more cleanup
rd /s /q binary_installer installer_files

@rem preload the models
call .venv\Scripts\python scripts\configure_invokeai.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit
deactivate

echo ***** Finished downloading models *****

echo All done! Execute the file invoke.bat in this directory to start InvokeAI
pause
exit

:err_exit
echo %err_msg%
pause
exit
```
@@ -1,235 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
function _err_exit {
|
||||
if test "$1" -ne 0
|
||||
then
|
||||
echo -e "Error code $1; Error caught was '$2'"
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# This script will install git (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git, this step will be skipped.
|
||||
|
||||
# Next, it'll download the project's source code.
|
||||
# Then it will download a self-contained, standalone Python and unpack it.
|
||||
# Finally, it'll create the Python virtual environment and preload the models.
|
||||
|
||||
# This enables a user to install this project without manually installing git or Python
|
||||
|
||||
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
|
||||
|
||||
export no_cache_dir="--no-cache-dir"
|
||||
if [ $# -ge 1 ]; then
|
||||
if [ "$1" = "use-cache" ]; then
|
||||
export no_cache_dir=""
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="darwin";;
|
||||
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) ;;
|
||||
arm64*) ;;
|
||||
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
MAMBA_OS_NAME=$OS_NAME
|
||||
MAMBA_ARCH=$OS_ARCH
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
MAMBA_OS_NAME="osx"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "linux" ]; then
|
||||
MAMBA_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "x86_64" ]; then
|
||||
MAMBA_ARCH="64"
|
||||
fi
|
||||
|
||||
PY_ARCH=$OS_ARCH
|
||||
if [ "$OS_ARCH" == "arm64" ]; then
|
||||
PY_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
# Compute device ('cd' segment of reqs files) detect goes here
|
||||
# This needs a ton of work
|
||||
# Suggestions:
|
||||
# - lspci
|
||||
# - check $PATH for nvidia-smi, gtt CUDA/GPU version from output
|
||||
# - Surely there's a similar utility for AMD?
|
||||
CD="cuda"
|
||||
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
|
||||
CD="mps"
|
||||
fi
|
||||
|
||||
# config
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
|
||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
|
||||
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
|
||||
elif [ "$OS_NAME" == "linux" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
|
||||
fi
|
||||
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
|
||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
|
||||
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
|
||||
|
||||
chmod u+x ./micromamba
|
||||
|
||||
# test the mamba binary
|
||||
echo -e "\n***** Micromamba version: *****\n"
|
||||
./micromamba --version
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
|
||||
|
||||
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
rm -f micromamba.exe
|
||||
|
||||
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
|
||||
|
||||
# Download/unpack/clean up InvokeAI release sourceball
|
||||
_err_msg="\n----- InvokeAI source download failed -----\n"
|
||||
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? _err_msg

_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? _err_msg

rm -f InvokeAI.tgz

_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? _err_msg
cd ..

# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix

echo -e "\n***** Unpacked InvokeAI source *****\n"

# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? _err_msg
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? _err_msg

rm -f python.tgz

echo -e "\n***** Unpacked python-build-standalone *****\n"

# create venv
_err_msg="\n----- problem creating venv -----\n"

if [ "$OS_NAME" == "darwin" ]; then
    # patch sysconfig so that extensions can build properly
    # adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
    PYTHON_INSTALL_DIR="$(pwd)/python"
    SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
    TMPFILE="$(mktemp)"
    chmod +w "${SYSCONFIG}"
    cp "${SYSCONFIG}" "${TMPFILE}"
    sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
    rm -f "${TMPFILE}"
fi

./python/bin/python3 -E -s -m venv .venv
_err_exit $? _err_msg
source .venv/bin/activate

echo -e "\n***** Created Python virtual environment *****\n"

# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? _err_msg

_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
_err_exit $? _err_msg

echo -e "\n***** Updated pip *****\n"

_err_msg="\n----- requirements file copy failed -----\n"
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? _err_msg

_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
_err_exit $? _err_msg

echo -e "\n***** Installed Python dependencies *****\n"

_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
_err_exit $? _err_msg

echo -e "\n***** Installed InvokeAI *****\n"

cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script *****\n"

# more cleanup
rm -rf binary_installer/ installer_files/

# preload the models
_err_msg="\n----- model download failed -----\n"
.venv/bin/python3 scripts/configure_invokeai.py
_err_exit $? _err_msg
deactivate

echo -e "\n***** Finished downloading models *****\n"

echo "All done! Run the command"
echo "  $scriptdir/invoke.sh"
echo "to start InvokeAI."
read -p "Press any key to exit..."
exit
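# For reference: `_err_exit` is defined earlier in this installer. A minimal
# sketch of such a helper, assuming the convention used above of passing the
# exit code and the *name* of the message variable (the real definition in the
# installer may differ):
#
#   _err_exit() {
#       if [ "$1" -ne 0 ]; then
#           echo -e "${!2}"                 # indirect expansion of the variable name
#           read -p "Press any key to exit..."
#           exit "$1"
#       fi
#   }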
@@ -1,36 +0,0 @@
@echo off

PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) else (
    echo Invalid selection
    pause
    exit /b
)

deactivate
@@ -1,46 +0,0 @@
#!/usr/bin/env sh

set -eu

. .venv/bin/activate

# set required env var for torch on mac MPS
if [ "$(uname -s)" = "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice

case $choice in
    1)
        printf "\nStarting the InvokeAI command-line...\n";
        .venv/bin/python scripts/invoke.py $*;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI...\n";
        .venv/bin/python scripts/invoke.py --web $*;
        ;;
    3)
        printf "\nDeveloper Console:\n";
        printf "Python command is:\n\t";
        which python;
        printf "Python version is:\n\t";
        python --version;
        echo "*************************"
        echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
        echo "so that you can troubleshoot this InvokeAI installation as necessary.";
        printf "*************************\n"
        echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment ***";
        /usr/bin/env "$SHELL";
        ;;
    *)
        echo "Invalid selection";
        exit
        ;;
esac
@@ -1,17 +0,0 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
@@ -1,33 +0,0 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.15
albumentations
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torch==1.12.1 ; platform_system == 'Darwin'
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
@@ -1,57 +1,63 @@
# syntax=docker/dockerfile:1

ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM python:${PYTHON_VERSION}-slim AS python-base

# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
LABEL org.opencontainers.image.authors="mauwii@outlook.de"

# Install necesarry packages
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

# Install necessary packages
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install \
        -yqq \
    && apt-get install -y \
        --no-install-recommends \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.* \
    && rm -rf /var/lib/apt/lists/*
        libopencv-dev=4.5.*

# set working directory and path
# set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# don't fall back to legacy build system
ENV PIP_USE_PEP517=1

#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1

# prepare for buildkit cache
# Install dependencies
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        build-essential=12.9 \
        gcc=4:10.2.* \
        python3-dev=3.9.*

# prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}

# Install dependencies
RUN \
    --mount=type=cache,target=${PIP_CACHE_DIR} \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update \
    && apt-get install \
        -yqq \
        --no-install-recommends \
        build-essential=12.9 \
        gcc=4:10.2.* \
        python3-dev=3.9.* \
    && rm -rf /var/lib/apt/lists/*

# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
    python3 -m venv "${APPNAME}" \
        --upgrade-deps

@@ -61,9 +67,8 @@ COPY --link . .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
    "${APPNAME}/bin/pip" install .

# build patchmatch
RUN python3 -c "from patchmatch import patch_match"

@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
#####################
FROM python-base AS runtime

# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# Create a new user
ARG UNAME=appuser
RUN useradd \
    --no-log-init \
    -m \
    -U \
    "${UNAME}"

# set Entrypoint and default CMD
# create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
    && chown -R "${UNAME}" "${VOLUME_DIR}"

# setup runtime environment
USER ${UNAME}
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]

LABEL org.opencontainers.image.authors="mauwii@outlook.de"
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]
@@ -1,19 +1,24 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

# print the settings
echo -e "You are using these values:\n"
@@ -21,23 +26,25 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
    echo -e "Volume already exists\n"
else
    echo -n "createing docker volume "
    echo -n "creating docker volume "
    docker volume create "${VOLUMENAME}"
fi

# Build Container
DOCKER_BUILDKIT=1 docker build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_IMAGE}" \
    --platform="${PLATFORM:-linux/amd64}" \
    --tag="${CONTAINER_IMAGE:-invokeai}" \
    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
    ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
    ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
    --file="${DOCKERFILE}" \
@@ -1,19 +1,31 @@
#!/usr/bin/env bash

# This file is used to set environment variables for the build.sh and run.sh scripts.

# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

    # Activate virtual environment if not already activated and exists
    if [[ -z $VIRTUAL_ENV ]]; then
        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
            && echo "Activated virtual environment: $VIRTUAL_ENV"
    fi

    # Decide which container flavor to build if not specified
    if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
        # Check for CUDA and ROCm
        CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
        ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="cuda"
        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="rocm"
        else
            CONTAINER_FLAVOR="cpu"
        fi
    fi

    # Set PIP_EXTRA_INDEX_URL based on container flavor
    if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
        PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
@@ -26,9 +38,10 @@ fi

# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
@@ -1,14 +1,16 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs

echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -22,10 +24,18 @@ docker run \
    --name="${REPOSITORY_NAME,,}" \
    --hostname="${REPOSITORY_NAME,,}" \
    --mount=source="${VOLUMENAME}",target=/data \
    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
    --mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
    ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
    ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
    --publish=9090:9090 \
    --cap-add=sys_nice \
    ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
    "${CONTAINER_IMAGE}" ${1:+$@}
    "${CONTAINER_IMAGE}" ${@:+$@}

# Remove Trash folder
for f in outputs/.Trash*; do
    if [ -e "$f" ]; then
        rm -Rf "$f"
        break
    fi
done
5 docs/.markdownlint.jsonc (new file)
@@ -0,0 +1,5 @@
{
    // MD046: don't enforce a single code-block style (indented vs. fenced)
    "MD046": false,
    // MD007: don't enforce a fixed indentation depth for nested unordered lists
    "MD007": false,
    // MD030: don't enforce a fixed number of spaces after list markers
    "MD030": false
}
Binary file not shown. (Before: 20 KiB | After: 84 KiB)
BIN docs/assets/installer-walkthrough/installing-models.png (new file; binary not shown; 128 KiB)
BIN docs/assets/installer-walkthrough/settings-form.png (new file; binary not shown; 114 KiB)
@@ -214,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
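For instance, a hypothetical `invoke>` session combining the new options might look like the sketch below (the prompt and parameter values are illustrative only, not taken from the docs):

```bash
# save every 5th intermediate image, and mirror the image along the
# X axis halfway through generation
invoke> "a symmetrical stained-glass window" -s 50 -C 7.5 --save_intermediates 5 --h_symmetry_time_pct 0.5
```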

!!! note

@@ -40,7 +40,7 @@ for adj in adjectives:
print(f'a {adj} day -A{samp} -C{cg}')
```

It's output looks like this (abbreviated):
Its output looks like this (abbreviated):

```bash
a sunny day -Aklms -C7.5

@@ -154,8 +154,11 @@ training sets will converge with 2000-3000 steps.

This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
quickly, but use more memory. The default size will run with GPUs with
as little as 12 GB.
quickly, but use more memory. The default size is selected based on
whether you have the `xformers` memory-efficient attention library
installed. If `xformers` is available, the batch size will be 8,
otherwise 3. These values were chosen to allow training to run with
GPUs with as little as 12 GB VRAM.

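If the chosen default is still too large for your card, the batch size can presumably be lowered explicitly with the `--train_batch_size` flag shown in the full command later in this guide; an illustrative fragment (the value 2 is an assumption, not a recommendation), to be combined with the other options shown below:

```sh
invokeai-ti --model=stable-diffusion-1.5 --train_batch_size=2 --only_save_embeds
```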
### Learning rate

@@ -172,8 +175,10 @@ learning rate to improve performance.

### Use xformers acceleration

This will activate XFormers memory-efficient attention. You need to
have XFormers installed for this to have an effect.
This will activate XFormers memory-efficient attention, which will
reduce memory requirements by half or more and allow you to select a
higher batch size. You need to have XFormers installed for this to
have an effect.

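Installing XFormers is typically a single pip install into the same virtual environment; a minimal sketch, assuming a prebuilt wheel is available for your platform and CUDA version:

```sh
# install the memory-efficient attention library into the active venv
pip install xformers
```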
### Learning rate scheduler

@@ -250,6 +255,67 @@ invokeai-ti \
    --only_save_embeds
```

## Using Distributed Training

If you have multiple GPUs on one machine, or a cluster of GPU-enabled
machines, you can activate distributed training. See the [HuggingFace
Accelerate pages](https://huggingface.co/docs/accelerate/index) for
full information, but the basic recipe is:

1. Enter the InvokeAI developer's console command line by selecting
option [8] from the `invoke.sh`/`invoke.bat` script.

2. Configure Accelerate using `accelerate config`:
```sh
accelerate config
```
This will guide you through the configuration process, including
specifying how many machines you will run training on and the number
of GPUs per machine.

You only need to do this once.

3. Launch training from the command line using `accelerate launch`. Be sure
that your current working directory is the InvokeAI root directory (usually
named `invokeai` in your home directory):

```sh
accelerate launch .venv/bin/invokeai-ti \
    --model=stable-diffusion-1.5 \
    --resolution=512 \
    --learnable_property=object \
    --initializer_token='*' \
    --placeholder_token='<shraddha>' \
    --train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
    --output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
    --scale_lr \
    --train_batch_size=10 \
    --gradient_accumulation_steps=4 \
    --max_train_steps=2000 \
    --learning_rate=0.0005 \
    --lr_scheduler=constant \
    --mixed_precision=fp16 \
    --only_save_embeds
```

## Using Embeddings

After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.

These will be automatically loaded when you start InvokeAI.

Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.

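As a concrete sketch, a prompt using such an embedding might look like this (the prompt text is illustrative; the trigger word `terence` is assumed from the example above):

```bash
invoke> "a watercolor portrait of <terence> at sunset" -s 50
```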
**Note:** `.pt` embeddings do not require the angle brackets.

## Troubleshooting

### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`

Messages like this indicate you trained the embedding on a different base model than the currently selected one.

For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).

## Reading

For more information on textual inversion, please see the following

@@ -2,62 +2,82 @@
title: Overview
---

Here you can find the documentation for InvokeAI's various features.
- The Basics

## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
- The [Web User Interface](WEB.md)

### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.
Guide to the Web interface. Also see the
[WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)

### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
- The [Unified Canvas](UNIFIED_CANVAS.md)

## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
Build complex scenes by combining and modifying multiple images in a
stepwise fashion. This feature combines img2img, inpainting and
outpainting in a single convenient digital artist-optimized user
interface.

## * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
- The [Command Line Interface (CLI)](CLI.md)

## * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.
Scriptable access to InvokeAI's features.

### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
- Image Generation

### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
- [Prompt Engineering](PROMPTS.md)

### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.
Get the images you want with the InvokeAI prompt engineering language.

### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.
- [Post-Processing](POSTPROCESS.md)

## Model Management
Restore mangled faces and make images larger with upscaling. Also see
the [Embiggen Upscaling Guide](EMBIGGEN.md).

## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.
- The [Concepts Library](CONCEPTS.md)

## * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
Add custom subjects and styles using HuggingFace's repository of
embeddings.

## * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
- [Image-to-Image Guide for the CLI](IMG2IMG.md)

# Other Features
Use a seed image to build new creations in the CLI.

## * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
- [Inpainting Guide for the CLI](INPAINTING.md)

## * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!
Selectively erase and replace portions of an existing image in the CLI.

- [Outpainting Guide for the CLI](OUTPAINTING.md)

Extend the borders of the image with an "outcrop" function within the
CLI.

- [Generating Variations](VARIATIONS.md)

Have an image you like and want to generate many more like it?
Variations are the ticket.

- Model Management

- [Model Installation](../installation/050_INSTALLING_MODELS.md)

Learn how to import third-party models and switch among them. This guide
also covers optimizing models to load quickly.

- [Merging Models](MODEL_MERGING.md)

Teach an old model new tricks. Merge 2-3 models together to create a new
model that combines characteristics of the originals.

- [Textual Inversion](TEXTUAL_INVERSION.md)

Personalize models by adding your own style or subjects.

- Other Features

- [The NSFW Checker](NSFW.md)

Prevent InvokeAI from displaying unwanted racy images.

- [Miscellaneous](OTHER.md)

Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!

4 docs/help/IDE-Settings/index.md (new file)
@@ -0,0 +1,4 @@
# :octicons-file-code-16: IDE-Settings

Here we will share settings for IDEs used by our developers, maybe you can find
something interesting which will help to boost your development efficiency 🔥
250 docs/help/IDE-Settings/vs-code.md (new file)
@@ -0,0 +1,250 @@
---
title: Visual Studio Code
---

# :material-microsoft-visual-studio-code:Visual Studio Code

The Workspace Settings are stored in the project (repository) root and take
priority over your user settings.

This helps to have different settings for different projects, while the user
settings are used as a default value if no workspace settings are provided.

## tasks.json

First we will create a task configuration which will create a virtual
environment and update the deps (pip, setuptools and wheel).

Into this venv we will then install the pyproject.toml in editable mode with
dev, docs and test dependencies.

```json title=".vscode/tasks.json"
{
  // See https://go.microsoft.com/fwlink/?LinkId=733558
  // for the documentation about the tasks.json format
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Create virtual environment",
      "detail": "Create .venv and upgrade pip, setuptools and wheel",
      "command": "python3",
      "args": [
        "-m",
        "venv",
        ".venv",
        "--prompt",
        "InvokeAI",
        "--upgrade-deps"
      ],
      "runOptions": {
        "instanceLimit": 1,
        "reevaluateOnRerun": true
      },
      "group": {
        "kind": "build"
      },
      "presentation": {
        "echo": true,
        "reveal": "always",
        "focus": false,
        "panel": "shared",
        "showReuseMessage": true,
        "clear": false
      }
    },
    {
      "label": "build InvokeAI",
      "detail": "Build pyproject.toml with extras dev, docs and test",
      "command": "${workspaceFolder}/.venv/bin/python3",
      "args": [
        "-m",
        "pip",
        "install",
        "--use-pep517",
        "--editable",
        ".[dev,docs,test]"
      ],
      "dependsOn": "Create virtual environment",
      "dependsOrder": "sequence",
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "echo": true,
        "reveal": "always",
        "focus": false,
        "panel": "shared",
        "showReuseMessage": true,
        "clear": false
      }
    }
  ]
}
```

The fastest way to build InvokeAI now is ++cmd+shift+b++

## launch.json

This file is used to define debugger configurations, so that you can one-click
launch and monitor the application, set halt points to inspect specific states,
...

```json title=".vscode/launch.json"
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "invokeai web",
      "type": "python",
      "request": "launch",
      "program": ".venv/bin/invokeai",
      "justMyCode": true
    },
    {
      "name": "invokeai cli",
      "type": "python",
      "request": "launch",
      "program": ".venv/bin/invokeai",
      "justMyCode": true
    },
    {
      "name": "mkdocs serve",
      "type": "python",
      "request": "launch",
      "program": ".venv/bin/mkdocs",
      "args": ["serve"],
      "justMyCode": true
    }
  ]
}
```

Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)

## extensions.json

A list of recommended vscode-extensions to make your life easier:

```json title=".vscode/extensions.json"
{
  "recommendations": [
    "editorconfig.editorconfig",
    "github.vscode-pull-request-github",
    "ms-python.black-formatter",
    "ms-python.flake8",
    "ms-python.isort",
    "ms-python.python",
    "ms-python.vscode-pylance",
    "redhat.vscode-yaml",
    "tamasfe.even-better-toml",
    "eamodio.gitlens",
    "foxundermoon.shell-format",
    "timonwong.shellcheck",
    "esbenp.prettier-vscode",
    "davidanson.vscode-markdownlint",
    "yzhang.markdown-all-in-one",
    "bierner.github-markdown-preview",
    "ms-azuretools.vscode-docker",
    "mads-hartmann.bash-ide-vscode"
  ]
}
```

## settings.json

With the settings below, your files get formatted automatically when you save
them (only your modifications, if available), which will help you avoid trouble
with the pre-commit hooks. If the hooks fail, they will prevent you from
committing, but most hooks directly add a fixed version, so that you just need
to stage and commit them:

```json title=".vscode/settings.json"
{
  "[json]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.quickSuggestions": {
      "comments": false,
      "strings": true,
      "other": true
    },
    "editor.suggest.insertMode": "replace",
    "gitlens.codeLens.scopes": ["document"]
  },
  "[jsonc]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.formatOnSave": true,
    "editor.formatOnSaveMode": "modificationsIfAvailable"
  },
  "[python]": {
    "editor.defaultFormatter": "ms-python.black-formatter",
    "editor.formatOnSave": true,
    "editor.formatOnSaveMode": "file"
  },
  "[toml]": {
    "editor.defaultFormatter": "tamasfe.even-better-toml",
    "editor.formatOnSave": true,
    "editor.formatOnSaveMode": "modificationsIfAvailable"
  },
  "[yaml]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.formatOnSave": true,
    "editor.formatOnSaveMode": "modificationsIfAvailable"
  },
  "[markdown]": {
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.rulers": [80],
    "editor.unicodeHighlight.ambiguousCharacters": false,
    "editor.unicodeHighlight.invisibleCharacters": false,
    "diffEditor.ignoreTrimWhitespace": false,
    "editor.wordWrap": "on",
    "editor.quickSuggestions": {
      "comments": "off",
      "strings": "off",
      "other": "off"
    },
    "editor.formatOnSave": true,
    "editor.formatOnSaveMode": "modificationsIfAvailable"
  },
  "[shellscript]": {
    "editor.defaultFormatter": "foxundermoon.shell-format"
  },
  "[ignore]": {
    "editor.defaultFormatter": "foxundermoon.shell-format"
  },
  "editor.rulers": [88],
  "evenBetterToml.formatter.alignEntries": false,
  "evenBetterToml.formatter.allowedBlankLines": 1,
  "evenBetterToml.formatter.arrayAutoExpand": true,
  "evenBetterToml.formatter.arrayTrailingComma": true,
  "evenBetterToml.formatter.arrayAutoCollapse": true,
  "evenBetterToml.formatter.columnWidth": 88,
  "evenBetterToml.formatter.compactArrays": true,
  "evenBetterToml.formatter.compactInlineTables": true,
  "evenBetterToml.formatter.indentEntries": false,
  "evenBetterToml.formatter.inlineTableExpand": true,
  "evenBetterToml.formatter.reorderArrays": true,
  "evenBetterToml.formatter.reorderKeys": true,
  "evenBetterToml.formatter.compactEntries": false,
  "evenBetterToml.schema.enabled": true,
  "python.analysis.typeCheckingMode": "basic",
  "python.formatting.provider": "black",
  "python.languageServer": "Pylance",
  "python.linting.enabled": true,
  "python.linting.flake8Enabled": true,
  "python.testing.unittestEnabled": false,
  "python.testing.pytestEnabled": true,
  "python.testing.pytestArgs": [
    "tests",
    "--cov=ldm",
    "--cov-branch",
    "--cov-report=term:skip-covered"
  ],
  "yaml.schemas": {
    "https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
  }
}
```
135 docs/help/contributing/010_PULL_REQUEST.md (new file)
@@ -0,0 +1,135 @@
---
title: Pull-Request
---

# :octicons-git-pull-request-16: Pull-Request

## pre-requirements

To follow the steps in this tutorial you will need:

- [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- Text / Code Editor (personally I prefer
  [Visual Studio Code](https://code.visualstudio.com/Download))
- Terminal:
  - If you are on Linux/MacOS you can use bash or zsh
  - For Windows users the commands are written for PowerShell

## Fork Repository

The first step to be done if you want to contribute to InvokeAI, is to fork the
repository.

Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" Button
in the top right.

## Clone your fork

After you forked the Repository, you should clone it to your dev machine:

=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"

    ``` sh
    git clone https://github.com/<github username>/InvokeAI \
        && cd InvokeAI
    ```

=== ":fontawesome-brands-windows:Windows"

    ``` powershell
    git clone https://github.com/<github username>/InvokeAI `
        && cd InvokeAI
    ```

## Install in Editable Mode

To install InvokeAI in editable mode, (as always) we recommend creating and
activating a venv first. Afterwards you can install the InvokeAI Package,
including dev and docs extras in editable mode, followed by the installation of
the pre-commit hook:

=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"

    ``` sh
    python -m venv .venv \
        --prompt InvokeAI \
        --upgrade-deps \
        && source .venv/bin/activate \
        && pip install \
            --upgrade \
            --use-pep517 \
            --editable=".[dev,docs]" \
        && pre-commit install
    ```

=== ":fontawesome-brands-windows:Windows"

    ``` powershell
    python -m venv .venv `
        --prompt InvokeAI `
        --upgrade-deps `
        && .venv/scripts/activate.ps1 `
        && pip install `
            --upgrade `
            --use-pep517 `
            --editable=".[dev,docs]" `
        && pre-commit install
    ```

## Create a branch

Make sure you are on the main branch; from there, create your feature branch:

=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"

    ``` sh
    git checkout main \
        && git pull \
        && git checkout -B <branch name>
    ```

=== ":fontawesome-brands-windows:Windows"

    ``` powershell
    git checkout main `
        && git pull `
        && git checkout -B <branch name>
    ```

## Commit your changes

When you are done with adding / updating content, you need to commit those
changes to your repository before you can actually open a PR:

```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```

1. Replace this with a space-separated list of the files you changed, like:
   `README.md foo.sh bar.json baz`

## Create a Pull Request

After pushing your changes, you are ready to create a Pull Request. Just head
over to your fork on [GitHub](https://github.com), which should already show you
a message that there have been recent changes on your feature branch and a green
button which you could use to create the PR.

The default target for your PRs would be the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI)

Another way would be to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code Terminal Window 🤭):

```sh
gh pr create
```

The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the Title and the Body (Description) if
you did not already pass them as arguments.
26 docs/help/contributing/020_ISSUES.md (new file)
@@ -0,0 +1,26 @@
---
title: Issues
---

# :octicons-issue-opened-16: Issues

## :fontawesome-solid-bug: Report a bug

If you stumbled over a bug while using InvokeAI, we would appreciate it a lot if
you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.

If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
find out how to create a Pull Request.

## Request a feature

If you have an idea for a new feature on your mind which you would like to see in
InvokeAI, there is a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
available in the issues section of the repository.

If you are just curious which features already got requested you can find the
overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
32 docs/help/contributing/030_DOCS.md (new file)
@@ -0,0 +1,32 @@
---
title: docs
---

# :simple-readthedocs: MkDocs-Material

If you want to contribute to the docs, there is an easy way to verify the results
of your changes before committing them.

Just follow the steps in the [Pull-Requests](010_PULL_REQUEST.md) docs, there we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
Once installed, it's as simple as:

```sh
mkdocs serve
```

This will build the docs locally and serve them on your local host; even
auto-refresh is included, so you can just update a doc, save it and tab to the
browser, without the need to restart `mkdocs serve`.

More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).

## :material-microsoft-visual-studio-code:VS-Code

We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
the formatting since this is automated via prettier, but this is of course not
limited to VS-Code.
76 docs/help/contributing/090_NODE_TRANSFORMATION.md (new file)
@@ -0,0 +1,76 @@
# Transformation to nodes

## Current state

```mermaid
flowchart TD
    web[WebUI];
    cli[CLI];
    web --> |img2img| generate(generate);
    web --> |txt2img| generate(generate);
    cli --> |txt2img| generate(generate);
    cli --> |img2img| generate(generate);
    generate --> model_manager;
    generate --> generators;
    generate --> ti_manager[TI Manager];
    generate --> etc;
```

## Transitional Architecture

### first step

```mermaid
flowchart TD
    web[WebUI];
    cli[CLI];
    web --> |img2img| img2img_node(Img2img node);
    web --> |txt2img| generate(generate);
    img2img_node --> model_manager;
    img2img_node --> generators;
    cli --> |txt2img| generate;
    cli --> |img2img| generate;
    generate --> model_manager;
    generate --> generators;
    generate --> ti_manager[TI Manager];
    generate --> etc;
```

### second step

```mermaid
flowchart TD
    web[WebUI];
    cli[CLI];
    web --> |img2img| img2img_node(img2img node);
    img2img_node --> model_manager;
    img2img_node --> generators;
    web --> |txt2img| txt2img_node(txt2img node);
    cli --> |txt2img| txt2img_node;
    cli --> |img2img| generate(generate);
    generate --> model_manager;
    generate --> generators;
    generate --> ti_manager[TI Manager];
    generate --> etc;
    txt2img_node --> model_manager;
    txt2img_node --> generators;
    txt2img_node --> ti_manager[TI Manager];
```

## Final Architecture

```mermaid
flowchart TD
    web[WebUI];
    cli[CLI];
    web --> |img2img|img2img_node(img2img node);
    cli --> |img2img|img2img_node;
    web --> |txt2img|txt2img_node(txt2img node);
    cli --> |txt2img|txt2img_node;
    img2img_node --> model_manager;
    txt2img_node --> model_manager;
    img2img_node --> generators;
    txt2img_node --> generators;
    img2img_node --> ti_manager[TI Manager];
    txt2img_node --> ti_manager[TI Manager];
```
16 docs/help/contributing/index.md (new file)
@@ -0,0 +1,16 @@
---
title: Contributing
---

# :fontawesome-solid-code-commit: Contributing

There are different ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), like Translations, opening
Issues for Bugs or ideas for how to improve.

This Section of the docs will explain some of the different ways of how you can
contribute to make it easier for newcomers as well as advanced users :nerd:

If you want to contribute code, but you do not have an exact idea yet, take a
look at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)

12 docs/help/index.md (new file)
@@ -0,0 +1,12 @@
# :material-help:Help

If you are looking for help with the installation of InvokeAI, please take a
look into the [Installation](../installation/index.md) section of the docs.

Here you will find help on topics like

- how to contribute
- configuration recommendations for IDEs

If you have an idea about what's missing and aren't scared of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.
@@ -1,19 +0,0 @@
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Swagger UI</title>
    <link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
    <link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
    <link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
    <link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
</head>

<body>
    <div id="swagger-ui"></div>
    <script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
    <script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
    <script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
</body>
</html>
295 docs/index.md
@@ -2,6 +2,8 @@
title: Home
---

# :octicons-home-16: Home

<!--
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::

@@ -29,36 +31,36 @@ title: Home
[![github open prs badge]][github open prs link]

[ci checks on dev badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[ci checks on dev link]:
  https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[ci checks on main badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[ci checks on main link]:
  https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]:
  https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]:
  https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]:
  https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]:
  https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]:
  https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]:
  https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]:
  https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]:
  https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]:
  https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]:
  https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases

</div>

@@ -87,24 +89,24 @@ Q&A</a>]

You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
  only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory and Disk

- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python,
  and all its dependencies.

## :octicons-package-dependencies-24: Installation

@@ -113,48 +115,65 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
|
||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||
|
||||
This method is recommended for 1st time users
|
||||
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
|
||||
This method is recommended for experienced users and developers
|
||||
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :octicons-gift-24: InvokeAI Features
|
||||
|
||||
### The InvokeAI Web Interface
|
||||
- [WebUI overview](features/WEB.md)
|
||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||
|
||||
- [WebUI overview](features/WEB.md)
|
||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||
<!-- separator -->
|
||||
|
||||
### The InvokeAI Command Line Interface
|
||||
- [Command Line Interace Reference Guide](features/CLI.md)
|
||||
|
||||
- [Command Line Interace Reference Guide](features/CLI.md)
|
||||
<!-- separator -->
|
||||
|
||||

### Image Management

- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)

<!-- separator -->

### Model Management

- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)

<!-- separator -->

### Prompt Engineering

- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)

## :octicons-log-16: Latest Changes

@@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers

#### Migration to Stable Diffusion `diffusers` models

Previous versions of InvokeAI supported the original model file format
introduced with Stable Diffusion 1.4. In the original format, known variously as
"checkpoint", or "legacy" format, there is a single large weights file ending
with `.ckpt` or `.safetensors`. Though this format has served the community
well, it has a number of disadvantages, including file size, slow loading times,
and a variety of non-standard variants that require special-case code to handle.
In addition, because checkpoint files are actually a bundle of multiple machine
learning sub-models, it is hard to swap different sub-models in and out, or to
share common sub-models. A new format, introduced by the StabilityAI company in
collaboration with HuggingFace, is called `diffusers` and consists of a
directory of individual models. The most immediate benefit of `diffusers` is
that they load from disk very quickly. A longer term benefit is that in the near
future `diffusers` models will be able to share common sub-models, dramatically
reducing disk space when you have multiple fine-tune models derived from the
same base.

When you perform a new install of version 2.3.0, you will be offered the option
to install the `diffusers` versions of a number of popular SD models, including
Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of
2.1). These will act and work just like the checkpoint versions. Do not be
concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk!
InvokeAI 2.3.0 can still load these and generate images from them without any
extra intervention on your part.

To take advantage of the optimized loading times of `diffusers` models, InvokeAI
offers options to convert legacy checkpoint models into optimized `diffusers`
models. If you use the `invokeai` command line interface, the relevant commands
are:

- `!convert_model` -- Take the path to a local checkpoint file or a URL that
  is pointing to one, convert it into a `diffusers` model, and import it into
  InvokeAI's models registry file.
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
  models file, this command will accept its short name and convert it into a
  like-named `diffusers` model, optionally deleting the original checkpoint
  file.
- `!import_model` -- Take the local path of either a checkpoint file or a
  `diffusers` model directory and import it into InvokeAI's registry file. You
  may also provide the ID of any diffusers model that has been published on the
  [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
  and it will be downloaded and installed automatically.
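
For example, a hypothetical CLI session that converts a local checkpoint and
then imports a `diffusers` model by its HuggingFace repo ID might look like
this (the file path is a placeholder):

```console
invoke> !convert_model /home/YourName/Downloads/martians.safetensors
invoke> !import_model andite/anything-v4.0
```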

The WebGUI offers similar functionality for model management.

For advanced users, new command-line options provide additional functionality.
Launching `invokeai` with the argument `--autoconvert <path to directory>` takes
the path to a directory of checkpoint files, automatically converts them into
`diffusers` models and imports them. Each time the script is launched, the
directory will be scanned for new checkpoint files to be loaded. Alternatively,
the `--ckpt_convert` argument will cause any checkpoint or safetensors model
that is already registered with InvokeAI to be converted into a `diffusers`
model on the fly, allowing you to take advantage of future diffusers-only
features without explicitly converting the model and saving it to disk.
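
As a concrete sketch, a launch that watches a checkpoint directory would look
like this (the directory path is a placeholder):

```bash
invokeai --autoconvert /home/YourName/stable-diffusion-checkpoints
```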

Please see
[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
for more information on model management in both the command-line and Web
interfaces.

#### Support for the `XFormers` Memory-Efficient Crossattention Package

On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
installed, the `xformers` package dramatically reduces the memory footprint of
loaded Stable Diffusion model files and modestly increases image generation
speed. `xformers` will be installed and activated automatically if you specify a
CUDA system at install time.

The caveat with using `xformers` is that it introduces slightly
non-deterministic behavior, and images generated using the same seed and other
settings will be subtly different between invocations. Generally the changes are
unnoticeable unless you rapidly shift back and forth between images, but to
disable `xformers` and restore fully deterministic behavior, you may launch
InvokeAI using the `--no-xformers` option. This is most conveniently done by
opening the file `invokeai/invokeai.init` with a text editor, and adding the
line `--no-xformers` at the bottom.
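
After that edit, the tail of `invokeai/invokeai.init` would look something like
this (the comment stands in for whatever options you already have):

```bash
# ...your existing startup options...
--no-xformers
```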

#### A Negative Prompt Box in the WebUI

There is now a separate text input box for negative prompts in the WebUI. This
is convenient for stashing frequently-used negative prompts ("mangled limbs, bad
anatomy"). The `[negative prompt]` syntax continues to work in the main prompt
box as well.
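
For example, this prompt (the subject text is illustrative) combines a positive
prompt with the bracketed negative-prompt syntax:

```console
invoke> portrait of a woman in a garden [mangled limbs, bad anatomy]
```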

To see exactly how your prompts are being parsed, launch `invokeai` with the
`--log_tokenization` option. The console window will then display the
tokenization process for both positive and negative prompts.

#### Model Merging

Version 2.3.0 offers an intuitive user interface for merging up to three Stable
Diffusion models. Model merging allows you to mix the behavior of models to
achieve very interesting effects. To use this, each of the models must already
be imported into InvokeAI and saved in `diffusers` format, then launch the
merger using a new menu item in the InvokeAI launcher script (`invoke.sh`,
`invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You
will be prompted to select the models to merge, the proportions in which to mix
them, and the mixing algorithm. The script will create a new merged `diffusers`
model and import it into InvokeAI for your use.

See
[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
for more details.

#### Textual Inversion Training

Textual Inversion (TI) is a technique for training a Stable Diffusion model to
emit a particular subject or style when triggered by a keyword phrase. You can
perform TI training by placing a small number of images of the subject or style
in a directory, and choosing a distinctive trigger phrase, such as
"pointillist-style". After successful training, the subject or style will be
activated by including `<pointillist-style>` in your prompt.

Previous versions of InvokeAI were able to perform TI, but it required using a
command-line script with dozens of obscure command-line arguments. Version 2.3.0
features an intuitive TI frontend that will build a TI model on top of any
`diffusers` model. To access training you can launch it from a new item in the
launcher script or from the command line using `invokeai-ti --gui`.

See
[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
for further details.

#### A New Installer Experience

The InvokeAI installer has been upgraded in order to provide a smoother and
hopefully more glitch-free experience. In addition, InvokeAI is now packaged as
a PyPI project, allowing developers and power-users to install InvokeAI with the
command `pip install InvokeAI --use-pep517`. Please see
[Installation](#installation) for details.

Developers should be aware that the `pip` installation procedure has been
simplified and that the `conda` method is no longer supported at all.
Accordingly, the `environments_and_requirements` directory has been deleted from
the repository.

#### Command-line name changes

All of InvokeAI's functionality, including the WebUI, command-line interface,
textual inversion training and model merging, can be accessed from the
`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
expanded to add the new functionality. For the convenience of developers and
power users, we have normalized the names of the InvokeAI command-line scripts:

- `invokeai` -- Command-line client
- `invokeai --web` -- Web GUI
- `invokeai-merge --gui` -- Model merging script with graphical front end
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
  directory and selecting popular starter models.

For backward compatibility, the old command names are also recognized, including
`invoke.py` and `configure-invokeai.py`. However, these are deprecated and will
eventually be removed.

Developers should be aware that the locations of the scripts' source code have
been moved. The new locations are:

- `invokeai` => `ldm/invoke/CLI.py`
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
- `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
- `invokeai-merge` => `ldm/invoke/merge_diffusers`

Developers are strongly encouraged to perform an "editable" install of InvokeAI
using `pip install -e . --use-pep517` in the Git repository, and then to call
the scripts using their 2.3.0 names, rather than executing the scripts directly.
Developers should also be aware that several important data files have been
relocated into a new directory named `invokeai`. This includes the WebGUI's
`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
by the installer to select starter models. Eventually all InvokeAI modules will
be in subdirectories of `invokeai`.

Please see
[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0)
for further details. For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.

## :material-target: Troubleshooting

Please check out our
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
to get solutions for common installation problems and other issues.

## :octicons-repo-push-24: Contributing

@@ -265,8 +388,8 @@ thank them for their time, hard work and effort.

For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2022-23 by
[The InvokeAI Team](https://github.com/invoke-ai).

## :octicons-book-24: Further Reading

@@ -40,9 +40,10 @@ experimental versions later.
this, open up a command-line window ("Terminal" on Linux and
Macintosh, "Command" or "Powershell" on Windows) and type `python
--version`. If Python is installed, it will print out the version
number. If it is version `3.9.*` or `3.10.*`, you meet
requirements. We do not recommend using Python 3.11 or higher,
as not all the libraries that InvokeAI depends on work properly
with this version.
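
For example, on a system with a supported interpreter, the check would look
like this:

```terminal
python --version
Python 3.10.9
```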

!!! warning "What to do if you have an unsupported version"

@@ -50,8 +51,7 @@ experimental versions later.
and download the appropriate installer package for your
platform. We recommend [Version
3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI.

_Please select your platform in the section below for platform-specific
setup requirements._

@@ -150,7 +150,7 @@ experimental versions later.

```cmd
C:\Documents\Linco> cd InvokeAI-Installer
C:\Documents\Linco\InvokeAI-Installer> .\install.bat
```

7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a

@@ -167,6 +167,11 @@ experimental versions later.
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
on Macintoshes, where "YourName" is your login name.

- If you have previously installed InvokeAI, you will be asked to
  confirm whether you want to reinstall into this directory. You
  may choose to reinstall, in which case your version will be upgraded,
  or choose a different directory.

- The script uses tab autocompletion to suggest directory path completions.
  Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
  to suggest completions.

@@ -181,11 +186,6 @@ experimental versions later.
are unsure what GPU you are using, you can ask the installer to
guess.

9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI and the application itself.

@@ -197,25 +197,141 @@ experimental versions later.
minutes and nothing is happening, you can interrupt the script with ^C. You
may restart it and it will pick up where it left off.

10. **Post-install Configuration**: After installation completes, the
installer will launch the configuration form, which will guide you
through the first-time process of adjusting some of InvokeAI's
startup settings. To move around this form use ctrl-N for
<N>ext and ctrl-P for <P>revious, or use <tab>
and shift-<tab> to move forward and back. Once you are in a
multi-checkbox field use the up and down cursor keys to select the
item you want, and <space> to toggle it on and off. Within
a directory field, pressing <tab> will provide autocomplete
options.

<figure markdown>

</figure>

Generally the defaults are fine, and you can come back to this screen at
any time to tweak your system. Here are the options you can adjust:

- ***Output directory for images***
  This is the path to a directory in which InvokeAI will store all its
  generated images.

- ***NSFW checker***
  If checked, InvokeAI will test images for potential sexual content
  and blur them out if found. Note that the NSFW checker consumes
  an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
  by most image models. If you have a low VRAM GPU (4-6 GB), you
  can reduce out of memory errors by disabling the checker.

- ***HuggingFace Access Token***
  InvokeAI has the ability to download embedded styles and subjects
  from the HuggingFace Concept Library on-demand. However, some of
  the concept library files are password protected. To make downloads
  smoother, you can set up an account at huggingface.co, obtain an
  access token, and paste it into this field. Note that you paste
  into this screen using ctrl-shift-V.

- ***Free GPU memory after each generation***
  This is useful for low-memory machines and helps minimize the
  amount of GPU VRAM used by InvokeAI.

- ***Enable xformers support if available***
  If the xformers library was successfully installed, this will activate
  it to reduce memory consumption and increase rendering speed noticeably.
  Note that xformers has the side effect of generating slightly different
  images even when presented with the same seed and other settings.

- ***Force CPU to be used on GPU systems***
  This will use the (slow) CPU rather than the accelerated GPU. This
  can be used to generate images on systems that don't have a compatible
  GPU.

- ***Precision***
  This controls whether to use float32 or float16 arithmetic.
  float16 uses less memory but is also slightly less accurate.
  Ordinarily the right arithmetic is picked automatically ("auto"),
  but you may have to use float32 to get images on certain systems
  and graphics cards. The "autocast" option is deprecated and
  shouldn't be used unless you are asked to by a member of the team.

- ***Number of models to cache in CPU memory***
  This allows you to keep models in memory and switch rapidly among
  them rather than having them load from disk each time. This slider
  controls how many models to keep loaded at once. Each
  model will use 2-4 GB of RAM, so use this cautiously.

- ***Directory containing embedding/textual inversion files***
  This is the directory in which you can place custom embedding
  files (.pt or .bin). During startup, this directory will be
  scanned and InvokeAI will print out the text terms that
  are available to trigger the embeddings.

At the bottom of the screen you will see a checkbox for accepting
the CreativeML Responsible AI License. You need to accept the license
in order to download Stable Diffusion models from the next screen.
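
These choices are saved as command-line style options in the `invokeai.init`
file in the InvokeAI root directory (the same file mentioned in the sections
above). A saved configuration might look roughly like this sketch; the flags
shown are illustrative, so check the file the form actually writes:

```bash
--outdir="/home/YourName/invokeai/outputs"
--no-nsfw_checker
--no-xformers
```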

_You can come back to the startup options form_ as many times as you like.
From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
this script. On the command line, it is named `invokeai-configure`.

11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
to another screen that prompts you to download a series of starter models. The ones
we recommend are preselected for you, but you are encouraged to use the checkboxes to
pick and choose.
You will probably wish to download `autoencoder-840000` for use with models that
were trained with an older version of the Stability VAE.

<figure markdown>

</figure>

Below the preselected list of starter models is a large text field which you can use
to specify a series of models to import. You can specify models in a variety of formats,
each separated by a space or newline. The formats accepted are:

- The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
  the file browser to the textfield to automatically paste the path. Be sure to remove
  extraneous quotation marks and other things that come along for the ride.

- The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
  The directory will be scanned from top to bottom (including subfolders) and any
  file that can be imported will be.

- A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
  and paste directly from a web page, or simply drag the link from the web page
  or navigation bar. (You can also use ctrl-shift-V to paste into this field.)
  The file will be downloaded and installed.

- The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
  the format _author_name/model_name_, as in `andite/anything-v4.0`.

- The path to a local directory containing a `diffusers`
  model. These directories always have the file `model_index.json`
  at their top level.

_Select a directory for models to import_: You may select a local
directory for autoimporting at startup time. If you select this
option, the directory you choose will be scanned for new
.ckpt/.safetensors files each time InvokeAI starts up, and any new
files will be automatically imported and made available for your
use.

_Convert imported models into diffusers_: When legacy checkpoint
files are imported, you may select to use them unmodified (the
default) or to convert them into `diffusers` models. The latter
load much faster and have slightly better rendering performance,
but not all checkpoint files can be converted. Note that Stable Diffusion
Version 2.X files are **only** supported in `diffusers` format and will
be converted regardless.

_You can come back to the model install form_ as many times as you like.
From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
this script. On the command line, it is named `invokeai-model-install`.

12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
for the directory `invokeai` installed in the location you chose at the
beginning of the install session. Look for a shell script named `invoke.sh`
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking

@@ -301,7 +417,7 @@ Then type the following commands:

=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```

### Corrupted configuration file

@@ -327,6 +443,52 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.

### Out of Memory Issues

The models are large, VRAM is expensive, and you may find yourself
faced with Out of Memory errors when generating images. Here are some
tips to reduce the problem:

* **4 GB of VRAM**

  This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
  and derived models, provided that you **disable** the NSFW checker. To
  disable the filter, do one of the following:

  * Select option (6) "_change InvokeAI startup options_" from the
    launcher. This will bring up the console-based startup settings
    dialogue and allow you to unselect the "NSFW Checker" option.
  * Start the startup settings dialogue directly by running
    `invokeai-configure --skip-sd-weights --skip-support-models`
    from the command line.
  * Find the `invokeai.init` initialization file in the InvokeAI root
    directory, open it in a text editor, and change `--nsfw_checker`
    to `--no-nsfw_checker`.

  If you are on a CUDA system, you can realize significant memory
  savings by activating the `xformers` library as described above. The
  downside is `xformers` introduces non-deterministic behavior, such
  that images generated with exactly the same prompt and settings will
  be slightly different from each other. See above for more information.

* **6 GB of VRAM**

  This is a border case. Using the SD 1.5 series you should be able to
  generate images up to 640x640 with the NSFW checker enabled, and up to
  1024x1024 with it disabled and `xformers` activated.

  If you run into persistent memory issues there are a series of
  environment variables that you can set before launching InvokeAI that
  alter how the PyTorch machine learning library manages memory. See
  https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
  a list of these tweaks.
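
  For example, one documented knob on that page caps the size of allocator
  blocks that can be split, which can reduce fragmentation (the value below is
  a starting point, not a tuned recommendation):

  ```bash
  export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128
  ./invoke.sh
  ```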

* **12 GB of VRAM**

  This should be sufficient to generate larger images up to about
  1280x1280. If you wish to push further, consider activating
  `xformers`.

### Other Problems

If you run into problems during or after installation, the InvokeAI team is

@@ -348,25 +510,11 @@ version (recommended), follow these steps:

1. Start the `invoke.sh`/`invoke.bat` launch script from within the
   `invokeai` root directory.

2. Choose menu item (10) "Update InvokeAI".

3. This will launch a menu that gives you the option of:

    1. Updating to the latest official release;
    2. Updating to the bleeding-edge development version; or
    3. Manually entering the tag or branch name of a version of
       InvokeAI you wish to try out.

@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming):

* **Python**

  version 3.9 or 3.10 (3.11 is not recommended).

* **CUDA Tools**

  For those with _NVidia GPUs_, you will need to
  install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).

* **ROCm Tools**

  For _Linux users with AMD GPUs_, you will need
  to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
  InvokeAI does not support AMD GPUs on Windows systems due to
  lack of a Windows ROCm library.

* **Visual C++ Libraries**

  _Windows users_ must install the free
  [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)

* **The Xcode command line tools**

  for _Macintosh users_. Instructions are available at
  [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)

* _Macintosh users_ may also need to run the `Install Certificates` command
  if model downloads give lots of certificate errors. Run:
  `/Applications/Python\ 3.10/Install\ Certificates.command`

### Installation Walkthrough

@@ -75,7 +85,7 @@ manager, please follow these steps:

=== "Linux/Mac"

```bash
export INVOKEAI_ROOT=~/invokeai
mkdir $INVOKEAI_ROOT
```

@@ -99,35 +109,30 @@ manager, please follow these steps:
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.

```terminal
cd $INVOKEAI_ROOT
python -m venv .venv --prompt InvokeAI
```

4. Activate the new environment:

=== "Linux/Mac"
```bash
source .venv/bin/activate
```

=== "Windows"
```ps
.venv\Scripts\activate
```

If you get a permissions error at this point, run this command and try again:

`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`

The command-line prompt should change to show `(InvokeAI)` at the
beginning of the prompt. Note that all the following steps should be
run while inside the INVOKEAI_ROOT directory.

@@ -137,40 +142,47 @@ manager, please follow these steps:
python -m pip install --upgrade pip
```

6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
CUDA, ROCm and CPU/MPS drivers as shown below:
=== "CUDA (NVidia)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
```
|
||||
|
||||
=== "ROCm (AMD)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
||||
```
|
||||
|
||||
=== "CPU (Intel Macs & non-GPU systems)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
```
|
||||
|
||||
=== "MPS (M1 and M2 Macs)"
|
||||
|
||||
```bash
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
pip install InvokeAI --use-pep517
|
||||
```
|
||||
|
||||

7. Deactivate and reactivate your virtual environment so that the
invokeai-specific commands become available in the environment:

=== "Linux/Macintosh"
```bash
deactivate && source .venv/bin/activate
```

=== "Windows"
```ps
deactivate
.venv\Scripts\activate
```

8. Set up the runtime directory

@@ -179,7 +191,7 @@ manager, please follow these steps:
models, model config files, directory for textual inversion embeddings, and
your outputs.

```terminal
invokeai-configure
```

@@ -283,13 +295,12 @@ on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)

1. From the command line, run this command:

```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```

This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.

2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)

@@ -314,7 +325,7 @@ installation protocol (important!)

=== "MPS (M1 and M2 Macs)"
```bash
pip install -e . --use-pep517
```

Be sure to pass `-e` (for an editable install) and don't forget the

@@ -330,5 +341,29 @@ installation protocol (important!)
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.

Please see [Contributing](../index.md#contributing) for hints
on getting started.

### Unsupported Conda Install

Congratulations, you found the "secret" Conda installation
instructions. If you really **really** want to use Conda with InvokeAI
you can do so using this unsupported recipe:

```bash
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```

The `pip install` command shown in this recipe is for Linux/Windows
systems with an NVIDIA GPU. See step (6) above for the command to use
with other platforms/GPU combinations. If you don't wish to pass the
`--root` argument to `invokeai` with each launch, you may set the
environment variable INVOKEAI_ROOT to point to the installation directory.

Note that if you run into problems with the Conda installation, the InvokeAI
staff will **not** be able to help you out. Caveat Emptor!

@@ -110,7 +110,7 @@ recipes are available

When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer

@@ -43,25 +43,31 @@ InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that are currently installed are found in
`configs/models.yaml`. As of v2.3.1, the list of starter models is:

|Model Name | HuggingFace Repo ID | Description | URL |
|---------- | ---------- | ----------- | --- |
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |

Note that these files are covered by an "Ethical AI" license which
forbids certain uses. When you initially download them, you are asked
to accept the license terms. In addition, some of these models carry
additional license terms that limit their use in commercial
applications or on public servers. Be sure to familiarize yourself
with the model terms by visiting the URLs in the table above.

## Community-Contributed Models

@@ -80,6 +86,13 @@ only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.

!!! note

    InvokeAI 2.3.x does not support directly importing and
    running Stable Diffusion version 2 checkpoint models. You may instead
    convert them into `diffusers` models using the conversion methods
    described below.

## Installation

There are multiple ways to install and manage models:

@@ -90,7 +103,7 @@ There are multiple ways to install and manage models:
models files.

3. The web interface (WebUI) has a GUI for importing and managing
   models.

### Installation via `invokeai-configure`

@@ -106,7 +119,7 @@ confirm that the files are complete.

You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.

#### Installing individual `.ckpt` and `.safetensors` models

If the model is already downloaded to your local disk, use
`!import_model /path/to/file.ckpt` to load it. For example:

@@ -131,15 +144,40 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors

For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.

When you import a legacy model, the CLI will first ask you what type
of model this is. You can indicate whether it is a model based on
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
or a 1.x inpainting model. Be careful to indicate the correct model
type, or it will not load correctly. You can correct the model type
after the fact using the `!edit_model` command.

The system will then ask you a few other questions about the model,
including what size image it was trained on (usually 512x512), what
name and description you wish to use for it, and whether you would
like to install a custom VAE (variable autoencoder) file for the
model. For recent models, the answer to the VAE question is usually
"no," but it won't hurt to answer "yes".

After importing, the model will load. If this is successful, you will
be asked if you want to keep the model loaded in memory to start
generating immediately. You'll also be asked if you wish to make this
the default model on startup. You can change this later using
`!edit_model`.
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory

You may also point `!import_model` to a directory containing a set of
`.ckpt` or `.safetensors` files. They will be imported _en masse_.

!!! example

    ```console
    invoke> !import_model C:/Users/fred/Downloads/civitai_models/
    ```

You will be given the option to import all models found in the
directory, or select which ones to import. If there are subfolders
within the directory, they will be searched for models to import.

#### Installing `diffusers` models

@@ -173,6 +211,26 @@ description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".
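As an illustration, a `diffusers` model can be imported either from a local folder on disk or, assuming your build accepts HuggingFace repository IDs, directly by repo_id (the repo_id below is one of the models from the standard model list):

```console
invoke> !import_model /path/to/wonderful-model-diffusers
invoke> !import_model dreamlike-art/dreamlike-photoreal-2.0
```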
### Specifying a configuration file for legacy checkpoints

Some checkpoint files come with instructions to use a specific .yaml
configuration file. For InvokeAI to load this file correctly, please put
the config file in the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the weights file. Here is an example:

```bash
wonderful-model-v2.ckpt
wonderful-model-v2.yaml
```

Similarly, to use a custom VAE, name the VAE like this:

```bash
wonderful-model-v2.vae.pt
```

### Converting legacy models into `diffusers`

The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
@@ -279,19 +337,23 @@ After you save the modified `models.yaml` file relaunch
### Installation via the WebUI

To access the WebUI Model Manager, click on the button that looks like
a cute in the upper right side of the browser screen. This will bring
a cube in the upper right side of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:

<figure markdown>

![model-manager]()

</figure>

To add a new model, click on **+ Add New** and select either a
checkpoint/safetensors model, or a diffusers model:

<figure markdown>

![model-manager-add-new]()

</figure>

In this example, we chose **Add Diffusers**. As shown in the figure
@@ -302,7 +364,9 @@ choose to enter a path to disk, the system will autocomplete for you
as you type:

<figure markdown>

![model-manager-add-diffusers]()

</figure>

Press **Add Model** at the bottom of the dialogue (scrolled out of
@@ -317,7 +381,9 @@ directory and press the "Search" icon. This will display the
subfolders, and allow you to choose which ones to import:

<figure markdown>

![model-manager-clipboard]()

</figure>

## Model Management Startup Options
@@ -342,9 +408,8 @@ invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints

And here is what the same argument looks like in `invokeai.init`:

```bash
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
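A Windows launch should look analogous, since `invoke.bat` forwards its arguments to the underlying `invokeai` command for the generation options; treat the folder path below as a placeholder:

```console
invoke.bat --autoconvert C:\Users\fred\stable-diffusion-checkpoints
```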
@@ -1,73 +0,0 @@
openapi: 3.0.3
info:
  title: Stable Diffusion
  description: |-
    TODO: Description Here

    Some useful links:
    - [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)

  license:
    name: MIT License
    url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
  version: 1.0.0
servers:
  - url: http://localhost:9090/api
tags:
  - name: images
    description: Retrieve and manage generated images
paths:
  /images/{imageId}:
    get:
      tags:
        - images
      summary: Get image by ID
      description: Returns a single image
      operationId: getImageById
      parameters:
        - name: imageId
          in: path
          description: ID of image to return
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Image not found
  /intermediates/{intermediateId}/{step}:
    get:
      tags:
        - images
      summary: Get intermediate image by ID
      description: Returns a single intermediate image
      operationId: getIntermediateById
      parameters:
        - name: intermediateId
          in: path
          description: ID of intermediate to return
          required: true
          schema:
            type: string
        - name: step
          in: path
          description: The generation step of the intermediate
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Intermediate not found
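For reference, the endpoints this now-deleted spec described were plain HTTP GETs returning PNG data, so a request against a running server would have looked like the following (the image ID is hypothetical):

```bash
# Fetch a generated image by ID from the legacy API (ID is illustrative)
curl -o result.png http://localhost:9090/api/images/abc123
```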
docs/other/TRANSLATION.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# Translation

InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.

## Contributing

If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).

Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.

Your changes will be attributed to you in the automated PR process; you don't need to do anything else.

## Help & Questions

Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.

## Thanks

Thanks to the InvokeAI community for their efforts to translate the project!
@@ -1,5 +0,0 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0
Binary file not shown. (Before: 665 B)
Binary file not shown. (Before: 628 B)
@@ -1,16 +0,0 @@
html {
    box-sizing: border-box;
    overflow: -moz-scrollbars-vertical;
    overflow-y: scroll;
}

*,
*:before,
*:after {
    box-sizing: inherit;
}

body {
    margin: 0;
    background: #fafafa;
}
@@ -1,79 +0,0 @@
<!doctype html>
<html lang="en-US">
<head>
    <title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
    'use strict';
    function run () {
        var oauth2 = window.opener.swaggerUIRedirectOauth2;
        var sentState = oauth2.state;
        var redirectUrl = oauth2.redirectUrl;
        var isValid, qp, arr;

        if (/code|token|error/.test(window.location.hash)) {
            qp = window.location.hash.substring(1).replace('?', '&');
        } else {
            qp = location.search.substring(1);
        }

        arr = qp.split("&");
        arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
        qp = qp ? JSON.parse('{' + arr.join() + '}',
                function (key, value) {
                    return key === "" ? value : decodeURIComponent(value);
                }
        ) : {};

        isValid = qp.state === sentState;

        if ((
          oauth2.auth.schema.get("flow") === "accessCode" ||
          oauth2.auth.schema.get("flow") === "authorizationCode" ||
          oauth2.auth.schema.get("flow") === "authorization_code"
        ) && !oauth2.auth.code) {
            if (!isValid) {
                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "warning",
                    message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
                });
            }

            if (qp.code) {
                delete oauth2.state;
                oauth2.auth.code = qp.code;
                oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
            } else {
                let oauthErrorMsg;
                if (qp.error) {
                    oauthErrorMsg = "["+qp.error+"]: " +
                        (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
                        (qp.error_uri ? "More info: "+qp.error_uri : "");
                }

                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "error",
                    message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
                });
            }
        } else {
            oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
        }
        window.close();
    }

    if (document.readyState !== 'loading') {
        run();
    } else {
        document.addEventListener('DOMContentLoaded', function () {
            run();
        });
    }
</script>
</body>
</html>
@@ -1,20 +0,0 @@
window.onload = function() {
  //<editor-fold desc="Changeable Configuration Block">

  // the following lines will be replaced by docker/configurator, when it runs in a docker-container
  window.ui = SwaggerUIBundle({
    url: "openapi3_0.yaml",
    dom_id: '#swagger-ui',
    deepLinking: true,
    presets: [
      SwaggerUIBundle.presets.apis,
      SwaggerUIStandalonePreset
    ],
    plugins: [
      SwaggerUIBundle.plugins.DownloadUrl
    ],
    layout: "StandaloneLayout"
  });

  //</editor-fold>
};
File diff suppressed because one or more lines are too long (12 files)
@@ -20,10 +20,9 @@ echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then
    git commit -a

    if ! git tag $VERSION ; then
        echo "Existing/invalid tag"
@@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then

    git push origin :refs/tags/$LATEST_TAG
    git tag -fa $LATEST_TAG

    echo "remember to push --tags!"
fi

# ----------------------

@@ -67,6 +67,8 @@ del /q .tmp1 .tmp2
@rem -------------- Install and Configure ---------------

call python .\lib\main.py
pause
exit /b

@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers
@@ -9,13 +9,16 @@ cd $scriptdir
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

MINIMUM_PYTHON_VERSION=3.9.0
MAXIMUM_PYTHON_VERSION=3.11.0
PYTHON=""
for candidate in python3.10 python3.9 python3 python python3.11 ; do
for candidate in python3.10 python3.9 python3 python ; do
    if ppath=`which $candidate`; then
        python_version=$($ppath -V | awk '{ print $2 }')
        if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
            PYTHON=$ppath
            break
            if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
                PYTHON=$ppath
                break
            fi
        fi
    fi
done
@@ -28,3 +31,4 @@ if [ -z "$PYTHON" ]; then
fi

exec $PYTHON ./lib/main.py ${@}
read -p "Press any key to exit"
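The `version` helper above flattens a dotted version string into a zero-padded integer so that the shell's plain numeric comparisons can order versions correctly. A quick demonstration using the same definition:

```bash
#!/bin/bash
# Same helper as in the installer: pad each version component to three digits.
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

version 3.10.6   # prints 3010006000
version 3.9.0    # prints 3009000000
# 3010006000 > 3009000000, so 3.10.6 correctly sorts after 3.9.0
```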
@@ -241,14 +241,18 @@ class InvokeAiInstance:

        from plumbum import FG, local

        # Note that we're installing pinned versions of torch and
        # torchvision here, which may not correspond to what is
        # in pyproject.toml. This is a hack to prevent torch 2.0 from
        # being installed and immediately uninstalled and replaced with 1.13
        pip = local[self.pip]

        (
            pip[
                "install",
                "--require-virtualenv",
                "torch",
                "torchvision",
                "torch~=1.13.1",
                "torchvision>=0.14.1",
                "--force-reinstall",
                "--find-links" if find_links is not None else None,
                find_links,
@@ -336,7 +340,8 @@ class InvokeAiInstance:
            elif el in ['-y','--yes','--yes-to-all']:
                new_argv.append(el)
        sys.argv = new_argv

        import requests  # to catch download exceptions
        from messages import introduction

        introduction()
@@ -346,7 +351,21 @@ class InvokeAiInstance:
        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
        # this may change in the future with config refactoring!
        invokeai_configure.main()
        succeeded = False
        try:
            invokeai_configure.main()
            succeeded = True
        except requests.exceptions.ConnectionError as e:
            print(f'\nA network error was encountered during configuration and download: {str(e)}')
        except OSError as e:
            print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
        except Exception as e:
            print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
        finally:
            if not succeeded:
                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
                print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
                print('Alternatively you can relaunch the installer.')

    def install_user_scripts(self):
        """
@@ -364,6 +383,9 @@ class InvokeAiInstance:
            shutil.copy(src, dest)
            os.chmod(dest, 0o0755)

        if OS == "Linux":
            shutil.copy(Path(__file__).parent / '..' / "templates" / "dialogrc", self.runtime / '.dialogrc')

    def update(self):
        pass

installer/templates/dialogrc (new file, 27 lines)
@@ -0,0 +1,27 @@
# Screen
use_shadow = OFF
use_colors = ON
screen_color = (BLACK, BLACK, ON)

# Box
dialog_color = (YELLOW, BLACK , ON)
title_color = (YELLOW, BLACK, ON)
border_color = (YELLOW, BLACK, OFF)
border2_color = (YELLOW, BLACK, OFF)

# Button
button_active_color = (RED, BLACK, OFF)
button_inactive_color = (YELLOW, BLACK, OFF)
button_label_active_color = (YELLOW,BLACK,ON)
button_label_inactive_color = (YELLOW,BLACK,ON)

# Menu box
menubox_color = (BLACK, BLACK, ON)
menubox_border_color = (YELLOW, BLACK, OFF)
menubox_border2_color = (YELLOW, BLACK, OFF)

# Menu window
item_color = (YELLOW, BLACK, OFF)
item_selected_color = (BLACK, YELLOW, OFF)
tag_key_color = (YELLOW, BLACK, OFF)
tag_key_selected_color = (BLACK, YELLOW, OFF)
@@ -6,15 +6,20 @@ setlocal
call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.

:start
echo Do you want to generate images using the
echo 1. command-line
echo 1. command-line interface
echo 2. browser-based UI
echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
echo 5. re-run the configure script to download new models
echo 6. open the developer console
echo 7. command-line help
set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
echo 5. download and install models
echo 6. change InvokeAI startup options
echo 7. re-run the configure script to fix a broken install
echo 8. open the developer console
echo 9. update InvokeAI
echo 10. command-line help
echo Q - quit
set /P restore="Please enter 1-10, Q: [2] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line..
@@ -24,14 +29,20 @@ IF /I "%restore%" == "1" (
    python .venv\Scripts\invokeai.exe --web %*
) ELSE IF /I "%restore%" == "3" (
    echo Starting textual inversion training..
    python .venv\Scripts\invokeai-ti.exe --gui %*
    python .venv\Scripts\invokeai-ti.exe --gui
) ELSE IF /I "%restore%" == "4" (
    echo Starting model merging script..
    python .venv\Scripts\invokeai-merge.exe --gui %*
    python .venv\Scripts\invokeai-merge.exe --gui
) ELSE IF /I "%restore%" == "5" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe %*
    echo Running invokeai-model-install...
    python .venv\Scripts\invokeai-model-install.exe
) ELSE IF /I "%restore%" == "6" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
) ELSE IF /I "%restore%" == "7" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe --yes --default_only
) ELSE IF /I "%restore%" == "8" (
    echo Developer Console
    echo Python command is:
    where python
@@ -43,14 +54,27 @@ IF /I "%restore%" == "1" (
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%restore%" == "7" (
) ELSE IF /I "%restore%" == "9" (
    echo Running invokeai-update...
    python .venv\Scripts\invokeai-update.exe %*
) ELSE IF /I "%restore%" == "10" (
    echo Displaying command line help...
    python .venv\Scripts\invokeai.exe --help %*
    pause
    exit /b
) ELSE IF /I "%restore%" == "q" (
    echo Goodbye!
    goto ending
) ELSE (
    echo Invalid selection
    pause
    exit /b
)
goto start

endlocal
pause

:ending
exit /b

@@ -1,5 +1,10 @@
#!/bin/bash

# MIT License

# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
# Copyright 2023, The InvokeAI Development Team

####
# This launch script assumes that:
# 1. it is located in the runtime directory,
@@ -11,65 +16,168 @@

set -eu

# ensure we're in the correct folder in case user's CWD is somewhere else
# Ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

. .venv/bin/activate

export INVOKEAI_ROOT="$scriptdir"
PARAMS=$@

# set required env var for torch on mac MPS
# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
tui=true
if command -v dialog &>/dev/null; then
    # This must use $@ to properly loop through the arguments passed by the user
    for arg in "$@"; do
        if [ "$arg" == "--no-tui" ]; then
            tui=false
            # Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
            PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
            break
        fi
    done
else
    tui=false
fi

# Set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

if [ "$0" != "bash" ]; then
    echo "Do you want to generate images using the"
    echo "1. command-line"
    echo "2. browser-based UI"
    echo "3. run textual inversion training"
    echo "4. merge models (diffusers type only)"
    echo "5. open the developer console"
    echo "6. re-run the configure script to download new models"
    echo "7. command-line help "
    echo ""
    read -p "Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " yn
    choice=${yn:='2'}
    case $choice in
        1)
            echo "Starting the InvokeAI command-line..."
            exec invokeai $@
            ;;
        2)
            echo "Starting the InvokeAI browser-based UI..."
            exec invokeai --web $@
            ;;
        3)
            echo "Starting Textual Inversion:"
            exec invokeai-ti --gui $@
            ;;
        4)
            echo "Merging Models:"
            exec invokeai-merge --gui $@
            ;;
        5)
            echo "Developer Console:"
            file_name=$(basename "${BASH_SOURCE[0]}")
            bash --init-file "$file_name"
            ;;
        6)
            exec invokeai-configure --root ${INVOKEAI_ROOT}
            ;;
        7)
            exec invokeai --help
            ;;
        *)
            echo "Invalid selection"
            exit;;
# Primary function for the case statement to determine user input
do_choice() {
    case $1 in
        1)
            clear
            printf "Generate images with a browser-based interface\n"
            invokeai --web $PARAMS
            ;;
        2)
            clear
            printf "Generate images using a command-line interface\n"
            invokeai $PARAMS
            ;;
        3)
            clear
            printf "Textual inversion training\n"
            invokeai-ti --gui $PARAMS
            ;;
        4)
            clear
            printf "Merge models (diffusers type only)\n"
            invokeai-merge --gui $PARAMS
            ;;
        5)
            clear
            printf "Download and install models\n"
            invokeai-model-install --root ${INVOKEAI_ROOT}
            ;;
        6)
            clear
            printf "Change InvokeAI startup options\n"
            invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
            ;;
        7)
            clear
            printf "Re-run the configure script to fix a broken install\n"
            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
            ;;
        8)
            clear
            printf "Open the developer console\n"
            file_name=$(basename "${BASH_SOURCE[0]}")
            bash --init-file "$file_name"
            ;;
        9)
            clear
            printf "Update InvokeAI\n"
            invokeai-update
            ;;
        10)
            clear
            printf "Command-line help\n"
            invokeai --help
            ;;
        "HELP 1")
            clear
            printf "Command-line help\n"
            invokeai --help
            ;;
        *)
            clear
            printf "Exiting...\n"
            exit
            ;;
    esac
    clear
}

# Dialog-based TUI for launching Invoke functions
do_dialog() {
    options=(
        1 "Generate images with a browser-based interface"
        2 "Generate images using a command-line interface"
        3 "Textual inversion training"
        4 "Merge models (diffusers type only)"
        5 "Download and install models"
        6 "Change InvokeAI startup options"
        7 "Re-run the configure script to fix a broken install"
        8 "Open the developer console"
        9 "Update InvokeAI")

    choice=$(dialog --clear \
        --backtitle "\Zb\Zu\Z3InvokeAI" \
        --colors \
        --title "What would you like to run?" \
        --ok-label "Run" \
        --cancel-label "Exit" \
        --help-button \
        --help-label "CLI Help" \
        --menu "Select an option:" \
        0 0 0 \
        "${options[@]}" \
        2>&1 >/dev/tty) || clear
    do_choice "$choice"
    clear
}

# Command-line interface for launching Invoke functions
do_line_input() {
    clear
    printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
    printf "Do you want to generate images using the\n"
    printf "1: Browser-based UI\n"
    printf "2: Command-line interface\n"
    printf "3: Run textual inversion training\n"
    printf "4: Merge models (diffusers type only)\n"
    printf "5: Download and install models\n"
    printf "6: Change InvokeAI startup options\n"
    printf "7: Re-run the configure script to fix a broken install\n"
    printf "8: Open the developer console\n"
    printf "9: Update InvokeAI\n"
    printf "10: Command-line help\n"
    printf "Q: Quit\n\n"
    read -p "Please enter 1-10, Q: [1] " yn
    choice=${yn:='1'}
    do_choice $choice
    clear
}

# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
if [ "$0" != "bash" ]; then
    while true; do
        if $tui; then
            # .dialogrc must be located in the same directory as the invoke.sh script
            export DIALOGRC="./.dialogrc"
            do_dialog
        else
            do_line_input
        fi
    done
else # in developer console
    python --version
    echo "Press ^D to exit"
    printf "Press ^D to exit\n"
    export PS1="(InvokeAI) \u@\h \w> "
fi

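The rewritten launcher builds its menu on the stock `dialog` utility. The following is a minimal, self-contained sketch of the same pattern, with the menu trimmed and the actions replaced by echoes (illustrative only, not the shipped script):

```bash
#!/bin/bash
# Minimal sketch of the dialog-driven menu used by invoke.sh.
options=(
    1 "Generate images with a browser-based interface"
    2 "Generate images using a command-line interface")

# dialog writes the chosen tag to stderr; redirect it so we can capture it.
choice=$(dialog --clear --menu "Select an option:" 0 0 0 \
    "${options[@]}" 2>&1 >/dev/tty) || clear

case $choice in
    1) echo "would run: invokeai --web" ;;
    2) echo "would run: invokeai" ;;
esac
```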
@@ -25,12 +25,15 @@ from invokeai.backend.modules.parameters import parameters_to_command
import invokeai.frontend.dist as frontend
from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, split_weighted_subprompts, \
    get_tokenizer
from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
from ldm.invoke.generator.inpaint import infill_methods
from ldm.invoke.globals import Globals
from ldm.invoke.globals import Globals, global_converted_ckpts_dir
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend
from compel.prompt_parser import Blend
from ldm.invoke.globals import global_models_dir
from ldm.invoke.merge_diffusers import merge_diffusion_models

# Loading Arguments
opt = Args()
@@ -43,7 +46,8 @@ if not os.path.isabs(args.outdir):

# normalize the config directory relative to root
if not os.path.isabs(opt.conf):
    opt.conf = os.path.normpath(os.path.join(Globals.root,opt.conf))
    opt.conf = os.path.normpath(os.path.join(Globals.root, opt.conf))


class InvokeAIWebServer:
    def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
@@ -189,7 +193,8 @@ class InvokeAIWebServer:
                (width, height) = pil_image.size

                thumbnail_path = save_thumbnail(
                    pil_image, os.path.basename(file_path), self.thumbnail_image_path
                    pil_image, os.path.basename(
                        file_path), self.thumbnail_image_path
                )

                response = {
@@ -203,11 +208,7 @@ class InvokeAIWebServer:
                return make_response(response, 200)

            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)
                return make_response("Error uploading file", 500)

        self.load_socketio_listeners(self.socketio)
@@ -264,14 +265,16 @@ class InvokeAIWebServer:
        # location for "finished" images
        self.result_path = args.outdir
        # temporary path for intermediates
        self.intermediate_path = os.path.join(self.result_path, "intermediates/")
        self.intermediate_path = os.path.join(
            self.result_path, "intermediates/")
        # path for user-uploaded init images and masks
        self.init_image_path = os.path.join(self.result_path, "init-images/")
        self.mask_image_path = os.path.join(self.result_path, "mask-images/")
        # path for temp images e.g. gallery generations which are not committed
        self.temp_image_path = os.path.join(self.result_path, "temp-images/")
        # path for thumbnail images
        self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
        self.thumbnail_image_path = os.path.join(
            self.result_path, "thumbnails/")
        # txt log
        self.log_path = os.path.join(self.result_path, "invoke_log.txt")
        # make all output paths
@@ -290,7 +293,7 @@ class InvokeAIWebServer:
    def load_socketio_listeners(self, socketio):
        @socketio.on("requestSystemConfig")
        def handle_request_capabilities():
            print(f">> System config requested")
            print(">> System config requested")
            config = self.get_system_config()
            config["model_list"] = self.generate.model_manager.list_models()
            config["infill_methods"] = infill_methods()
@@ -301,20 +304,19 @@ class InvokeAIWebServer:
            try:
                if not search_folder:
                    socketio.emit(
                        "foundModels",
                        {'search_folder': None, 'found_models': None},
                    )
                else:
                    search_folder, found_models = self.generate.model_manager.search_models(search_folder)
                    search_folder, found_models = self.generate.model_manager.search_models(
                        search_folder)
                    socketio.emit(
                        "foundModels",
                        {'search_folder': search_folder, 'found_models': found_models},
                        {'search_folder': search_folder,
                            'found_models': found_models},
                    )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                self.handle_exceptions(e)
                print("\n")

        @socketio.on("addNewModel")
@@ -344,11 +346,7 @@ class InvokeAIWebServer:
                )
                print(f">> New Model Added: {model_name}")
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("deleteModel")
        def handle_delete_model(model_name: str):
@@ -364,11 +362,7 @@ class InvokeAIWebServer:
                )
                print(f">> Model Deleted: {model_name}")
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("requestModelChange")
        def handle_set_model(model_name: str):
@@ -387,11 +381,110 @@ class InvokeAIWebServer:
                    {"model_name": model_name, "model_list": model_list},
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")
                self.handle_exceptions(e)

                traceback.print_exc()
                print("\n")
        @socketio.on('convertToDiffusers')
        def convert_to_diffusers(model_to_convert: dict):
            try:
                if (model_info := self.generate.model_manager.model_info(model_name=model_to_convert['model_name'])):
                    if 'weights' in model_info:
                        ckpt_path = Path(model_info['weights'])
                        original_config_file = Path(model_info['config'])
                        model_name = model_to_convert['model_name']
                        model_description = model_info['description']
                    else:
                        self.socketio.emit(
                            "error", {"message": "Model is not a valid checkpoint file"})
                else:
                    self.socketio.emit(
                        "error", {"message": "Could not retrieve model info."})

                if not ckpt_path.is_absolute():
                    ckpt_path = Path(Globals.root, ckpt_path)

                if original_config_file and not original_config_file.is_absolute():
                    original_config_file = Path(
                        Globals.root, original_config_file)

                diffusers_path = Path(
                    ckpt_path.parent.absolute(),
                    f'{model_name}_diffusers'
                )

                if model_to_convert['save_location'] == 'root':
                    diffusers_path = Path(
                        global_converted_ckpts_dir(), f'{model_name}_diffusers')

                if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None:
                    diffusers_path = Path(
                        model_to_convert['custom_location'], f'{model_name}_diffusers')

                if diffusers_path.exists():
                    shutil.rmtree(diffusers_path)

                self.generate.model_manager.convert_and_import(
                    ckpt_path,
                    diffusers_path,
                    model_name=model_name,
                    model_description=model_description,
                    vae=None,
                    original_config_file=original_config_file,
                    commit_to_conf=opt.conf,
                )

                new_model_list = self.generate.model_manager.list_models()
                socketio.emit(
                    "modelConverted",
                    {"new_model_name": model_name,
                        "model_list": new_model_list, 'update': True},
                )
                print(f">> Model Converted: {model_name}")
            except Exception as e:
                self.handle_exceptions(e)

        @socketio.on('mergeDiffusersModels')
        def merge_diffusers_models(model_merge_info: dict):
            try:
                models_to_merge = model_merge_info['models_to_merge']
                model_ids_or_paths = [
                    self.generate.model_manager.model_name_or_path(x) for x in models_to_merge]
                merged_pipe = merge_diffusion_models(
                    model_ids_or_paths, model_merge_info['alpha'], model_merge_info['interp'], model_merge_info['force'])

                dump_path = global_models_dir() / 'merged_models'
                if model_merge_info['model_merge_save_path'] is not None:
                    dump_path = Path(model_merge_info['model_merge_save_path'])

                os.makedirs(dump_path, exist_ok=True)
                dump_path = dump_path / model_merge_info['merged_model_name']
                merged_pipe.save_pretrained(dump_path, safe_serialization=1)

                merged_model_config = dict(
                    model_name=model_merge_info['merged_model_name'],
                    description=f'Merge of models {", ".join(models_to_merge)}',
                    commit_to_conf=opt.conf
                )

                if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None):
                    print(
                        f">> Using configured VAE assigned to {models_to_merge[0]}")
                    merged_model_config.update(vae=vae)

                self.generate.model_manager.import_diffuser_model(
                    dump_path, **merged_model_config)
                new_model_list = self.generate.model_manager.list_models()

                socketio.emit(
                    "modelsMerged",
                    {"merged_models": models_to_merge,
                        "merged_model_name": model_merge_info['merged_model_name'],
                        "model_list": new_model_list, 'update': True},
                )
                print(f">> Models Merged: {models_to_merge}")
                print(
                    f">> New Model Added: {model_merge_info['merged_model_name']}")
            except Exception as e:
                self.handle_exceptions(e)

        @socketio.on("requestEmptyTempFolder")
        def empty_temp_folder():
@@ -406,22 +499,20 @@ class InvokeAIWebServer:
                        )
                        os.remove(thumbnail_path)
                    except Exception as e:
                        socketio.emit("error", {"message": f"Unable to delete {f}: {str(e)}"})
                        socketio.emit(
                            "error", {"message": f"Unable to delete {f}: {str(e)}"})
                        pass

                socketio.emit("tempFolderEmptied")
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("requestSaveStagingAreaImageToGallery")
        def save_temp_image_to_gallery(url):
            try:
                image_path = self.get_image_path_from_url(url)
                new_path = os.path.join(self.result_path, os.path.basename(image_path))
                new_path = os.path.join(
                    self.result_path, os.path.basename(image_path))
                shutil.copy2(image_path, new_path)

                if os.path.splitext(new_path)[1] == ".png":
@@ -434,7 +525,8 @@ class InvokeAIWebServer:
                (width, height) = pil_image.size

                thumbnail_path = save_thumbnail(
                    pil_image, os.path.basename(new_path), self.thumbnail_image_path
                    pil_image, os.path.basename(
                        new_path), self.thumbnail_image_path
                )

                image_array = [
@@ -455,11 +547,7 @@ class InvokeAIWebServer:
                )

            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("requestLatestImages")
        def handle_request_latest_images(category, latest_mtime):
@@ -497,7 +585,8 @@ class InvokeAIWebServer:
                        (width, height) = pil_image.size

                        thumbnail_path = save_thumbnail(
                            pil_image, os.path.basename(path), self.thumbnail_image_path
                            pil_image, os.path.basename(
                                path), self.thumbnail_image_path
                        )

                        image_array.append(
@@ -515,7 +604,8 @@ class InvokeAIWebServer:
                            }
                        )
                    except Exception as e:
                        socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
                        socketio.emit(
                            "error", {"message": f"Unable to load {path}: {str(e)}"})
                        pass

                socketio.emit(
@@ -523,11 +613,7 @@ class InvokeAIWebServer:
                    {"images": image_array, "category": category},
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("requestImages")
        def handle_request_images(category, earliest_mtime=None):
@@ -569,7 +655,8 @@ class InvokeAIWebServer:
                        (width, height) = pil_image.size

                        thumbnail_path = save_thumbnail(
                            pil_image, os.path.basename(path), self.thumbnail_image_path
                            pil_image, os.path.basename(
                                path), self.thumbnail_image_path
                        )

                        image_array.append(
@@ -588,7 +675,8 @@ class InvokeAIWebServer:
                        )
                    except Exception as e:
                        print(f">> Unable to load {path}")
                        socketio.emit("error", {"message": f"Unable to load {path}: {str(e)}"})
                        socketio.emit(
                            "error", {"message": f"Unable to load {path}: {str(e)}"})
                        pass

                socketio.emit(
@@ -600,11 +688,7 @@ class InvokeAIWebServer:
                    },
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("generateImage")
        def handle_generate_image_event(
@@ -626,7 +710,8 @@ class InvokeAIWebServer:
                        printable_parameters["init_mask"][:64] + "..."
                    )

                print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
                print(
                    f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
                print(f'>> ESRGAN Parameters: {esrgan_parameters}')
                print(f'>> Facetool Parameters: {facetool_parameters}')

@@ -636,11 +721,7 @@ class InvokeAIWebServer:
                    facetool_parameters,
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("runPostprocessing")
        def handle_run_postprocessing(original_image, postprocessing_parameters):
@@ -662,16 +743,18 @@ class InvokeAIWebServer:

                try:
                    seed = original_image["metadata"]["image"]["seed"]
                except (KeyError) as e:
                except KeyError:
                    seed = "unknown_seed"
                    pass

                if postprocessing_parameters["type"] == "esrgan":
                    progress.set_current_status("common:statusUpscalingESRGAN")
                    progress.set_current_status("common.statusUpscalingESRGAN")
                elif postprocessing_parameters["type"] == "gfpgan":
                    progress.set_current_status("common:statusRestoringFacesGFPGAN")
                    progress.set_current_status(
                        "common.statusRestoringFacesGFPGAN")
                elif postprocessing_parameters["type"] == "codeformer":
                    progress.set_current_status("common:statusRestoringFacesCodeFormer")
                    progress.set_current_status(
                        "common.statusRestoringFacesCodeFormer")

                socketio.emit("progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)
@@ -680,7 +763,8 @@ class InvokeAIWebServer:
                    image = self.esrgan.process(
                        image=image,
                        upsampler_scale=postprocessing_parameters["upscale"][0],
                        strength=postprocessing_parameters["upscale"][1],
                        denoise_str=postprocessing_parameters["upscale"][1],
                        strength=postprocessing_parameters["upscale"][2],
                        seed=seed,
                    )
                elif postprocessing_parameters["type"] == "gfpgan":
@@ -704,7 +788,7 @@ class InvokeAIWebServer:
                        f'{postprocessing_parameters["type"]} is not a valid postprocessing type'
                    )

                progress.set_current_status("common:statusSavingImage")
                progress.set_current_status("common.statusSavingImage")
                socketio.emit("progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)

@@ -751,15 +835,11 @@ class InvokeAIWebServer:
                    },
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

        @socketio.on("cancel")
        def handle_cancel():
            print(f">> Cancel processing requested")
            print(">> Cancel processing requested")
            self.canceled.set()

        # TODO: I think this needs a safety mechanism.
@@ -780,11 +860,7 @@ class InvokeAIWebServer:
                    {"url": url, "uuid": uuid, "category": category},
                )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")
                self.handle_exceptions(e)

    # App Functions
    def get_system_config(self):
@@ -841,12 +917,10 @@ class InvokeAIWebServer:
            So we need to convert each into a PIL Image.
            """

            truncated_outpaint_image_b64 = generation_parameters["init_img"][:64]
            truncated_outpaint_mask_b64 = generation_parameters["init_mask"][:64]

            init_img_url = generation_parameters["init_img"]

            original_bounding_box = generation_parameters["bounding_box"].copy()
            original_bounding_box = generation_parameters["bounding_box"].copy(
            )

            initial_image = dataURL_to_image(
                generation_parameters["init_img"]
@@ -923,7 +997,8 @@ class InvokeAIWebServer:
        elif generation_parameters["generation_mode"] == "img2img":
            init_img_url = generation_parameters["init_img"]
            init_img_path = self.get_image_path_from_url(init_img_url)
            generation_parameters["init_img"] = Image.open(init_img_path).convert('RGB')
            generation_parameters["init_img"] = Image.open(
                init_img_path).convert('RGB')

        def image_progress(sample, step):
            if self.canceled.is_set():
@@ -934,10 +1009,10 @@ class InvokeAIWebServer:
            nonlocal progress

            generation_messages = {
                "txt2img": "common:statusGeneratingTextToImage",
                "img2img": "common:statusGeneratingImageToImage",
                "inpainting": "common:statusGeneratingInpainting",
                "outpainting": "common:statusGeneratingOutpainting",
                "txt2img": "common.statusGeneratingTextToImage",
                "img2img": "common.statusGeneratingImageToImage",
                "inpainting": "common.statusGeneratingInpainting",
                "outpainting": "common.statusGeneratingOutpainting",
            }

            progress.set_current_step(step + 1)
@@ -982,9 +1057,9 @@ class InvokeAIWebServer:
                },
            )


            if generation_parameters["progress_latents"]:
                image = self.generate.sample_to_lowres_estimated_image(sample)
                image = self.generate.sample_to_lowres_estimated_image(
                    sample)
                (width, height) = image.size
                width *= 8
                height *= 8
@@ -1003,7 +1078,8 @@ class InvokeAIWebServer:
                    },
                )

            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            self.socketio.emit(
                "progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

        def image_done(image, seed, first_seed, attention_maps_image=None):
@@ -1015,7 +1091,6 @@ class InvokeAIWebServer:
            nonlocal facetool_parameters
            nonlocal progress

            step_index = 1
            nonlocal prior_variations

            """
@@ -1029,9 +1104,10 @@ class InvokeAIWebServer:
                    **generation_parameters["bounding_box"],
                )

            progress.set_current_status("common:statusGenerationComplete")
            progress.set_current_status("common.statusGenerationComplete")

            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            self.socketio.emit(
                "progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

            all_parameters = generation_parameters
@@ -1042,7 +1118,8 @@ class InvokeAIWebServer:
                and all_parameters["variation_amount"] > 0
            ):
                first_seed = first_seed or seed
                this_variation = [[seed, all_parameters["variation_amount"]]]
                this_variation = [
                    [seed, all_parameters["variation_amount"]]]
                all_parameters["with_variations"] = (
                    prior_variations + this_variation
                )
@@ -1056,14 +1133,16 @@ class InvokeAIWebServer:
                raise CanceledException

            if esrgan_parameters:
                progress.set_current_status("common:statusUpscaling")
                progress.set_current_status("common.statusUpscaling")
                progress.set_current_status_has_steps(False)
                self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                self.socketio.emit(
                    "progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)

                image = self.esrgan.process(
                    image=image,
                    upsampler_scale=esrgan_parameters["level"],
                    denoise_str=esrgan_parameters['denoise_str'],
                    strength=esrgan_parameters["strength"],
                    seed=seed,
                )
@@ -1071,6 +1150,7 @@ class InvokeAIWebServer:
                postprocessing = True
                all_parameters["upscale"] = [
                    esrgan_parameters["level"],
                    esrgan_parameters['denoise_str'],
                    esrgan_parameters["strength"],
                ]

@@ -1079,12 +1159,15 @@ class InvokeAIWebServer:

            if facetool_parameters:
                if facetool_parameters["type"] == "gfpgan":
                    progress.set_current_status("common:statusRestoringFacesGFPGAN")
                    progress.set_current_status(
                        "common.statusRestoringFacesGFPGAN")
                elif facetool_parameters["type"] == "codeformer":
                    progress.set_current_status("common:statusRestoringFacesCodeFormer")
                    progress.set_current_status(
                        "common.statusRestoringFacesCodeFormer")

                progress.set_current_status_has_steps(False)
                self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                self.socketio.emit(
                    "progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)

                if facetool_parameters["type"] == "gfpgan":
@@ -1113,8 +1196,9 @@ class InvokeAIWebServer:
                ]
                all_parameters["facetool_type"] = facetool_parameters["type"]

            progress.set_current_status("common:statusSavingImage")
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            progress.set_current_status("common.statusSavingImage")
            self.socketio.emit(
                "progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

            # restore the stashed URLS and discard the paths, we are about to send the result to client
@@ -1125,12 +1209,14 @@ class InvokeAIWebServer:
            )

            if "init_mask" in all_parameters:
                all_parameters["init_mask"] = ""  # TODO: store the mask in metadata
                # TODO: store the mask in metadata
                all_parameters["init_mask"] = ""

            if generation_parameters["generation_mode"] == "unifiedCanvas":
                all_parameters["bounding_box"] = original_bounding_box

            metadata = self.parameters_to_generated_image_metadata(all_parameters)
            metadata = self.parameters_to_generated_image_metadata(
                all_parameters)

            command = parameters_to_command(all_parameters)

@@ -1160,17 +1246,20 @@ class InvokeAIWebServer:

            if progress.total_iterations > progress.current_iteration:
                progress.set_current_step(1)
                progress.set_current_status("common:statusIterationComplete")
                progress.set_current_status(
                    "common.statusIterationComplete")
                progress.set_current_status_has_steps(False)
            else:
                progress.mark_complete()

            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            self.socketio.emit(
                "progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

            parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
            parsed_prompt, _ = get_prompt_structure(
                generation_parameters["prompt"])
            tokens = None if type(parsed_prompt) is Blend else \
                get_tokens_for_prompt(self.generate.model, parsed_prompt)
                get_tokens_for_prompt_object(get_tokenizer(self.generate.model), parsed_prompt)
            attention_maps_image_base64_url = None if attention_maps_image is None \
                else image_to_dataURL(attention_maps_image)

@@ -1221,11 +1310,7 @@ class InvokeAIWebServer:
            # Clear the CUDA cache on an exception
            self.empty_cuda_cache()
            print(e)
            self.socketio.emit("error", {"message": (str(e))})
            print("\n")

            traceback.print_exc()
            print("\n")
            self.handle_exceptions(e)

    def empty_cuda_cache(self):
        if self.generate.device.type == "cuda":
@@ -1287,7 +1372,8 @@ class InvokeAIWebServer:
                {
                    "type": "esrgan",
                    "scale": int(parameters["upscale"][0]),
                    "strength": float(parameters["upscale"][1]),
                    "denoise_str": int(parameters["upscale"][1]),
                    "strength": float(parameters["upscale"][2]),
                }
            )

@@ -1298,13 +1384,6 @@ class InvokeAIWebServer:
            # semantic drift
            rfc_dict["sampler"] = parameters["sampler_name"]

            # display weighted subprompts (liable to change)
            subprompts = split_weighted_subprompts(
                parameters["prompt"], skip_normalize=True
            )
            subprompts = [{"prompt": x[0], "weight": x[1]} for x in subprompts]
            rfc_dict["prompt"] = subprompts

            # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
            variations = []

@@ -1331,17 +1410,14 @@ class InvokeAIWebServer:
            return metadata

        except Exception as e:
            self.socketio.emit("error", {"message": (str(e))})
            print("\n")

            traceback.print_exc()
            print("\n")
            self.handle_exceptions(e)

    def parameters_to_post_processed_image_metadata(
        self, parameters, original_image_path
    ):
        try:
            current_metadata = retrieve_metadata(original_image_path)["sd-metadata"]
            current_metadata = retrieve_metadata(
                original_image_path)["sd-metadata"]
            postprocessing_metadata = {}

            """
@@ -1361,7 +1437,8 @@ class InvokeAIWebServer:
            if parameters["type"] == "esrgan":
                postprocessing_metadata["type"] = "esrgan"
                postprocessing_metadata["scale"] = parameters["upscale"][0]
                postprocessing_metadata["strength"] = parameters["upscale"][1]
                postprocessing_metadata["denoise_str"] = parameters["upscale"][1]
                postprocessing_metadata["strength"] = parameters["upscale"][2]
            elif parameters["type"] == "gfpgan":
                postprocessing_metadata["type"] = "gfpgan"
                postprocessing_metadata["strength"] = parameters["facetool_strength"]
@@ -1380,16 +1457,13 @@ class InvokeAIWebServer:
                    postprocessing_metadata
                )
            else:
                current_metadata["image"]["postprocessing"] = [postprocessing_metadata]
                current_metadata["image"]["postprocessing"] = [
                    postprocessing_metadata]

            return current_metadata

        except Exception as e:
            self.socketio.emit("error", {"message": (str(e))})
            print("\n")

            traceback.print_exc()
            print("\n")
            self.handle_exceptions(e)

    def save_result_image(
        self,
@@ -1419,7 +1493,7 @@ class InvokeAIWebServer:
|
||||
if step_index:
|
||||
filename += f".{step_index}"
|
||||
if postprocessing:
|
||||
filename += f".postprocessed"
|
||||
filename += ".postprocessed"
|
||||
|
||||
filename += ".png"
|
||||
|
||||
@@ -1433,11 +1507,7 @@ class InvokeAIWebServer:
|
||||
return os.path.abspath(path)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def make_unique_init_image_filename(self, name):
|
||||
try:
|
||||
@@ -1446,11 +1516,7 @@ class InvokeAIWebServer:
|
||||
name = f"{split[0]}.{uuid}{split[1]}"
|
||||
return name
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def calculate_real_steps(self, steps, strength, has_init_image):
|
||||
import math
|
||||
@@ -1465,11 +1531,7 @@ class InvokeAIWebServer:
|
||||
file.writelines(message)
|
||||
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
def get_image_path_from_url(self, url):
|
||||
"""Given a url to an image used by the client, returns the absolute file path to that image"""
|
||||
@@ -1492,18 +1554,15 @@ class InvokeAIWebServer:
|
||||
)
|
||||
elif "thumbnails" in url:
|
||||
return os.path.abspath(
|
||||
os.path.join(self.thumbnail_image_path, os.path.basename(url))
|
||||
os.path.join(self.thumbnail_image_path,
|
||||
os.path.basename(url))
|
||||
)
|
||||
else:
|
||||
return os.path.abspath(
|
||||
os.path.join(self.result_path, os.path.basename(url))
|
||||
)
|
||||
except Exception as e:
|
||||
self.socketio.emit("error", {"message": (str(e))})
|
||||
print("\n")
|
||||
|
||||
traceback.print_exc()
|
||||
print("\n")
|
||||
self.handle_exceptions(e)
|
||||
|
||||
     def get_url_from_image_path(self, path):
         """Given an absolute file path to an image, returns the URL that the client can use to load the image"""
@@ -1521,11 +1580,7 @@ class InvokeAIWebServer:
             else:
                 return os.path.join(self.result_url, os.path.basename(path))
         except Exception as e:
-            self.socketio.emit("error", {"message": (str(e))})
-            print("\n")
-
-            traceback.print_exc()
-            print("\n")
+            self.handle_exceptions(e)
     def save_file_unique_uuid_name(self, bytes, name, path):
         try:
@@ -1544,11 +1599,13 @@ class InvokeAIWebServer:

             return file_path
         except Exception as e:
-            self.socketio.emit("error", {"message": (str(e))})
-            print("\n")
+            self.handle_exceptions(e)

-            traceback.print_exc()
-            print("\n")
+    def handle_exceptions(self, exception, emit_key: str = 'error'):
+        self.socketio.emit(emit_key, {"message": (str(exception))})
+        print("\n")
+        traceback.print_exc()
+        print("\n")
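This is the payoff of the preceding hunks: five copies of the same emit-plus-traceback block collapse into one handle_exceptions helper, and the emit_key parameter lets a caller route the message to a socket.io event other than 'error'. A stripped-down sketch of the pattern with a stand-in emitter (the real Flask-SocketIO server is outside this diff):

    import traceback

    class FakeSocketIO:
        # Stand-in for flask_socketio.SocketIO, for illustration only.
        def emit(self, event, payload):
            print(f"emit {event}: {payload}")

    class Server:
        def __init__(self):
            self.socketio = FakeSocketIO()

        def handle_exceptions(self, exception, emit_key: str = "error"):
            # One place to notify the client and log the full traceback.
            self.socketio.emit(emit_key, {"message": str(exception)})
            traceback.print_exc()

    try:
        raise ValueError("boom")
    except Exception as e:
        Server().handle_exceptions(e)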
 class Progress:
@@ -1569,7 +1626,7 @@ class Progress:
         self.total_iterations = (
             generation_parameters["iterations"] if generation_parameters else 1
         )
-        self.current_status = "common:statusPreparing"
+        self.current_status = "common.statusPreparing"
         self.is_processing = True
         self.current_status_has_steps = False
         self.has_error = False
@@ -1599,7 +1656,7 @@ class Progress:
         self.has_error = has_error

     def mark_complete(self):
-        self.current_status = "common:statusProcessingComplete"
+        self.current_status = "common.statusProcessingComplete"
         self.current_step = 0
         self.total_steps = 0
         self.current_iteration = 0
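The ':' to '.' switch in these status strings is not cosmetic: the strings double as translation keys, and later in this diff the per-namespace locale files (dist/locales/common/*.json) are deleted in favour of one nested JSON per language, so "common" becomes a top-level object addressed with a dot path rather than an i18next namespace. A hedged sketch of resolving such a key against the new layout (the file path and eager load are illustrative assumptions):

    import json

    def resolve_status(key: str, locale_file: str = "dist/locales/en.json") -> str:
        # "common.statusPreparing" -> locale["common"]["statusPreparing"]
        with open(locale_file, encoding="utf-8") as f:
            node = json.load(f)
        for part in key.split("."):
            node = node[part]
        return node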
@@ -1661,10 +1718,12 @@ def dataURL_to_image(dataURL: str) -> ImageType:
     )
     return image


+"""
+Converts an image into a base64 image dataURL.
+"""


 def image_to_dataURL(image: ImageType) -> str:
     buffered = io.BytesIO()
     image.save(buffered, format="PNG")
@@ -1674,7 +1733,6 @@ def image_to_dataURL(image: ImageType) -> str:
     return image_base64


 """
 Converts a base64 image dataURL into bytes.
 The dataURL is split on the first comma.
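Assembled from the fragments visible in these hunks plus the standard library, the two helpers are short enough to sketch in full; treat the exact data-URL prefix as an assumption:

    import base64
    import io

    from PIL import Image

    def image_to_dataURL(image: Image.Image) -> str:
        # Serialize to PNG in memory, then base64-encode for the client.
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        return "data:image/png;base64," + base64.b64encode(
            buffered.getvalue()
        ).decode("UTF-8")

    def dataURL_to_bytes(dataURL: str) -> bytes:
        # The dataURL is split on the first comma; the remainder is base64.
        return base64.decodebytes(dataURL.split(",", 1)[1].encode("UTF-8"))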
@@ -6,83 +6,83 @@ stable-diffusion-1.5:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: True
   default: True
-inpainting-1.5:
+sd-inpainting-1.5:
   description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
   repo_id: runwayml/stable-diffusion-inpainting
   format: diffusers
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: True
 dreamlike-diffusion-1.0:
   description: An SD 1.5 model fine tuned on high quality art by dreamlike.art, diffusers version (2.13 GB)
   format: diffusers
   repo_id: dreamlike-art/dreamlike-diffusion-1.0
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: True
-dreamlike-photoreal-2.0:
-  description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
-  format: diffusers
-  repo_id: dreamlike-art/dreamlike-photoreal-2.0
-  recommended: False
 stable-diffusion-2.1-768:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-1
   format: diffusers
   recommended: True
 stable-diffusion-2.1-base:
-  description: Stable Diffusion version 2.1 diffusers base model, trained on 512 pixel images (5.21 GB)
+  description: Stable Diffusion version 2.1 diffusers model, trained on 512 pixel images (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-1-base
   format: diffusers
   recommended: False
 sd-inpainting-2.0:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-inpainting
   format: diffusers
   recommended: False
 analog-diffusion-1.0:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
   repo_id: wavymulder/Analog-Diffusion
   format: diffusers
   recommended: false
 deliberate-1.0:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
   format: diffusers
   repo_id: XpucT/Deliberate
   recommended: False
 d&d-diffusion-1.0:
   description: Dungeons & Dragons characters (2.13 GB)
   format: diffusers
   repo_id: 0xJustin/Dungeons-and-Diffusion
   recommended: False
+dreamlike-photoreal-2.0:
+  description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
+  format: diffusers
+  repo_id: dreamlike-art/dreamlike-photoreal-2.0
+  recommended: False
 inkpunk-1.0:
   description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
   format: diffusers
   repo_id: Envvi/Inkpunk-Diffusion
   recommended: False
 openjourney-4.0:
-  description: An SD 1.5 model fine tuned on Midjourney images by PromptHero - include "mdjrny-v4 style" in your prompts (2.13 GB)
-  format: diffusers
-  repo_id: prompthero/openjourney
-  vae:
+  description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
+  format: diffusers
+  repo_id: prompthero/openjourney
+  vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
 nitro-diffusion-1.0:
   description: A SD 1.5 model trained on three artstyles - prompt with "archer style", "arcane style" and/or "modern disney style" (2.13 GB)
   repo_id: nitrosocke/Nitro-Diffusion
   recommended: False
 portrait-plus-1.0:
   description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
   format: diffusers
   repo_id: wavymulder/portraitplus
   recommended: False
 seek-art-mega-1.0:
   description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
   repo_id: coreco/seek.art_MEGA
   format: diffusers
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
 trinart-2.0:
-  description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures, diffusers version (2.13 GB)
+  description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
   repo_id: naclbit/trinart_stable_diffusion_v2
   format: diffusers
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
 trinart-characters-2_0:
   description: An SD model finetuned with 19.2M anime/manga style images (ckpt version) (4.27 GB)
   repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
   config: v1-inference.yaml
   file: derrida_final.ckpt
   format: ckpt
 waifu-diffusion-1.4:
   description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
   repo_id: hakurei/waifu-diffusion
   format: diffusers
   vae:
     repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
     file: autoencoder_fix_kl-f8-trinart_characters.ckpt
   width: 512
   height: 512
   recommended: False
 ft-mse-improved-autoencoder-840000:
   description: StabilityAI improved autoencoder fine-tuned for human faces. Improves legacy .ckpt models (335 MB)
   repo_id: stabilityai/sd-vae-ft-mse-original
   format: ckpt
   config: VAE/default
   file: vae-ft-mse-840000-ema-pruned.ckpt
   width: 512
   height: 512
   recommended: True
 trinart_vae:
   description: Custom autoencoder for trinart_characters for legacy .ckpt models only (335 MB)
   repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
   config: VAE/trinart
   format: ckpt
   file: autoencoder_fix_kl-f8-trinart_characters.ckpt
   width: 512
   height: 512
-  repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
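A short sketch of loading and sanity-checking a catalogue in this shape (PyYAML-based; the required-key rules are read off the entries above and are assumptions, not InvokeAI's actual loader):

    import yaml

    REQUIRED = {
        "diffusers": {"description", "repo_id", "format"},
        "ckpt": {"description", "repo_id", "format", "file"},
    }

    def check_catalogue(path: str) -> None:
        with open(path, encoding="utf-8") as f:
            models = yaml.safe_load(f)
        for name, spec in models.items():
            fmt = spec.get("format", "diffusers")
            missing = REQUIRED.get(fmt, set()) - spec.keys()
            if missing:
                print(f"{name}: missing keys {sorted(missing)}")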
67 invokeai/configs/stable-diffusion/v2-inference.yaml (new file)
@@ -0,0 +1,67 @@
model:
  base_learning_rate: 1.0e-4
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False # we set this to false because this is an inference only config

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          #attn_type: "vanilla-xformers"
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
      params:
        freeze: True
        layer: "penultimate"
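Every target:/params: pair in this file follows the ldm convention: a dotted import path plus the kwargs to construct it with. A minimal sketch of the factory that consumes such nodes (this mirrors ldm's well-known instantiate_from_config as an illustration, not a quote of InvokeAI's code):

    import importlib

    from omegaconf import OmegaConf

    def instantiate_from_config(config):
        # "ldm.models.autoencoder.AutoencoderKL" -> import module, get class, call it
        module_path, cls_name = config["target"].rsplit(".", 1)
        cls = getattr(importlib.import_module(module_path), cls_name)
        return cls(**config.get("params", {}))

    cfg = OmegaConf.load("invokeai/configs/stable-diffusion/v2-inference.yaml")
    # model = instantiate_from_config(cfg.model)  # requires the ldm package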
@@ -2,4 +2,4 @@ dist/
 .husky/
 node_modules/
+patches/
 public/
-stats.html
11 invokeai/frontend/.gitignore (vendored)
@@ -25,4 +25,13 @@ dist-ssr
 *.sw?

 # build stats
 stats.html
+
+# Yarn - https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/sdks
+!.yarn/versions
@@ -1,4 +1,4 @@
 #!/usr/bin/env sh
 . "$(dirname -- "$0")/_/husky.sh"

-cd invokeai/frontend/ && npx run lint
+cd invokeai/frontend/ && npm run lint-staged
@@ -2,4 +2,4 @@ dist/
 .husky/
 node_modules/
+patches/
 public/
-stats.html
@@ -1,6 +1,15 @@
 module.exports = {
   trailingComma: 'es5',
   tabWidth: 2,
+  endOfLine: 'auto',
   semi: true,
   singleQuote: true,
+  overrides: [
+    {
+      files: ['public/locales/*.json'],
+      options: {
+        tabWidth: 4,
+      },
+    },
+  ],
 };
193957 invokeai/frontend/.yarn/releases/yarn-1.22.19.cjs (vendored, new file)
File diff suppressed because one or more lines are too long

5 invokeai/frontend/.yarnrc (new file)
@@ -0,0 +1,5 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1


yarn-path ".yarn/releases/yarn-1.22.19.cjs"

1 invokeai/frontend/.yarnrc.yml (new file)
@@ -0,0 +1 @@
yarnPath: .yarn/releases/yarn-1.22.19.cjs
@@ -7,7 +7,7 @@ The UI is in `invokeai/frontend`.
 Install [node](https://nodejs.org/en/download/) (includes npm) and
 [yarn](https://yarnpkg.com/getting-started/install).

-From `invokeai/frontend/` run `yarn install` to get everything set up.
+From `invokeai/frontend/` run `yarn install --immutable` to get everything set up.

 ## Dev
1 invokeai/frontend/dist/assets/index-14cb2922.css (vendored, new file)
File diff suppressed because one or more lines are too long

638 invokeai/frontend/dist/assets/index-252612ad.js (vendored, deleted)
File diff suppressed because one or more lines are too long

624 invokeai/frontend/dist/assets/index-c09cf9ca.js (vendored, new file)
File diff suppressed because one or more lines are too long

4 invokeai/frontend/dist/index.html (vendored)
@@ -5,8 +5,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-252612ad.js"></script>
-    <link rel="stylesheet" href="./assets/index-b0bf79f4.css">
+    <script type="module" crossorigin src="./assets/index-c09cf9ca.js"></script>
+    <link rel="stylesheet" href="./assets/index-14cb2922.css">
   </head>

 <body>
520 invokeai/frontend/dist/locales/ar.json (vendored, new file)
@@ -0,0 +1,520 @@
{
    "common": {
        "hotkeysLabel": "مفاتيح الأختصار",
        "themeLabel": "الموضوع",
        "languagePickerLabel": "منتقي اللغة",
        "reportBugLabel": "بلغ عن خطأ",
        "settingsLabel": "إعدادات",
        "darkTheme": "داكن",
        "lightTheme": "فاتح",
        "greenTheme": "أخضر",
        "text2img": "نص إلى صورة",
        "img2img": "صورة إلى صورة",
        "unifiedCanvas": "لوحة موحدة",
        "nodes": "عقد",
        "langArabic": "العربية",
        "nodesDesc": "نظام مبني على العقد لإنتاج الصور قيد التطوير حاليًا. تبقى على اتصال مع تحديثات حول هذه الميزة المذهلة.",
        "postProcessing": "معالجة بعد الإصدار",
        "postProcessDesc1": "Invoke AI توفر مجموعة واسعة من ميزات المعالجة بعد الإصدار. تحسين الصور واستعادة الوجوه متاحين بالفعل في واجهة الويب. يمكنك الوصول إليهم من الخيارات المتقدمة في قائمة الخيارات في علامة التبويب Text To Image و Image To Image. يمكن أيضًا معالجة الصور مباشرةً باستخدام أزرار الإجراء على الصورة فوق عرض الصورة الحالي أو في العارض.",
        "postProcessDesc2": "سيتم إصدار واجهة رسومية مخصصة قريبًا لتسهيل عمليات المعالجة بعد الإصدار المتقدمة.",
        "postProcessDesc3": "واجهة سطر الأوامر Invoke AI توفر ميزات أخرى عديدة بما في ذلك Embiggen.",
        "training": "تدريب",
        "trainingDesc1": "تدفق خاص مخصص لتدريب تضميناتك الخاصة ونقاط التحقق باستخدام العكس النصي و دريم بوث من واجهة الويب.",
        "trainingDesc2": " استحضر الذكاء الصناعي يدعم بالفعل تدريب تضمينات مخصصة باستخدام العكس النصي باستخدام السكريبت الرئيسي.",
        "upload": "رفع",
        "close": "إغلاق",
        "load": "تحميل",
        "back": "الى الخلف",
        "statusConnected": "متصل",
        "statusDisconnected": "غير متصل",
        "statusError": "خطأ",
        "statusPreparing": "جاري التحضير",
        "statusProcessingCanceled": "تم إلغاء المعالجة",
        "statusProcessingComplete": "اكتمال المعالجة",
        "statusGenerating": "جاري التوليد",
        "statusGeneratingTextToImage": "جاري توليد النص إلى الصورة",
        "statusGeneratingImageToImage": "جاري توليد الصورة إلى الصورة",
        "statusGeneratingInpainting": "جاري توليد Inpainting",
        "statusGeneratingOutpainting": "جاري توليد Outpainting",
        "statusGenerationComplete": "اكتمال التوليد",
        "statusIterationComplete": "اكتمال التكرار",
        "statusSavingImage": "جاري حفظ الصورة",
        "statusRestoringFaces": "جاري استعادة الوجوه",
        "statusRestoringFacesGFPGAN": "تحسيت الوجوه (جي إف بي جان)",
        "statusRestoringFacesCodeFormer": "تحسين الوجوه (كود فورمر)",
        "statusUpscaling": "تحسين الحجم",
        "statusUpscalingESRGAN": "تحسين الحجم (إي إس آر جان)",
        "statusLoadingModel": "تحميل النموذج",
        "statusModelChanged": "تغير النموذج"
    },
    "gallery": {
        "generations": "الأجيال",
        "showGenerations": "عرض الأجيال",
        "uploads": "التحميلات",
        "showUploads": "عرض التحميلات",
        "galleryImageSize": "حجم الصورة",
        "galleryImageResetSize": "إعادة ضبط الحجم",
        "gallerySettings": "إعدادات المعرض",
        "maintainAspectRatio": "الحفاظ على نسبة الأبعاد",
        "autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة",
        "singleColumnLayout": "تخطيط عمود واحد",
        "pinGallery": "تثبيت المعرض",
        "allImagesLoaded": "تم تحميل جميع الصور",
        "loadMore": "تحميل المزيد",
        "noImagesInGallery": "لا توجد صور في المعرض"
    },
    "hotkeys": {
        "keyboardShortcuts": "مفاتيح الأزرار المختصرة",
        "appHotkeys": "مفاتيح التطبيق",
        "generalHotkeys": "مفاتيح عامة",
        "galleryHotkeys": "مفاتيح المعرض",
        "unifiedCanvasHotkeys": "مفاتيح اللوحةالموحدة ",
        "invoke": {
            "title": "أدعو",
            "desc": "إنشاء صورة"
        },
        "cancel": {
            "title": "إلغاء",
            "desc": "إلغاء إنشاء الصورة"
        },
        "focusPrompt": {
            "title": "تركيز الإشعار",
            "desc": "تركيز منطقة الإدخال الإشعار"
        },
        "toggleOptions": {
            "title": "تبديل الخيارات",
            "desc": "فتح وإغلاق لوحة الخيارات"
        },
        "pinOptions": {
            "title": "خيارات التثبيت",
            "desc": "ثبت لوحة الخيارات"
        },
        "toggleViewer": {
            "title": "تبديل العارض",
            "desc": "فتح وإغلاق مشاهد الصور"
        },
        "toggleGallery": {
            "title": "تبديل المعرض",
            "desc": "فتح وإغلاق درابزين المعرض"
        },
        "maximizeWorkSpace": {
            "title": "تكبير مساحة العمل",
            "desc": "إغلاق اللوحات وتكبير مساحة العمل"
        },
        "changeTabs": {
            "title": "تغيير الألسنة",
            "desc": "التبديل إلى مساحة عمل أخرى"
        },
        "consoleToggle": {
            "title": "تبديل الطرفية",
            "desc": "فتح وإغلاق الطرفية"
        },
        "setPrompt": {
            "title": "ضبط التشعب",
            "desc": "استخدم تشعب الصورة الحالية"
        },
        "setSeed": {
            "title": "ضبط البذور",
            "desc": "استخدم بذور الصورة الحالية"
        },
        "setParameters": {
            "title": "ضبط المعلمات",
            "desc": "استخدم جميع المعلمات الخاصة بالصورة الحالية"
        },
        "restoreFaces": {
            "title": "استعادة الوجوه",
            "desc": "استعادة الصورة الحالية"
        },
        "upscale": {
            "title": "تحسين الحجم",
            "desc": "تحسين حجم الصورة الحالية"
        },
        "showInfo": {
            "title": "عرض المعلومات",
            "desc": "عرض معلومات البيانات الخاصة بالصورة الحالية"
        },
        "sendToImageToImage": {
            "title": "أرسل إلى صورة إلى صورة",
            "desc": "أرسل الصورة الحالية إلى صورة إلى صورة"
        },
        "deleteImage": {
            "title": "حذف الصورة",
            "desc": "حذف الصورة الحالية"
        },
        "closePanels": {
            "title": "أغلق اللوحات",
            "desc": "يغلق اللوحات المفتوحة"
        },
        "previousImage": {
            "title": "الصورة السابقة",
            "desc": "عرض الصورة السابقة في الصالة"
        },
        "nextImage": {
            "title": "الصورة التالية",
            "desc": "عرض الصورة التالية في الصالة"
        },
        "toggleGalleryPin": {
            "title": "تبديل تثبيت الصالة",
            "desc": "يثبت ويفتح تثبيت الصالة على الواجهة الرسومية"
        },
        "increaseGalleryThumbSize": {
            "title": "زيادة حجم صورة الصالة",
            "desc": "يزيد حجم الصور المصغرة في الصالة"
        },
        "decreaseGalleryThumbSize": {
            "title": "انقاص حجم صورة الصالة",
            "desc": "ينقص حجم الصور المصغرة في الصالة"
        },
        "selectBrush": {
            "title": "تحديد الفرشاة",
            "desc": "يحدد الفرشاة على اللوحة"
        },
        "selectEraser": {
            "title": "تحديد الممحاة",
            "desc": "يحدد الممحاة على اللوحة"
        },
        "decreaseBrushSize": {
            "title": "تصغير حجم الفرشاة",
            "desc": "يصغر حجم الفرشاة/الممحاة على اللوحة"
        },
        "increaseBrushSize": {
            "title": "زيادة حجم الفرشاة",
            "desc": "يزيد حجم فرشة اللوحة / الممحاة"
        },
        "decreaseBrushOpacity": {
            "title": "تخفيض شفافية الفرشاة",
            "desc": "يخفض شفافية فرشة اللوحة"
        },
        "increaseBrushOpacity": {
            "title": "زيادة شفافية الفرشاة",
            "desc": "يزيد شفافية فرشة اللوحة"
        },
        "moveTool": {
            "title": "أداة التحريك",
            "desc": "يتيح التحرك في اللوحة"
        },
        "fillBoundingBox": {
            "title": "ملء الصندوق المحدد",
            "desc": "يملأ الصندوق المحدد بلون الفرشاة"
        },
        "eraseBoundingBox": {
            "title": "محو الصندوق المحدد",
            "desc": "يمحو منطقة الصندوق المحدد"
        },
        "colorPicker": {
            "title": "اختيار منتقي اللون",
            "desc": "يختار منتقي اللون الخاص باللوحة"
        },
        "toggleSnap": {
            "title": "تبديل التأكيد",
            "desc": "يبديل تأكيد الشبكة"
        },
        "quickToggleMove": {
            "title": "تبديل سريع للتحريك",
            "desc": "يبديل مؤقتا وضع التحريك"
        },
        "toggleLayer": {
            "title": "تبديل الطبقة",
            "desc": "يبديل إختيار الطبقة القناع / الأساسية"
        },
        "clearMask": {
            "title": "مسح القناع",
            "desc": "مسح القناع بأكمله"
        },
        "hideMask": {
            "title": "إخفاء الكمامة",
            "desc": "إخفاء وإظهار الكمامة"
        },
        "showHideBoundingBox": {
            "title": "إظهار / إخفاء علبة التحديد",
            "desc": "تبديل ظهور علبة التحديد"
        },
        "mergeVisible": {
            "title": "دمج الطبقات الظاهرة",
            "desc": "دمج جميع الطبقات الظاهرة في اللوحة"
        },
        "saveToGallery": {
            "title": "حفظ إلى صالة الأزياء",
            "desc": "حفظ اللوحة الحالية إلى صالة الأزياء"
        },
        "copyToClipboard": {
            "title": "نسخ إلى الحافظة",
            "desc": "نسخ اللوحة الحالية إلى الحافظة"
        },
        "downloadImage": {
            "title": "تنزيل الصورة",
            "desc": "تنزيل اللوحة الحالية"
        },
        "undoStroke": {
            "title": "تراجع عن الخط",
            "desc": "تراجع عن خط الفرشاة"
        },
        "redoStroke": {
            "title": "إعادة الخط",
            "desc": "إعادة خط الفرشاة"
        },
        "resetView": {
            "title": "إعادة تعيين العرض",
            "desc": "إعادة تعيين عرض اللوحة"
        },
        "previousStagingImage": {
            "title": "الصورة السابقة في المرحلة التجريبية",
            "desc": "الصورة السابقة في منطقة المرحلة التجريبية"
        },
        "nextStagingImage": {
            "title": "الصورة التالية في المرحلة التجريبية",
            "desc": "الصورة التالية في منطقة المرحلة التجريبية"
        },
        "acceptStagingImage": {
            "title": "قبول الصورة في المرحلة التجريبية",
            "desc": "قبول الصورة الحالية في منطقة المرحلة التجريبية"
        }
    },
    "modelManager": {
        "modelManager": "مدير النموذج",
        "model": "نموذج",
        "allModels": "جميع النماذج",
        "checkpointModels": "نقاط التحقق",
        "diffusersModels": "المصادر المتعددة",
        "safetensorModels": "التنسورات الآمنة",
        "modelAdded": "تمت إضافة النموذج",
        "modelUpdated": "تم تحديث النموذج",
        "modelEntryDeleted": "تم حذف مدخل النموذج",
        "cannotUseSpaces": "لا يمكن استخدام المساحات",
        "addNew": "إضافة جديد",
        "addNewModel": "إضافة نموذج جديد",
        "addCheckpointModel": "إضافة نقطة تحقق / نموذج التنسور الآمن",
        "addDiffuserModel": "إضافة مصادر متعددة",
        "addManually": "إضافة يدويًا",
        "manual": "يدوي",
        "name": "الاسم",
        "nameValidationMsg": "أدخل اسما لنموذجك",
        "description": "الوصف",
        "descriptionValidationMsg": "أضف وصفا لنموذجك",
        "config": "تكوين",
        "configValidationMsg": "مسار الملف الإعدادي لنموذجك.",
        "modelLocation": "موقع النموذج",
        "modelLocationValidationMsg": "موقع النموذج على الجهاز الخاص بك.",
        "repo_id": "معرف المستودع",
        "repoIDValidationMsg": "المستودع الإلكتروني لنموذجك",
        "vaeLocation": "موقع فاي إي",
        "vaeLocationValidationMsg": "موقع فاي إي على الجهاز الخاص بك.",
        "vaeRepoID": "معرف مستودع فاي إي",
        "vaeRepoIDValidationMsg": "المستودع الإلكتروني فاي إي",
        "width": "عرض",
        "widthValidationMsg": "عرض افتراضي لنموذجك.",
        "height": "ارتفاع",
        "heightValidationMsg": "ارتفاع افتراضي لنموذجك.",
        "addModel": "أضف نموذج",
        "updateModel": "تحديث النموذج",
        "availableModels": "النماذج المتاحة",
        "search": "بحث",
        "load": "تحميل",
        "active": "نشط",
        "notLoaded": "غير محمل",
        "cached": "مخبأ",
        "checkpointFolder": "مجلد التدقيق",
        "clearCheckpointFolder": "مسح مجلد التدقيق",
        "findModels": "إيجاد النماذج",
        "scanAgain": "فحص مرة أخرى",
        "modelsFound": "النماذج الموجودة",
        "selectFolder": "حدد المجلد",
        "selected": "تم التحديد",
        "selectAll": "حدد الكل",
        "deselectAll": "إلغاء تحديد الكل",
        "showExisting": "إظهار الموجود",
        "addSelected": "أضف المحدد",
        "modelExists": "النموذج موجود",
        "selectAndAdd": "حدد وأضف النماذج المدرجة أدناه",
        "noModelsFound": "لم يتم العثور على نماذج",
        "delete": "حذف",
        "deleteModel": "حذف النموذج",
        "deleteConfig": "حذف التكوين",
        "deleteMsg1": "هل أنت متأكد من رغبتك في حذف إدخال النموذج هذا من استحضر الذكاء الصناعي",
        "deleteMsg2": "هذا لن يحذف ملف نقطة التحكم للنموذج من القرص الخاص بك. يمكنك إعادة إضافتهم إذا كنت ترغب في ذلك.",
        "formMessageDiffusersModelLocation": "موقع النموذج للمصعد",
        "formMessageDiffusersModelLocationDesc": "يرجى إدخال واحد على الأقل.",
        "formMessageDiffusersVAELocation": "موقع فاي إي",
        "formMessageDiffusersVAELocationDesc": "إذا لم يتم توفيره، سيبحث استحضر الذكاء الصناعي عن ملف فاي إي داخل موقع النموذج المعطى أعلاه."
    },
    "parameters": {
        "images": "الصور",
        "steps": "الخطوات",
        "cfgScale": "مقياس الإعداد الذاتي للجملة",
        "width": "عرض",
        "height": "ارتفاع",
        "sampler": "مزج",
        "seed": "بذرة",
        "randomizeSeed": "تبديل بذرة",
        "shuffle": "تشغيل",
        "noiseThreshold": "عتبة الضوضاء",
        "perlinNoise": "ضجيج برلين",
        "variations": "تباينات",
        "variationAmount": "كمية التباين",
        "seedWeights": "أوزان البذور",
        "faceRestoration": "استعادة الوجه",
        "restoreFaces": "استعادة الوجوه",
        "type": "نوع",
        "strength": "قوة",
        "upscaling": "تصغير",
        "upscale": "تصغير",
        "upscaleImage": "تصغير الصورة",
        "scale": "مقياس",
        "otherOptions": "خيارات أخرى",
        "seamlessTiling": "تجهيز بلاستيكي بدون تشققات",
        "hiresOptim": "تحسين الدقة العالية",
        "imageFit": "ملائمة الصورة الأولية لحجم الخرج",
        "codeformerFidelity": "الوثوقية",
        "seamSize": "حجم التشقق",
        "seamBlur": "ضباب التشقق",
        "seamStrength": "قوة التشقق",
        "seamSteps": "خطوات التشقق",
        "scaleBeforeProcessing": "تحجيم قبل المعالجة",
        "scaledWidth": "العرض المحجوب",
        "scaledHeight": "الارتفاع المحجوب",
        "infillMethod": "طريقة التعبئة",
        "tileSize": "حجم البلاطة",
        "boundingBoxHeader": "صندوق التحديد",
        "seamCorrectionHeader": "تصحيح التشقق",
        "infillScalingHeader": "التعبئة والتحجيم",
        "img2imgStrength": "قوة صورة إلى صورة",
        "toggleLoopback": "تبديل الإعادة",
        "invoke": "إطلاق",
        "promptPlaceholder": "اكتب المحث هنا. [العلامات السلبية], (زيادة الوزن) ++, (نقص الوزن)--, التبديل و الخلط متاحة (انظر الوثائق)",
        "sendTo": "أرسل إلى",
        "sendToImg2Img": "أرسل إلى صورة إلى صورة",
        "sendToUnifiedCanvas": "أرسل إلى الخطوط الموحدة",
        "copyImage": "نسخ الصورة",
        "copyImageToLink": "نسخ الصورة إلى الرابط",
        "downloadImage": "تحميل الصورة",
        "openInViewer": "فتح في العارض",
        "closeViewer": "إغلاق العارض",
        "usePrompt": "استخدم المحث",
        "useSeed": "استخدام البذور",
        "useAll": "استخدام الكل",
        "useInitImg": "استخدام الصورة الأولية",
        "info": "معلومات",
        "deleteImage": "حذف الصورة",
        "initialImage": "الصورة الأولية",
        "showOptionsPanel": "إظهار لوحة الخيارات"
    },
    "settings": {
        "models": "موديلات",
        "displayInProgress": "عرض الصور المؤرشفة",
        "saveSteps": "حفظ الصور كل n خطوات",
        "confirmOnDelete": "تأكيد عند الحذف",
        "displayHelpIcons": "عرض أيقونات المساعدة",
        "useCanvasBeta": "استخدام مخطط الأزرار بيتا",
        "enableImageDebugging": "تمكين التصحيح عند التصوير",
        "resetWebUI": "إعادة تعيين واجهة الويب",
        "resetWebUIDesc1": "إعادة تعيين واجهة الويب يعيد فقط ذاكرة التخزين المؤقت للمتصفح لصورك وإعداداتك المذكورة. لا يحذف أي صور من القرص.",
        "resetWebUIDesc2": "إذا لم تظهر الصور في الصالة أو إذا كان شيء آخر غير ناجح، يرجى المحاولة إعادة تعيين قبل تقديم مشكلة على جيت هب.",
        "resetComplete": "تم إعادة تعيين واجهة الويب. تحديث الصفحة لإعادة التحميل."
    },
    "toast": {
        "tempFoldersEmptied": "تم تفريغ مجلد المؤقت",
        "uploadFailed": "فشل التحميل",
        "uploadFailedMultipleImagesDesc": "تم الصق صور متعددة، قد يتم تحميل صورة واحدة فقط في الوقت الحالي",
        "uploadFailedUnableToLoadDesc": "تعذر تحميل الملف",
        "downloadImageStarted": "بدأ تنزيل الصورة",
        "imageCopied": "تم نسخ الصورة",
        "imageLinkCopied": "تم نسخ رابط الصورة",
        "imageNotLoaded": "لم يتم تحميل أي صورة",
        "imageNotLoadedDesc": "لم يتم العثور على صورة لإرسالها إلى وحدة الصورة",
        "imageSavedToGallery": "تم حفظ الصورة في المعرض",
        "canvasMerged": "تم دمج الخط",
        "sentToImageToImage": "تم إرسال إلى صورة إلى صورة",
        "sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة",
        "parametersSet": "تم تعيين المعلمات",
        "parametersNotSet": "لم يتم تعيين المعلمات",
        "parametersNotSetDesc": "لم يتم العثور على معلمات بيانية لهذه الصورة.",
        "parametersFailed": "حدث مشكلة في تحميل المعلمات",
        "parametersFailedDesc": "تعذر تحميل صورة البدء.",
        "seedSet": "تم تعيين البذرة",
        "seedNotSet": "لم يتم تعيين البذرة",
        "seedNotSetDesc": "تعذر العثور على البذرة لهذه الصورة.",
        "promptSet": "تم تعيين الإشعار",
        "promptNotSet": "Prompt Not Set",
        "promptNotSetDesc": "تعذر العثور على الإشعار لهذه الصورة.",
        "upscalingFailed": "فشل التحسين",
        "faceRestoreFailed": "فشل استعادة الوجه",
        "metadataLoadFailed": "فشل تحميل البيانات الوصفية",
        "initialImageSet": "تم تعيين الصورة الأولية",
        "initialImageNotSet": "لم يتم تعيين الصورة الأولية",
        "initialImageNotSetDesc": "تعذر تحميل الصورة الأولية"
    },
    "tooltip": {
        "feature": {
            "prompt": "هذا هو حقل التحذير. يشمل التحذير عناصر الإنتاج والمصطلحات الأسلوبية. يمكنك إضافة الأوزان (أهمية الرمز) في التحذير أيضًا، ولكن أوامر CLI والمعلمات لن تعمل.",
            "gallery": "تعرض Gallery منتجات من مجلد الإخراج عندما يتم إنشاؤها. تخزن الإعدادات داخل الملفات ويتم الوصول إليها عن طريق قائمة السياق.",
            "other": "ستمكن هذه الخيارات من وضع عمليات معالجة بديلة لـاستحضر الذكاء الصناعي. سيؤدي 'الزخرفة بلا جدران' إلى إنشاء أنماط تكرارية في الإخراج. 'دقة عالية' هي الإنتاج خلال خطوتين عبر صورة إلى صورة: استخدم هذا الإعداد عندما ترغب في توليد صورة أكبر وأكثر تجانبًا دون العيوب. ستستغرق الأشياء وقتًا أطول من نص إلى صورة المعتاد.",
            "seed": "يؤثر قيمة البذور على الضوضاء الأولي الذي يتم تكوين الصورة منه. يمكنك استخدام البذور الخاصة بالصور السابقة. 'عتبة الضوضاء' يتم استخدامها لتخفيف العناصر الخللية في قيم CFG العالية (جرب مدى 0-10), و Perlin لإضافة ضوضاء Perlin أثناء الإنتاج: كلا منهما يعملان على إضافة التنوع إلى النتائج الخاصة بك.",
            "variations": "جرب التغيير مع قيمة بين 0.1 و 1.0 لتغيير النتائج لبذور معينة. التغييرات المثيرة للاهتمام للبذور تكون بين 0.1 و 0.3.",
            "upscale": "استخدم إي إس آر جان لتكبير الصورة على الفور بعد الإنتاج.",
            "faceCorrection": "تصحيح الوجه باستخدام جي إف بي جان أو كود فورمر: يكتشف الخوارزمية الوجوه في الصورة وتصحح أي عيوب. قيمة عالية ستغير الصورة أكثر، مما يؤدي إلى وجوه أكثر جمالا. كود فورمر بدقة أعلى يحتفظ بالصورة الأصلية على حساب تصحيح وجه أكثر قوة.",
            "imageToImage": "تحميل صورة إلى صورة أي صورة كأولية، والتي يتم استخدامها لإنشاء صورة جديدة مع التشعيب. كلما كانت القيمة أعلى، كلما تغيرت نتيجة الصورة. من الممكن أن تكون القيم بين 0.0 و 1.0، وتوصي النطاق الموصى به هو .25-.75",
            "boundingBox": "مربع الحدود هو نفس الإعدادات العرض والارتفاع لنص إلى صورة أو صورة إلى صورة. فقط المنطقة في المربع سيتم معالجتها.",
            "seamCorrection": "يتحكم بالتعامل مع الخطوط المرئية التي تحدث بين الصور المولدة في سطح اللوحة.",
            "infillAndScaling": "إدارة أساليب التعبئة (المستخدمة على المناطق المخفية أو الممحوة في سطح اللوحة) والزيادة في الحجم (مفيدة لحجوزات الإطارات الصغيرة)."
        }
    },
    "unifiedCanvas": {
        "layer": "طبقة",
        "base": "قاعدة",
        "mask": "قناع",
        "maskingOptions": "خيارات القناع",
        "enableMask": "مكن القناع",
        "preserveMaskedArea": "الحفاظ على المنطقة المقنعة",
        "clearMask": "مسح القناع",
        "brush": "فرشاة",
        "eraser": "ممحاة",
        "fillBoundingBox": "ملئ إطار الحدود",
        "eraseBoundingBox": "مسح إطار الحدود",
        "colorPicker": "اختيار اللون",
        "brushOptions": "خيارات الفرشاة",
        "brushSize": "الحجم",
        "move": "تحريك",
        "resetView": "إعادة تعيين العرض",
        "mergeVisible": "دمج الظاهر",
        "saveToGallery": "حفظ إلى المعرض",
        "copyToClipboard": "نسخ إلى الحافظة",
        "downloadAsImage": "تنزيل على شكل صورة",
        "undo": "تراجع",
        "redo": "إعادة",
        "clearCanvas": "مسح سبيكة الكاملة",
        "canvasSettings": "إعدادات سبيكة الكاملة",
        "showIntermediates": "إظهار الوسطاء",
        "showGrid": "إظهار الشبكة",
        "snapToGrid": "الالتفاف إلى الشبكة",
        "darkenOutsideSelection": "تعمية خارج التحديد",
        "autoSaveToGallery": "حفظ تلقائي إلى المعرض",
        "saveBoxRegionOnly": "حفظ منطقة الصندوق فقط",
        "limitStrokesToBox": "تحديد عدد الخطوط إلى الصندوق",
        "showCanvasDebugInfo": "إظهار معلومات تصحيح سبيكة الكاملة",
        "clearCanvasHistory": "مسح تاريخ سبيكة الكاملة",
        "clearHistory": "مسح التاريخ",
        "clearCanvasHistoryMessage": "مسح تاريخ اللوحة تترك اللوحة الحالية عائمة، ولكن تمسح بشكل غير قابل للتراجع تاريخ التراجع والإعادة.",
        "clearCanvasHistoryConfirm": "هل أنت متأكد من رغبتك في مسح تاريخ اللوحة؟",
        "emptyTempImageFolder": "إفراغ مجلد الصور المؤقتة",
        "emptyFolder": "إفراغ المجلد",
        "emptyTempImagesFolderMessage": "إفراغ مجلد الصور المؤقتة يؤدي أيضًا إلى إعادة تعيين اللوحة الموحدة بشكل كامل. وهذا يشمل كل تاريخ التراجع / الإعادة والصور في منطقة التخزين وطبقة الأساس لللوحة.",
        "emptyTempImagesFolderConfirm": "هل أنت متأكد من رغبتك في إفراغ مجلد الصور المؤقتة؟",
        "activeLayer": "الطبقة النشطة",
        "canvasScale": "مقياس اللوحة",
        "boundingBox": "صندوق الحدود",
        "scaledBoundingBox": "صندوق الحدود المكبر",
        "boundingBoxPosition": "موضع صندوق الحدود",
        "canvasDimensions": "أبعاد اللوحة",
        "canvasPosition": "موضع اللوحة",
        "cursorPosition": "موضع المؤشر",
        "previous": "السابق",
        "next": "التالي",
        "accept": "قبول",
        "showHide": "إظهار/إخفاء",
        "discardAll": "تجاهل الكل",
        "betaClear": "مسح",
        "betaDarkenOutside": "ظل الخارج",
        "betaLimitToBox": "تحديد إلى الصندوق",
        "betaPreserveMasked": "المحافظة على المخفية"
    }
}
55 invokeai/frontend/dist/locales/common/de.json (vendored, deleted)
@@ -1,55 +0,0 @@
{
  "hotkeysLabel": "Hotkeys",
  "themeLabel": "Thema",
  "languagePickerLabel": "Sprachauswahl",
  "reportBugLabel": "Fehler melden",
  "githubLabel": "Github",
  "discordLabel": "Discord",
  "settingsLabel": "Einstellungen",
  "darkTheme": "Dunkel",
  "lightTheme": "Hell",
  "greenTheme": "Grün",
  "langEnglish": "Englisch",
  "langRussian": "Russisch",
  "langItalian": "Italienisch",
  "langPortuguese": "Portugiesisch",
  "langFrench": "Französich",
  "langGerman": "Deutsch",
  "langSpanish": "Spanisch",
  "text2img": "Text zu Bild",
  "img2img": "Bild zu Bild",
  "unifiedCanvas": "Unified Canvas",
  "nodes": "Knoten",
  "nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
  "postProcessing": "Nachbearbeitung",
  "postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
  "postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.",
  "postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.",
  "training": "Training",
  "trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
  "trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
  "upload": "Upload",
  "close": "Schließen",
  "load": "Laden",
  "statusConnected": "Verbunden",
  "statusDisconnected": "Getrennt",
  "statusError": "Fehler",
  "statusPreparing": "Vorbereiten",
  "statusProcessingCanceled": "Verarbeitung abgebrochen",
  "statusProcessingComplete": "Verarbeitung komplett",
  "statusGenerating": "Generieren",
  "statusGeneratingTextToImage": "Erzeugen von Text zu Bild",
  "statusGeneratingImageToImage": "Erzeugen von Bild zu Bild",
  "statusGeneratingInpainting": "Erzeuge Inpainting",
  "statusGeneratingOutpainting": "Erzeuge Outpainting",
  "statusGenerationComplete": "Generierung abgeschlossen",
  "statusIterationComplete": "Iteration abgeschlossen",
  "statusSavingImage": "Speichere Bild",
  "statusRestoringFaces": "Gesichter restaurieren",
  "statusRestoringFacesGFPGAN": "Gesichter restaurieren (GFPGAN)",
  "statusRestoringFacesCodeFormer": "Gesichter restaurieren (CodeFormer)",
  "statusUpscaling": "Hochskalierung",
  "statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
  "statusLoadingModel": "Laden des Modells",
  "statusModelChanged": "Modell Geändert"
}
62 invokeai/frontend/dist/locales/common/en-US.json (vendored, deleted)
@@ -1,62 +0,0 @@
{
  "hotkeysLabel": "Hotkeys",
  "themeLabel": "Theme",
  "languagePickerLabel": "Language Picker",
  "reportBugLabel": "Report Bug",
  "githubLabel": "Github",
  "discordLabel": "Discord",
  "settingsLabel": "Settings",
  "darkTheme": "Dark",
  "lightTheme": "Light",
  "greenTheme": "Green",
  "langEnglish": "English",
  "langRussian": "Russian",
  "langItalian": "Italian",
  "langBrPortuguese": "Portuguese (Brazilian)",
  "langGerman": "German",
  "langPortuguese": "Portuguese",
  "langFrench": "French",
  "langPolish": "Polish",
  "langSimplifiedChinese": "Simplified Chinese",
  "langSpanish": "Spanish",
  "langJapanese": "Japanese",
  "langDutch": "Dutch",
  "langUkranian": "Ukranian",
  "text2img": "Text To Image",
  "img2img": "Image To Image",
  "unifiedCanvas": "Unified Canvas",
  "nodes": "Nodes",
  "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
  "postProcessing": "Post Processing",
  "postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
  "postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
  "postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
  "training": "Training",
  "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
  "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
  "upload": "Upload",
  "close": "Close",
  "load": "Load",
  "back": "Back",
  "statusConnected": "Connected",
  "statusDisconnected": "Disconnected",
  "statusError": "Error",
  "statusPreparing": "Preparing",
  "statusProcessingCanceled": "Processing Canceled",
  "statusProcessingComplete": "Processing Complete",
  "statusGenerating": "Generating",
  "statusGeneratingTextToImage": "Generating Text To Image",
  "statusGeneratingImageToImage": "Generating Image To Image",
  "statusGeneratingInpainting": "Generating Inpainting",
  "statusGeneratingOutpainting": "Generating Outpainting",
  "statusGenerationComplete": "Generation Complete",
  "statusIterationComplete": "Iteration Complete",
  "statusSavingImage": "Saving Image",
  "statusRestoringFaces": "Restoring Faces",
  "statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
  "statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
  "statusUpscaling": "Upscaling",
  "statusUpscalingESRGAN": "Upscaling (ESRGAN)",
  "statusLoadingModel": "Loading Model",
  "statusModelChanged": "Model Changed"
}

62 invokeai/frontend/dist/locales/common/en.json (vendored, deleted)
@@ -1,62 +0,0 @@
{
  "hotkeysLabel": "Hotkeys",
  "themeLabel": "Theme",
  "languagePickerLabel": "Language Picker",
  "reportBugLabel": "Report Bug",
  "githubLabel": "Github",
  "discordLabel": "Discord",
  "settingsLabel": "Settings",
  "darkTheme": "Dark",
  "lightTheme": "Light",
  "greenTheme": "Green",
  "langEnglish": "English",
  "langRussian": "Russian",
  "langItalian": "Italian",
  "langBrPortuguese": "Portuguese (Brazilian)",
  "langGerman": "German",
  "langPortuguese": "Portuguese",
  "langFrench": "French",
  "langPolish": "Polish",
  "langSimplifiedChinese": "Simplified Chinese",
  "langSpanish": "Spanish",
  "langJapanese": "Japanese",
  "langDutch": "Dutch",
  "langUkranian": "Ukranian",
  "text2img": "Text To Image",
  "img2img": "Image To Image",
  "unifiedCanvas": "Unified Canvas",
  "nodes": "Nodes",
  "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
  "postProcessing": "Post Processing",
  "postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
  "postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
  "postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
  "training": "Training",
  "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
  "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
  "upload": "Upload",
  "close": "Close",
  "load": "Load",
  "back": "Back",
  "statusConnected": "Connected",
  "statusDisconnected": "Disconnected",
  "statusError": "Error",
  "statusPreparing": "Preparing",
  "statusProcessingCanceled": "Processing Canceled",
  "statusProcessingComplete": "Processing Complete",
  "statusGenerating": "Generating",
  "statusGeneratingTextToImage": "Generating Text To Image",
  "statusGeneratingImageToImage": "Generating Image To Image",
  "statusGeneratingInpainting": "Generating Inpainting",
  "statusGeneratingOutpainting": "Generating Outpainting",
  "statusGenerationComplete": "Generation Complete",
  "statusIterationComplete": "Iteration Complete",
  "statusSavingImage": "Saving Image",
  "statusRestoringFaces": "Restoring Faces",
  "statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
  "statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
  "statusUpscaling": "Upscaling",
  "statusUpscalingESRGAN": "Upscaling (ESRGAN)",
  "statusLoadingModel": "Loading Model",
  "statusModelChanged": "Model Changed"
}
58 invokeai/frontend/dist/locales/common/es.json (vendored, deleted)
@@ -1,58 +0,0 @@
{
  "hotkeysLabel": "Atajos de teclado",
  "themeLabel": "Tema",
  "languagePickerLabel": "Selector de idioma",
  "reportBugLabel": "Reportar errores",
  "githubLabel": "GitHub",
  "discordLabel": "Discord",
  "settingsLabel": "Ajustes",
  "darkTheme": "Oscuro",
  "lightTheme": "Claro",
  "greenTheme": "Verde",
  "langEnglish": "Inglés",
  "langRussian": "Ruso",
  "langItalian": "Italiano",
  "langBrPortuguese": "Portugués (Brasil)",
  "langGerman": "Alemán",
  "langPortuguese": "Portugués",
  "langFrench": "French",
  "langPolish": "Polish",
  "langSpanish": "Español",
  "text2img": "Texto a Imagen",
  "img2img": "Imagen a Imagen",
  "unifiedCanvas": "Lienzo Unificado",
  "nodes": "Nodos",
  "nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
  "postProcessing": "Post-procesamiento",
  "postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
  "postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
  "postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
  "training": "Entrenamiento",
  "trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.",
  "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
  "trainingDesc2": "InvokeAI ya soporta el entrenamiento de -embeddings- personalizados utilizando la Inversión Textual mediante el script principal.",
  "upload": "Subir imagen",
  "close": "Cerrar",
  "load": "Cargar",
  "statusConnected": "Conectado",
  "statusDisconnected": "Desconectado",
  "statusError": "Error",
  "statusPreparing": "Preparando",
  "statusProcessingCanceled": "Procesamiento Cancelado",
  "statusProcessingComplete": "Procesamiento Completo",
  "statusGenerating": "Generando",
  "statusGeneratingTextToImage": "Generando Texto a Imagen",
  "statusGeneratingImageToImage": "Generando Imagen a Imagen",
  "statusGeneratingInpainting": "Generando pintura interior",
  "statusGeneratingOutpainting": "Generando pintura exterior",
  "statusGenerationComplete": "Generación Completa",
  "statusIterationComplete": "Iteración Completa",
  "statusSavingImage": "Guardando Imagen",
  "statusRestoringFaces": "Restaurando Rostros",
  "statusRestoringFacesGFPGAN": "Restaurando Rostros (GFPGAN)",
  "statusRestoringFacesCodeFormer": "Restaurando Rostros (CodeFormer)",
  "statusUpscaling": "Aumentando Tamaño",
  "statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
  "statusLoadingModel": "Cargando Modelo",
  "statusModelChanged": "Modelo cambiado"
}
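Taken together, the locale changes in this diff delete the per-namespace files (dist/locales/common/<lang>.json and friends) and add one nested file per language, as with dist/locales/ar.json above. A hedged sketch of that folding step, assuming a source layout of locales/<namespace>/<lang>.json (the actual build script is not part of this diff):

    import json
    from pathlib import Path

    def merge_locales(src: Path, dst: Path) -> None:
        # locales/<namespace>/<lang>.json -> locales/<lang>.json with one
        # top-level object per namespace ("common", "gallery", ...).
        merged = {}
        for ns_dir in sorted(p for p in src.iterdir() if p.is_dir()):
            for f in ns_dir.glob("*.json"):
                merged.setdefault(f.stem, {})[ns_dir.name] = json.loads(
                    f.read_text(encoding="utf-8")
                )
        dst.mkdir(parents=True, exist_ok=True)
        for lang, namespaces in merged.items():
            (dst / f"{lang}.json").write_text(
                json.dumps(namespaces, ensure_ascii=False, indent=4),
                encoding="utf-8",
            )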
Some files were not shown because too many files have changed in this diff.